Pull test into release branch

commit 57e1c5c87d

221 changed files with 8903 additions and 9113 deletions
@@ -274,6 +274,7 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>

---------------------------

<<<<<<< test:Documentation/feature-removal-schedule.txt
What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
When: 2.6.21
Why: hotkey.c was an attempt to consolidate multiple drivers that use
@@ -306,11 +307,18 @@ Why: The ACPI namespace is effectively the symbol list for
the BIOS can be extracted and disassembled with acpidump
and iasl as documented in the pmtools package here:
http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils

Who: Len Brown <len.brown@intel.com>

---------------------------

What: ACPI procfs interface
When: July 2007
Why: After ACPI sysfs conversion, ACPI attributes will be duplicated
in sysfs and the ACPI procfs interface should be removed.
Who: Zhang Rui <rui.zhang@intel.com>

---------------------------

What: /proc/acpi/button
When: August 2007
Why: /proc/acpi/button has been replaced by events to the input layer
34 Documentation/video-output.txt Normal file
@@ -0,0 +1,34 @@

Video Output Switcher Control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2006 luming.yu@intel.com

The output sysfs class driver provides an abstract video output layer that
can be used to hook platform specific methods to enable/disable video output
device through common sysfs interface. For example, on my IBM ThinkPad T42
laptop, The ACPI video driver registered its output devices and read/write
method for 'state' with output sysfs class. The user interface under sysfs is:

linux:/sys/class/video_output # tree .
.
|-- CRT0
|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
|   |-- state
|   |-- subsystem -> ../../../class/video_output
|   `-- uevent
|-- DVI0
|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
|   |-- state
|   |-- subsystem -> ../../../class/video_output
|   `-- uevent
|-- LCD0
|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
|   |-- state
|   |-- subsystem -> ../../../class/video_output
|   `-- uevent
`-- TV0
    |-- device -> ../../../devices/pci0000:00/0000:00:01.0
    |-- state
    |-- subsystem -> ../../../class/video_output
    `-- uevent
@@ -584,6 +584,14 @@ W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained

ASUS LAPTOP EXTRAS DRIVER
P: Corentin Chary
M: corentincj@iksaif.net
L: acpi4asus-user@lists.sourceforge.net
W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained

ATA OVER ETHERNET DRIVER
P: Ed L. Cashin
M: ecashin@coraid.com
@@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
#
# Plug and Play support
#
# CONFIG_PNP is not set
CONFIG_PNP=y
CONFIG_PNPACPI=y

#
# Block devices
@@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return

#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))

#define PREFIX "ACPI: "

@@ -79,7 +79,7 @@ int acpi_ioapic;
int acpi_strict;
EXPORT_SYMBOL(acpi_strict);

acpi_interrupt_flags acpi_sci_flags __initdata;
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif

#define MAX_MADT_ENTRIES 256
u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
{[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
EXPORT_SYMBOL(x86_acpiid_to_apicid);

/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)

#ifdef CONFIG_PCI_MMCONFIG
/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
struct acpi_table_mcfg_config *pci_mmcfg_config;
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;

int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
int __init acpi_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
unsigned long i;
int config_size;

if (!phys_addr || !size)
if (!header)
return -EINVAL;

mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
if (!mcfg) {
printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
return -ENODEV;
}
mcfg = (struct acpi_table_mcfg *)header;

/* how many config structures do we have */
pci_mmcfg_config_num = 0;
i = size - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_table_mcfg_config)) {
i = header->length - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_mcfg_allocation)) {
++pci_mmcfg_config_num;
i -= sizeof(struct acpi_table_mcfg_config);
i -= sizeof(struct acpi_mcfg_allocation);
};
if (pci_mmcfg_config_num == 0) {
printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
return -ENOMEM;
}

memcpy(pci_mmcfg_config, &mcfg->config, config_size);
memcpy(pci_mmcfg_config, &mcfg[1], config_size);
for (i = 0; i < pci_mmcfg_config_num; ++i) {
if (mcfg->config[i].base_reserved) {
if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
printk(KERN_ERR PREFIX
"MMCONFIG not in low 4GB of memory\n");
kfree(pci_mmcfg_config);
@@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
#endif /* CONFIG_PCI_MMCONFIG */

#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
struct acpi_table_madt *madt = NULL;

if (!phys_addr || !size || !cpu_has_apic)
if (!cpu_has_apic)
return -EINVAL;

madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
madt = (struct acpi_table_madt *)table;
if (!madt) {
printk(KERN_WARNING PREFIX "Unable to map MADT\n");
return -ENODEV;
}

if (madt->lapic_address) {
acpi_lapic_addr = (u64) madt->lapic_address;
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;

printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
madt->lapic_address);
madt->address);
}

acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
@@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
}

static int __init
acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic *processor = NULL;
struct acpi_madt_local_apic *processor = NULL;

processor = (struct acpi_table_lapic *)header;
processor = (struct acpi_madt_local_apic *)header;

if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;

acpi_table_print_madt_entry(header);

/* Record local apic id only when enabled */
if (processor->flags.enabled)
x86_acpiid_to_apicid[processor->acpi_id] = processor->id;

/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
@@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
* when we use CPU hotplug.
*/
mp_register_lapic(processor->id, /* APIC ID */
processor->flags.enabled); /* Enabled? */
processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */

return 0;
}

static int __init
acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;

lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;

if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
return -EINVAL;
@@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
}

static int __init
acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic_nmi *lapic_nmi = NULL;
struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;

lapic_nmi = (struct acpi_table_lapic_nmi *)header;
lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;

if (BAD_MADT_ENTRY(lapic_nmi, end))
return -EINVAL;
@@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
#ifdef CONFIG_X86_IO_APIC

static int __init
acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_ioapic *ioapic = NULL;
struct acpi_madt_io_apic *ioapic = NULL;

ioapic = (struct acpi_table_ioapic *)header;
ioapic = (struct acpi_madt_io_apic *)header;

if (BAD_MADT_ENTRY(ioapic, end))
return -EINVAL;
@@ -342,11 +329,11 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
polarity = 3;

/* Command-line over-ride via acpi_sci= */
if (acpi_sci_flags.trigger)
trigger = acpi_sci_flags.trigger;
if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;

if (acpi_sci_flags.polarity)
polarity = acpi_sci_flags.polarity;
if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;

/*
* mp_config_acpi_legacy_irqs() already setup IRQs < 16
@@ -357,51 +344,52 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)

/*
* stash over-ride to indicate we've been here
* and for later update of acpi_fadt
* and for later update of acpi_gbl_FADT
*/
acpi_sci_override_gsi = gsi;
return;
}

static int __init
acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_int_src_ovr *intsrc = NULL;
struct acpi_madt_interrupt_override *intsrc = NULL;

intsrc = (struct acpi_table_int_src_ovr *)header;
intsrc = (struct acpi_madt_interrupt_override *)header;

if (BAD_MADT_ENTRY(intsrc, end))
return -EINVAL;

acpi_table_print_madt_entry(header);

if (intsrc->bus_irq == acpi_fadt.sci_int) {
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
return 0;
}

if (acpi_skip_timer_override &&
intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
intsrc->source_irq == 0 && intsrc->global_irq == 2) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
}

mp_override_legacy_irq(intsrc->bus_irq,
intsrc->flags.polarity,
intsrc->flags.trigger, intsrc->global_irq);
mp_override_legacy_irq(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
intsrc->global_irq);

return 0;
}

static int __init
acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_nmi_src *nmi_src = NULL;
struct acpi_madt_nmi_source *nmi_src = NULL;

nmi_src = (struct acpi_table_nmi_src *)header;
nmi_src = (struct acpi_madt_nmi_source *)header;

if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
@@ -417,7 +405,7 @@ acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)

/*
* acpi_pic_sci_set_trigger()
*
*
* use ELCR to set PIC-mode trigger type for SCI
*
* If a PIC-mode SCI is not recognized or gives spurious IRQ7's
@@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_table_lapic *lapic;
struct acpi_madt_local_apic *lapic;
cpumask_t tmp_map, new_map;
u8 physid;
int cpu;
@@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
return -EINVAL;
}

lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;

if ((lapic->header.type != ACPI_MADT_LAPIC) ||
(!lapic->flags.enabled)) {
if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
kfree(buffer.pointer);
return -EINVAL;
}
@@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
buffer.pointer = NULL;

tmp_map = cpu_present_map;
mp_register_lapic(physid, lapic->flags.enabled);
mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);

/*
* If mp_register_lapic successfully generates a new logical cpu
@@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);

int acpi_unmap_lsapic(int cpu)
{
int i;

for_each_possible_cpu(i) {
if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
x86_acpiid_to_apicid[i] = -1;
break;
}
}
x86_cpu_to_apicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
num_processors--;
@@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
return 0;
}

static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_sbf *sb;
struct acpi_table_boot *sb;

if (!phys_addr || !size)
return -EINVAL;

sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
sb = (struct acpi_table_boot *)table;
if (!sb) {
printk(KERN_WARNING PREFIX "Unable to map SBF\n");
return -ENODEV;
}

sbf_port = sb->sbf_cmos; /* Save CMOS port */
sbf_port = sb->cmos_index; /* Save CMOS port */

return 0;
}

#ifdef CONFIG_HPET_TIMER

static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
struct acpi_table_hpet *hpet_tbl;
struct resource *hpet_res;
resource_size_t res_start;

if (!phys || !size)
return -EINVAL;

hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
hpet_tbl = (struct acpi_table_hpet *)table;
if (!hpet_tbl) {
printk(KERN_WARNING PREFIX "Unable to map HPET\n");
return -ENODEV;
}

if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
printk(KERN_WARNING PREFIX "HPET timers must be located in "
"memory.\n");
return -1;
@@ -667,29 +641,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
"HPET %u", hpet_tbl->number);
"HPET %u", hpet_tbl->sequence);
hpet_res->end = (1 * 1024) - 1;
}

#ifdef CONFIG_X86_64
vxtime.hpet_address = hpet_tbl->addr.addrl |
((long)hpet_tbl->addr.addrh << 32);
#ifdef CONFIG_X86_64
vxtime.hpet_address = hpet_tbl->address.address;

printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, vxtime.hpet_address);
hpet_tbl->id, vxtime.hpet_address);

res_start = vxtime.hpet_address;
#else /* X86 */
#else /* X86 */
{
extern unsigned long hpet_address;

hpet_address = hpet_tbl->addr.addrl;
hpet_address = hpet_tbl->address.address;
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, hpet_address);
hpet_tbl->id, hpet_address);

res_start = hpet_address;
}
#endif /* X86 */
#endif /* X86 */

if (hpet_res) {
hpet_res->start = res_start;
@@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
extern u32 pmtmr_ioport;
#endif

static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
struct fadt_descriptor *fadt = NULL;

fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
}
/* initialize sci_int early for INT_SRC_OVR MADT parsing */
acpi_fadt.sci_int = fadt->sci_int;

/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
acpi_fadt.revision = fadt->revision;
acpi_fadt.force_apic_physical_destination_mode =
fadt->force_apic_physical_destination_mode;

#ifdef CONFIG_X86_PM_TIMER
/* detect the location of the ACPI PM Timer */
if (fadt->revision >= FADT2_REVISION_ID) {
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
/* FADT rev. 2 */
if (fadt->xpm_tmr_blk.address_space_id !=
if (acpi_gbl_FADT.xpm_timer_block.space_id !=
ACPI_ADR_SPACE_SYSTEM_IO)
return 0;

pmtmr_ioport = fadt->xpm_tmr_blk.address;
pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
/*
* "X" fields are optional extensions to the original V1.0
* fields, so we must selectively expand V1.0 fields if the
* corresponding X field is zero.
*/
if (!pmtmr_ioport)
pmtmr_ioport = fadt->V1_pm_tmr_blk;
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
} else {
/* FADT rev. 1 */
pmtmr_ioport = fadt->V1_pm_tmr_blk;
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
@@ -784,13 +743,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
if (!cpu_has_apic)
return -ENODEV;

/*
/*
* Note that the LAPIC address is obtained from the MADT (32-bit value)
* and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
*/

count =
acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
printk(KERN_ERR PREFIX
@@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)

mp_register_lapic_address(acpi_lapic_addr);

count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
MAX_APICS);
if (!count) {
printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
}

count =
acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
@@ -842,7 +801,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
return -ENODEV;
}

if (!cpu_has_apic)
if (!cpu_has_apic)
return -ENODEV;

/*
@@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
}

count =
acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
}

count =
acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
NR_IRQ_VECTORS);
if (count < 0) {
printk(KERN_ERR PREFIX
@@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);

/* Fill in identity legacy mapings where no override */
mp_config_acpi_legacy_irqs();

count =
acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
NR_IRQ_VECTORS);
if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
@@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
#ifdef CONFIG_X86_LOCAL_APIC
int count, error;

count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
if (count >= 1) {

/*
@@ -1195,7 +1154,7 @@ int __init acpi_boot_table_init(void)
if (acpi_disabled && !acpi_ht)
return 1;

/*
/*
* Initialize the ACPI boot-time table parser.
*/
error = acpi_table_init();
@@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
return error;
}

acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

/*
* blacklist may disable ACPI entirely
@@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
if (acpi_disabled && !acpi_ht)
return 1;

acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

/*
* set sci_int and PM timer address
*/
acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

/*
* Process the Multiple APIC Description Table (MADT), if present
*/
acpi_process_madt();

acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

return 0;
}
@@ -1315,13 +1274,17 @@ static int __init setup_acpi_sci(char *s)
if (!s)
return -EINVAL;
if (!strcmp(s, "edge"))
acpi_sci_flags.trigger = 1;
acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "level"))
acpi_sci_flags.trigger = 3;
acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "high"))
acpi_sci_flags.polarity = 1;
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else if (!strcmp(s, "low"))
acpi_sci_flags.polarity = 3;
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else
return -EINVAL;
return 0;
@@ -16,7 +16,7 @@

static int nvidia_hpet_detected __initdata;

static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
is enabled. */
if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_fadt.xpm_tmr_blk.address);
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/* Disable bus ratio bit */
local_irq_disable();
@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {

@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, clock_ratio_index);
} else {
do_powersaver(0, clock_ratio_index);
@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);
@@ -414,7 +411,7 @@ static int __init longhaul_get_ranges(void)
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
print_speed(lowest_speed/1000),
print_speed(lowest_speed/1000),
print_speed(highest_speed/1000));

if (lowest_speed == highest_speed) {
@@ -498,7 +495,7 @@ static void __init longhaul_setup_voltagescaling(void)
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);

j = 0;
while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
speed = longhaul_table[j].frequency;
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
static int gsi_to_irq[MAX_GSI_NUM];

/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;

ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
/*
* Don't assign IRQ used by ACPI SCI
*/
if (gsi == acpi_fadt.sci_int)
if (gsi == acpi_gbl_FADT.sci_interrupt)
gsi = pci_irq++;
gsi_to_irq[irq] = gsi;
} else {
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
/* Identify CPU proximity domains */
static void __init parse_cpu_affinity_structure(char *p)
{
struct acpi_table_processor_affinity *cpu_affinity =
(struct acpi_table_processor_affinity *) p;
struct acpi_srat_cpu_affinity *cpu_affinity =
(struct acpi_srat_cpu_affinity *) p;

if (!cpu_affinity->flags.enabled)
if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return; /* empty entry */

/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);

apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain;
apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;

printk("CPU 0x%02X in proximity domain 0x%02X\n",
cpu_affinity->apic_id, cpu_affinity->proximity_domain);
cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
}

/*
@@ -84,28 +84,27 @@ static void __init parse_cpu_affinity_structure(char *p)
static void __init parse_memory_affinity_structure (char *sratp)
{
unsigned long long paddr, size;
unsigned long start_pfn, end_pfn;
unsigned long start_pfn, end_pfn;
u8 pxm;
struct node_memory_chunk_s *p, *q, *pend;
struct acpi_table_memory_affinity *memory_affinity =
(struct acpi_table_memory_affinity *) sratp;
struct acpi_srat_mem_affinity *memory_affinity =
(struct acpi_srat_mem_affinity *) sratp;

if (!memory_affinity->flags.enabled)
if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return; /* empty entry */

pxm = memory_affinity->proximity_domain & 0xff;

/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, pxm);

/* calculate info for memory chunk structure */
paddr = memory_affinity->base_addr_hi;
paddr = (paddr << 32) | memory_affinity->base_addr_lo;
size = memory_affinity->length_hi;
size = (size << 32) | memory_affinity->length_lo;

paddr = memory_affinity->base_address;
size = memory_affinity->length;

start_pfn = paddr >> PAGE_SHIFT;
end_pfn = (paddr + size) >> PAGE_SHIFT;

pxm = memory_affinity->proximity_domain;


if (num_memory_chunks >= MAXCHUNKS) {
printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
start_pfn, end_pfn,
memory_affinity->memory_type,
memory_affinity->proximity_domain,
(memory_affinity->flags.hot_pluggable ?
pxm,
((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
"enabled and removable" : "enabled" ) );
}

@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
num_memory_chunks = 0;
while (p < end) {
switch (*p) {
case ACPI_SRAT_PROCESSOR_AFFINITY:
case ACPI_SRAT_TYPE_CPU_AFFINITY:
parse_cpu_affinity_structure(p);
break;
case ACPI_SRAT_MEMORY_AFFINITY:
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
parse_memory_affinity_structure(p);
break;
default:
@@ -262,31 +261,30 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
return 0;
}

struct acpi_static_rsdt {
struct acpi_table_rsdt table;
u32 padding[7]; /* Allow for 7 more table entries */
};

int __init get_memcfg_from_srat(void)
{
struct acpi_table_header *header = NULL;
struct acpi_table_rsdp *rsdp = NULL;
struct acpi_table_rsdt *rsdt = NULL;
struct acpi_pointer *rsdp_address = NULL;
struct acpi_table_rsdt saved_rsdt;
acpi_native_uint rsdp_address = 0;
struct acpi_static_rsdt saved_rsdt;
int tables = 0;
int i = 0;

if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
rsdp_address))) {
rsdp_address = acpi_find_rsdp();
if (!rsdp_address) {
printk("%s: System description tables not found\n",
__FUNCTION__);
goto out_err;
}

if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
printk("%s: assigning address to rsdp\n", __FUNCTION__);
rsdp = (struct acpi_table_rsdp *)
(u32)rsdp_address->pointer.physical;
} else {
printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
goto out_err;
}
printk("%s: assigning address to rsdp\n", __FUNCTION__);
rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
if (!rsdp) {
printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
rsdp->oem_id);

if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
goto out_err;
}

rsdt = (struct acpi_table_rsdt *)
boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));

if (!rsdt) {
printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
goto out_err;
}

header = & rsdt->header;
header = &rsdt->header;

if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
goto out_err;
}
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)

memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));

if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
saved_rsdt.header.length);
saved_rsdt.table.header.length);
goto out_err;
}

@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
for (i = 0; i < tables; i++) {
/* Map in header, then map in full table length. */
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
if (!header)
break;
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], header->length);
boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
if (!header)
break;

if (strncmp((char *) &header->signature, "SRAT", 4))
if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
continue;

/* we've found the srat table. don't need to look at any more tables */
@@ -84,15 +84,6 @@ struct es7000_oem_table {
};

#ifdef CONFIG_ACPI
struct acpi_table_sdt {
unsigned long pa;
unsigned long count;
struct {
unsigned long pa;
enum acpi_table_id id;
unsigned long size;
} entry[50];
};

struct oem_table {
struct acpi_table_header Header;
@@ -160,51 +160,14 @@ parse_unisys_oem (char *oemptr)
int __init
find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_rsdp *rsdp = NULL;
unsigned long rsdp_phys = 0;
struct acpi_table_header *header = NULL;
int i;
struct acpi_table_sdt sdt;

rsdp_phys = acpi_find_rsdp();
rsdp = __va(rsdp_phys);
if (rsdp->rsdt_address) {
struct acpi_table_rsdt *mapped_rsdt = NULL;
sdt.pa = rsdp->rsdt_address;

header = (struct acpi_table_header *)
__acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
if (!header)
return -ENODEV;

sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
mapped_rsdt = (struct acpi_table_rsdt *)
__acpi_map_table(sdt.pa, header->length);
if (!mapped_rsdt)
return -ENODEV;

header = &mapped_rsdt->header;

for (i = 0; i < sdt.count; i++)
sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
};
for (i = 0; i < sdt.count; i++) {

header = (struct acpi_table_header *)
__acpi_map_table(sdt.entry[i].pa,
sizeof(struct acpi_table_header));
if (!header)
continue;
if (!strncmp((char *) &header->signature, "OEM1", 4)) {
if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
void *addr;
struct oem_table *t;
acpi_table_print(header, sdt.entry[i].pa);
t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
*oem_addr = (unsigned long) addr;
return 0;
}
struct acpi_table_header *header = NULL;
int i = 0;
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
*oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
t->OEMTableSize);
return 0;
}
}
return -1;
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
int cfg_num = -1;
struct acpi_table_mcfg_config *cfg;
struct acpi_mcfg_allocation *cfg;

if (seg == 0 && bus < MAX_CHECK_BUS &&
test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
break;
}
cfg = &pci_mmcfg_config[cfg_num];
if (cfg->pci_segment_group_number != seg)
if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
return cfg->base_address;
return cfg->address;
}

/* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
cfg->pci_segment_group_number == 0 &&
cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return cfg->base_address;
return cfg->address;

/* Fall back to type 0 */
return 0;
@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
unsigned long flags;
u32 base;

if ((bus > 255) || (devfn > 255) || (reg > 4095))
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;

base = get_base_addr(seg, bus, devfn);
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;

acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
(pci_mmcfg_config[0].base_address == 0))
(pci_mmcfg_config[0].address == 0))
return;

/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
(unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
@@ -55,7 +55,7 @@

#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))

#define PREFIX "ACPI: "

@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;

#define MAX_SAPICS 256
u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };

EXPORT_SYMBOL(ia64_acpiid_to_sapicid);

const char *acpi_get_sysname(void)
{
#ifdef CONFIG_IA64_GENERIC
unsigned long rsdp_phys;
struct acpi20_table_rsdp *rsdp;
struct acpi_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
struct acpi_table_header *hdr;

@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
return "dig";
}

rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
printk(KERN_ERR
"ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
return "dig";
}

xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
hdr = &xsdt->header;
if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
printk(KERN_ERR
"ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;

static int __init
acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_lapic_addr_ovr *lapic;
struct acpi_madt_local_apic_override *lapic;

lapic = (struct acpi_table_lapic_addr_ovr *)header;
lapic = (struct acpi_madt_local_apic_override *)header;

if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
}

static int __init
acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lsapic *lsapic;
struct acpi_madt_local_sapic *lsapic;

lsapic = (struct acpi_table_lsapic *)header;
lsapic = (struct acpi_madt_local_sapic *)header;

if (BAD_MADT_ENTRY(lsapic, end))
return -EINVAL;
/*Skip BAD_MADT_ENTRY check, as lsapic size could vary */

if (lsapic->flags.enabled) {
if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] =
(lsapic->id << 8) | lsapic->eid;
#endif
ia64_acpiid_to_sapicid[lsapic->acpi_id] =
(lsapic->id << 8) | lsapic->eid;
++available_cpus;
}

@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
}

static int __init
acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic_nmi *lacpi_nmi;
struct acpi_madt_local_apic_nmi *lacpi_nmi;

lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;

if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
}

static int __init
acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_iosapic *iosapic;
struct acpi_madt_io_sapic *iosapic;

iosapic = (struct acpi_table_iosapic *)header;
iosapic = (struct acpi_madt_io_sapic *)header;

if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
static unsigned int __initdata acpi_madt_rev;

static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
acpi_parse_plat_int_src(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_plat_int_src *plintsrc;
struct acpi_madt_interrupt_source *plintsrc;
int vector;

plintsrc = (struct acpi_table_plat_int_src *)header;
plintsrc = (struct acpi_madt_interrupt_source *)header;

if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
*/
vector = iosapic_register_platform_intr(plintsrc->type,
plintsrc->global_irq,
plintsrc->iosapic_vector,
plintsrc->io_sapic_vector,
plintsrc->eid,
plintsrc->id,
(plintsrc->flags.polarity ==
1) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
(plintsrc->flags.trigger ==
1) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
ACPI_MADT_TRIGGER_EDGE) ?
IOSAPIC_EDGE : IOSAPIC_LEVEL);

platform_intr_list[plintsrc->type] = vector;
if (acpi_madt_rev > 1) {
acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
}

/*
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
}

static int __init
acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_int_src_ovr *p;
struct acpi_madt_interrupt_override *p;

p = (struct acpi_table_int_src_ovr *)header;
p = (struct acpi_madt_interrupt_override *)header;

if (BAD_MADT_ENTRY(p, end))
return -EINVAL;

iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity ==
1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger ==
1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
iosapic_override_isa_irq(p->source_irq, p->global_irq,
((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
ACPI_MADT_TRIGGER_EDGE) ?
IOSAPIC_EDGE : IOSAPIC_LEVEL);
return 0;
}

static int __init
acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_nmi_src *nmi_src;
struct acpi_madt_nmi_source *nmi_src;

nmi_src = (struct acpi_table_nmi_src *)header;
nmi_src = (struct acpi_madt_nmi_source *)header;

if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
}

static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
if (!phys_addr || !size)
if (!table)
return -EINVAL;

acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
acpi_madt = (struct acpi_table_madt *)table;

acpi_madt_rev = acpi_madt->header.revision;

@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
#ifdef CONFIG_ITANIUM
has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
has_8259 = acpi_madt->flags.pcat_compat;
has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
#endif
iosapic_system_init(has_8259);

/* Get base address of IPI Message Block */

if (acpi_madt->lapic_address)
ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);
if (acpi_madt->address)
ipi_base_addr = ioremap(acpi_madt->address, 0);

printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);

@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;

static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;

pxm = pa->proximity_domain;
pxm = pa->proximity_domain_lo;
if (ia64_platform_is("sn2"))
pxm += pa->reserved[0] << 8;
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}

static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;

pxm = ma->proximity_domain;
if (ia64_platform_is("sn2"))
pxm += ma->reserved1[0] << 8;
if (!ia64_platform_is("sn2"))
pxm &= 0xff;

return pxm;
}

@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
u32 len;

len = sizeof(struct acpi_table_header) + 8
+ slit->localities * slit->localities;
+ slit->locality_count * slit->locality_count;
if (slit->header.length != len) {
printk(KERN_ERR
"ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
}

void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm;

if (!pa->flags.enabled)
if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
return;

pxm = get_processor_proximity_domain(pa);
@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
|
|||
pxm_bit_set(pxm);
|
||||
|
||||
node_cpuid[srat_num_cpus].phys_id =
|
||||
(pa->apic_id << 8) | (pa->lsapic_eid);
|
||||
(pa->apic_id << 8) | (pa->local_sapic_eid);
|
||||
/* nid should be overridden as logical node id later */
|
||||
node_cpuid[srat_num_cpus].nid = pxm;
|
||||
srat_num_cpus++;
|
||||
}
|
||||
|
||||
void __init
|
||||
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
|
||||
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
|
||||
{
|
||||
unsigned long paddr, size;
|
||||
int pxm;
|
||||
|
@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
|
|||
pxm = get_memory_proximity_domain(ma);
|
||||
|
||||
/* fill node memory chunk structure */
|
||||
paddr = ma->base_addr_hi;
|
||||
paddr = (paddr << 32) | ma->base_addr_lo;
|
||||
size = ma->length_hi;
|
||||
size = (size << 32) | ma->length_lo;
|
||||
paddr = ma->base_address;
|
||||
size = ma->length;
|
||||
|
||||
/* Ignore disabled entries */
|
||||
if (!ma->flags.enabled)
|
||||
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
|
||||
return;
|
||||
|
||||
/* record this node in proximity bitmap */
|
||||
|
@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
|
|||
if (!slit_table)
|
||||
return;
|
||||
memset(numa_slit, -1, sizeof(numa_slit));
|
||||
for (i = 0; i < slit_table->localities; i++) {
|
||||
for (i = 0; i < slit_table->locality_count; i++) {
|
||||
if (!pxm_bit_test(i))
|
||||
continue;
|
||||
node_from = pxm_to_node(i);
|
||||
for (j = 0; j < slit_table->localities; j++) {
|
||||
for (j = 0; j < slit_table->locality_count; j++) {
|
||||
if (!pxm_bit_test(j))
|
||||
continue;
|
||||
node_to = pxm_to_node(j);
|
||||
node_distance(node_from, node_to) =
|
||||
slit_table->entry[i * slit_table->localities + j];
|
||||
slit_table->entry[i * slit_table->locality_count + j];
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)

 EXPORT_SYMBOL(acpi_unregister_gsi);

-static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {
 	struct acpi_table_header *fadt_header;
-	struct fadt_descriptor *fadt;
+	struct acpi_table_fadt *fadt;

-	if (!phys_addr || !size)
+	if (!table)
 		return -EINVAL;

-	fadt_header = (struct acpi_table_header *)__va(phys_addr);
+	fadt_header = (struct acpi_table_header *)table;
 	if (fadt_header->revision != 3)
 		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */

-	fadt = (struct fadt_descriptor *)fadt_header;
+	fadt = (struct acpi_table_fadt *)fadt_header;

-	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+	acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 	return 0;
 }

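For context, the conversion above moves table handlers from a (phys_addr, size) callback to one that receives an already-mapped struct acpi_table_header. Below is a minimal sketch of a handler in the new style; my_fadt_handler and its printk text are invented for illustration, while acpi_table_parse(), ACPI_SIG_FADT and the struct acpi_table_fadt fields are the interfaces the patch itself uses.

/* Sketch only -- not part of this commit. */
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init my_fadt_handler(struct acpi_table_header *table)
{
	struct acpi_table_fadt *fadt;

	if (!table)
		return -EINVAL;

	/* The generic table header is embedded at the start of the FADT. */
	fadt = (struct acpi_table_fadt *)table;
	printk(KERN_INFO "FADT rev %d, SCI interrupt %d\n",
	       table->revision, fadt->sci_interrupt);
	return 0;
}

/* Typically invoked from an arch's acpi_boot_init() path: */
/*	acpi_table_parse(ACPI_SIG_FADT, my_fadt_handler);    */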
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
 	 * information -- the successor to MPS tables.
 	 */

-	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) {
 		printk(KERN_ERR PREFIX "Can't find MADT\n");
 		goto skip_madt;
 	}
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
 	/* Local APIC */

 	if (acpi_table_parse_madt
-	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing LAPIC address override entry\n");

-	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
 	    < 1)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");

-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
 	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

 	/* I/O APIC */

 	if (acpi_table_parse_madt
-	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no IOSAPIC entries\n");

 	/* System-Level Interrupt Routing */

 	if (acpi_table_parse_madt
-	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
 	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing platform interrupt source entry\n");

 	if (acpi_table_parse_madt
-	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");

-	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
       skip_madt:

@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
 	 * gets interrupts such as power and sleep buttons. If it's not
 	 * on a Legacy interrupt, it needs to be setup.
 	 */
-	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
+	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1)
 		printk(KERN_ERR PREFIX "Can't find FADT\n");

 #ifdef CONFIG_SMP
@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
|||
{
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
struct acpi_table_lsapic *lsapic;
|
||||
struct acpi_madt_local_sapic *lsapic;
|
||||
cpumask_t tmp_map;
|
||||
long physid;
|
||||
int cpu;
|
||||
|
@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
|||
return -EINVAL;
|
||||
|
||||
obj = buffer.pointer;
|
||||
if (obj->type != ACPI_TYPE_BUFFER ||
|
||||
obj->buffer.length < sizeof(*lsapic)) {
|
||||
if (obj->type != ACPI_TYPE_BUFFER)
|
||||
{
|
||||
kfree(buffer.pointer);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;
|
||||
lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
|
||||
|
||||
if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
|
||||
(!lsapic->flags.enabled)) {
|
||||
if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
|
||||
(!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
|
||||
kfree(buffer.pointer);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
|
|||
|
||||
cpu_set(cpu, cpu_present_map);
|
||||
ia64_cpu_to_sapicid[cpu] = physid;
|
||||
ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
|
||||
|
||||
*pcpu = cpu;
|
||||
return (0);
|
||||
|
@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
|
|||
|
||||
int acpi_unmap_lsapic(int cpu)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_SAPICS; i++) {
|
||||
if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
|
||||
ia64_acpiid_to_sapicid[i] = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ia64_cpu_to_sapicid[cpu] = -1;
|
||||
cpu_clear(cpu, cpu_present_map);
|
||||
|
||||
|
@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
|
|||
{
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
struct acpi_table_iosapic *iosapic;
|
||||
struct acpi_madt_io_sapic *iosapic;
|
||||
unsigned int gsi_base;
|
||||
int pxm, node;
|
||||
|
||||
|
@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
|
|||
return AE_OK;
|
||||
}
|
||||
|
||||
iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
|
||||
iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
|
||||
|
||||
if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
|
||||
if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
|
||||
kfree(buffer.pointer);
|
||||
return AE_OK;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <asm/sn/sn_sal.h>
|
||||
#include "xtalk/hubdev.h"
|
||||
#include <linux/acpi.h>
|
||||
#include <acpi/acnamesp.h>
|
||||
|
||||
|
||||
/*
|
||||
|
@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
|
|||
0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
|
||||
};
|
||||
|
||||
struct sn_pcidev_match {
|
||||
u8 bus;
|
||||
unsigned int devfn;
|
||||
acpi_handle handle;
|
||||
};
|
||||
|
||||
/*
|
||||
* Perform the early IO init in PROM.
|
||||
*/
|
||||
|
@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
|
|||
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
|
||||
&sn_uuid, &buffer);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR "get_acpi_pcibus_ptr: "
|
||||
"get_acpi_bussoft_info() failed: %d\n",
|
||||
status);
|
||||
printk(KERN_ERR "%s: "
|
||||
"acpi_get_vendor_resource() failed (0x%x) for: ",
|
||||
__FUNCTION__, status);
|
||||
acpi_ns_print_node_pathname(handle, NULL);
|
||||
printk("\n");
|
||||
return NULL;
|
||||
}
|
||||
resource = buffer.pointer;
|
||||
|
@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
|
|||
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
|
||||
sizeof(struct pcibus_bussoft *)) {
|
||||
printk(KERN_ERR
|
||||
"get_acpi_bussoft_ptr: Invalid vendor data "
|
||||
"length %d\n", vendor->byte_length);
|
||||
"%s: Invalid vendor data length %d\n",
|
||||
__FUNCTION__, vendor->byte_length);
|
||||
kfree(buffer.pointer);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
|
|||
}
|
||||
|
||||
/*
|
||||
* sn_acpi_bus_fixup
|
||||
* sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
|
||||
* pointers from the vendor resource using the
|
||||
* provided acpi handle, and copy the structures
|
||||
* into the argument buffers.
|
||||
*/
|
||||
void
|
||||
sn_acpi_bus_fixup(struct pci_bus *bus)
|
||||
static int
|
||||
sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
|
||||
struct sn_irq_info **sn_irq_info)
|
||||
{
|
||||
struct pci_dev *pci_dev = NULL;
|
||||
struct pcibus_bussoft *prom_bussoft_ptr;
|
||||
extern void sn_common_bus_fixup(struct pci_bus *,
|
||||
struct pcibus_bussoft *);
|
||||
u64 addr;
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
struct sn_irq_info *irq_info, *irq_info_prom;
|
||||
struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
|
||||
struct acpi_resource *resource;
|
||||
int ret = 0;
|
||||
acpi_status status;
|
||||
struct acpi_resource_vendor_typed *vendor;
|
||||
|
||||
if (!bus->parent) { /* If root bus */
|
||||
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
|
||||
if (prom_bussoft_ptr == NULL) {
|
||||
printk(KERN_ERR
|
||||
"sn_pci_fixup_bus: 0x%04x:0x%02x Unable to "
|
||||
"obtain prom_bussoft_ptr\n",
|
||||
pci_domain_nr(bus), bus->number);
|
||||
return;
|
||||
/*
|
||||
* The pointer to this device's pcidev_info structure in
|
||||
* the PROM, is in the vendor resource.
|
||||
*/
|
||||
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
|
||||
&sn_uuid, &buffer);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR
|
||||
"%s: acpi_get_vendor_resource() failed (0x%x) for: ",
|
||||
__FUNCTION__, status);
|
||||
acpi_ns_print_node_pathname(handle, NULL);
|
||||
printk("\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
resource = buffer.pointer;
|
||||
vendor = &resource->data.vendor_typed;
|
||||
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
|
||||
sizeof(struct pci_devdev_info *)) {
|
||||
printk(KERN_ERR
|
||||
"%s: Invalid vendor data length: %d for: ",
|
||||
__FUNCTION__, vendor->byte_length);
|
||||
acpi_ns_print_node_pathname(handle, NULL);
|
||||
printk("\n");
|
||||
ret = 1;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
|
||||
if (!pcidev_ptr)
|
||||
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
|
||||
|
||||
memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
|
||||
pcidev_prom_ptr = __va(addr);
|
||||
memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
|
||||
|
||||
/* Get the IRQ info */
|
||||
irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
|
||||
if (!irq_info)
|
||||
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
|
||||
|
||||
if (pcidev_ptr->pdi_sn_irq_info) {
|
||||
irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
|
||||
memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
|
||||
}
|
||||
|
||||
*pcidev_info = pcidev_ptr;
|
||||
*sn_irq_info = irq_info;
|
||||
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
|
||||
{
|
||||
unsigned long adr;
|
||||
acpi_handle child;
|
||||
unsigned int devfn;
|
||||
int function;
|
||||
acpi_handle parent;
|
||||
int slot;
|
||||
acpi_status status;
|
||||
|
||||
/*
|
||||
* Do an upward search to find the root bus device, and
|
||||
* obtain the host devfn from the previous child device.
|
||||
*/
|
||||
child = device_handle;
|
||||
while (child) {
|
||||
status = acpi_get_parent(child, &parent);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR "%s: acpi_get_parent() failed "
|
||||
"(0x%x) for: ", __FUNCTION__, status);
|
||||
acpi_ns_print_node_pathname(child, NULL);
|
||||
printk("\n");
|
||||
panic("%s: Unable to find host devfn\n", __FUNCTION__);
|
||||
}
|
||||
sn_common_bus_fixup(bus, prom_bussoft_ptr);
|
||||
if (parent == rootbus_handle)
|
||||
break;
|
||||
child = parent;
|
||||
}
|
||||
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
|
||||
sn_pci_fixup_slot(pci_dev);
|
||||
if (!child) {
|
||||
printk(KERN_ERR "%s: Unable to find root bus for: ",
|
||||
__FUNCTION__);
|
||||
acpi_ns_print_node_pathname(device_handle, NULL);
|
||||
printk("\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
|
||||
__FUNCTION__, status);
|
||||
acpi_ns_print_node_pathname(child, NULL);
|
||||
printk("\n");
|
||||
panic("%s: Unable to find host devfn\n", __FUNCTION__);
|
||||
}
|
||||
|
||||
slot = (adr >> 16) & 0xffff;
|
||||
function = adr & 0xffff;
|
||||
devfn = PCI_DEVFN(slot, function);
|
||||
return devfn;
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_acpi_slot_fixup - Perform any SN specific slot fixup.
|
||||
* find_matching_device - Callback routine to find the ACPI device
|
||||
* that matches up with our pci_dev device.
|
||||
* Matching is done on bus number and devfn.
|
||||
* To find the bus number for a particular
|
||||
* ACPI device, we must look at the _BBN method
|
||||
* of its parent.
|
||||
*/
|
||||
static acpi_status
|
||||
find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
|
||||
{
|
||||
unsigned long bbn = -1;
|
||||
unsigned long adr;
|
||||
acpi_handle parent = NULL;
|
||||
acpi_status status;
|
||||
unsigned int devfn;
|
||||
int function;
|
||||
int slot;
|
||||
struct sn_pcidev_match *info = context;
|
||||
|
||||
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
|
||||
&adr);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
status = acpi_get_parent(handle, &parent);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR
|
||||
"%s: acpi_get_parent() failed (0x%x) for: ",
|
||||
__FUNCTION__, status);
|
||||
acpi_ns_print_node_pathname(handle, NULL);
|
||||
printk("\n");
|
||||
return AE_OK;
|
||||
}
|
||||
status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
|
||||
NULL, &bbn);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR
|
||||
"%s: Failed to find _BBN in parent of: ",
|
||||
__FUNCTION__);
|
||||
acpi_ns_print_node_pathname(handle, NULL);
|
||||
printk("\n");
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
slot = (adr >> 16) & 0xffff;
|
||||
function = adr & 0xffff;
|
||||
devfn = PCI_DEVFN(slot, function);
|
||||
if ((info->devfn == devfn) && (info->bus == bbn)) {
|
||||
/* We have a match! */
|
||||
info->handle = handle;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
|
||||
* device matching the specified pci_dev,
|
||||
* and return the pcidev info and irq info.
|
||||
*/
|
||||
int
|
||||
sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
|
||||
struct sn_irq_info **sn_irq_info)
|
||||
{
|
||||
unsigned int host_devfn;
|
||||
struct sn_pcidev_match pcidev_match;
|
||||
acpi_handle rootbus_handle;
|
||||
unsigned long segment;
|
||||
acpi_status status;
|
||||
|
||||
rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
|
||||
status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
|
||||
&segment);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
if (segment != pci_domain_nr(dev)) {
|
||||
printk(KERN_ERR
|
||||
"%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
|
||||
__FUNCTION__, segment, pci_domain_nr(dev));
|
||||
acpi_ns_print_node_pathname(rootbus_handle, NULL);
|
||||
printk("\n");
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
printk(KERN_ERR "%s: Unable to get __SEG from: ",
|
||||
__FUNCTION__);
|
||||
acpi_ns_print_node_pathname(rootbus_handle, NULL);
|
||||
printk("\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* We want to search all devices in this segment/domain
|
||||
* of the ACPI namespace for the matching ACPI device,
|
||||
* which holds the pcidev_info pointer in its vendor resource.
|
||||
*/
|
||||
pcidev_match.bus = dev->bus->number;
|
||||
pcidev_match.devfn = dev->devfn;
|
||||
pcidev_match.handle = NULL;
|
||||
|
||||
acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
|
||||
find_matching_device, &pcidev_match, NULL);
|
||||
|
||||
if (!pcidev_match.handle) {
|
||||
printk(KERN_ERR
|
||||
"%s: Could not find matching ACPI device for %s.\n",
|
||||
__FUNCTION__, pci_name(dev));
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
|
||||
return 1;
|
||||
|
||||
/* Build up the pcidev_info.pdi_slot_host_handle */
|
||||
host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
|
||||
(*pcidev_info)->pdi_slot_host_handle =
|
||||
((unsigned long) pci_domain_nr(dev) << 40) |
|
||||
/* bus == 0 */
|
||||
host_devfn;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
|
||||
* Perform any SN specific slot fixup.
|
||||
* At present there does not appear to be
|
||||
* any generic way to handle a ROM image
|
||||
* that has been shadowed by the PROM, so
|
||||
|
@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
|
|||
*/
|
||||
|
||||
void
|
||||
sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
|
||||
sn_acpi_slot_fixup(struct pci_dev *dev)
|
||||
{
|
||||
void __iomem *addr;
|
||||
struct pcidev_info *pcidev_info = NULL;
|
||||
struct sn_irq_info *sn_irq_info = NULL;
|
||||
size_t size;
|
||||
|
||||
if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
|
||||
panic("%s: Failure obtaining pcidev_info for %s\n",
|
||||
__FUNCTION__, pci_name(dev));
|
||||
}
|
||||
|
||||
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
|
||||
/*
|
||||
* A valid ROM image exists and has been shadowed by the
|
||||
|
@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
|
|||
(unsigned long) addr + size;
|
||||
dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
|
||||
}
|
||||
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(sn_acpi_slot_fixup);
|
||||
|
||||
static struct acpi_driver acpi_sn_hubdev_driver = {
|
||||
.name = "SGI HUBDEV Driver",
|
||||
.ids = "SGIHUB,SGITIO",
|
||||
|
@ -211,6 +450,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
|
|||
};
|
||||
|
||||
|
||||
/*
|
||||
* sn_acpi_bus_fixup - Perform SN specific setup of software structs
|
||||
* (pcibus_bussoft, pcidev_info) and hardware
|
||||
* registers, for the specified bus and devices under it.
|
||||
*/
|
||||
void
|
||||
sn_acpi_bus_fixup(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_dev *pci_dev = NULL;
|
||||
struct pcibus_bussoft *prom_bussoft_ptr;
|
||||
|
||||
if (!bus->parent) { /* If root bus */
|
||||
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
|
||||
if (prom_bussoft_ptr == NULL) {
|
||||
printk(KERN_ERR
|
||||
"%s: 0x%04x:0x%02x Unable to "
|
||||
"obtain prom_bussoft_ptr\n",
|
||||
__FUNCTION__, pci_domain_nr(bus), bus->number);
|
||||
return;
|
||||
}
|
||||
sn_common_bus_fixup(bus, prom_bussoft_ptr);
|
||||
}
|
||||
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
|
||||
sn_acpi_slot_fixup(pci_dev);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
|
||||
* nodes and root buses in the DSDT. As a result, bus scanning
|
||||
|
|
|
@ -26,14 +26,10 @@
|
|||
#include <linux/acpi.h>
|
||||
#include <asm/sn/sn2/sn_hwperf.h>
|
||||
#include <asm/sn/acpi.h>
|
||||
#include "acpi/acglobal.h"
|
||||
|
||||
extern void sn_init_cpei_timer(void);
|
||||
extern void register_sn_procfs(void);
|
||||
extern void sn_acpi_bus_fixup(struct pci_bus *);
|
||||
extern void sn_bus_fixup(struct pci_bus *);
|
||||
extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
|
||||
extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
|
||||
extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
|
||||
extern void sn_io_acpi_init(void);
|
||||
extern void sn_io_init(void);
|
||||
|
||||
|
@ -48,6 +44,9 @@ struct sysdata_el {
|
|||
|
||||
int sn_ioif_inited; /* SN I/O infrastructure initialized? */
|
||||
|
||||
int sn_acpi_rev; /* SN ACPI revision */
|
||||
EXPORT_SYMBOL_GPL(sn_acpi_rev);
|
||||
|
||||
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
|
||||
|
||||
/*
|
||||
|
@ -98,25 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
|
|||
return ret_stuff.status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieve the pci device information given the bus and device|function number.
|
||||
*/
|
||||
static inline u64
|
||||
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
|
||||
u64 sn_irq_info)
|
||||
{
|
||||
struct ia64_sal_retval ret_stuff;
|
||||
ret_stuff.status = 0;
|
||||
ret_stuff.v0 = 0;
|
||||
|
||||
SAL_CALL_NOLOCK(ret_stuff,
|
||||
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
|
||||
(u64) segment, (u64) bus_number, (u64) devfn,
|
||||
(u64) pci_dev,
|
||||
sn_irq_info, 0, 0);
|
||||
return ret_stuff.v0;
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
|
||||
* device.
|
||||
|
@ -249,50 +229,25 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
|
|||
}
|
||||
|
||||
/*
|
||||
* sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
|
||||
* with the Linux PCI abstraction layer. Resources
|
||||
* acquired from our PCI provider include PIO maps
|
||||
* to BAR space and interrupt objects.
|
||||
* sn_pci_fixup_slot()
|
||||
*/
|
||||
void sn_pci_fixup_slot(struct pci_dev *dev)
|
||||
void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
|
||||
struct sn_irq_info *sn_irq_info)
|
||||
{
|
||||
int segment = pci_domain_nr(dev->bus);
|
||||
int status = 0;
|
||||
struct pcibus_bussoft *bs;
|
||||
struct pci_bus *host_pci_bus;
|
||||
struct pci_dev *host_pci_dev;
|
||||
struct pcidev_info *pcidev_info;
|
||||
struct sn_irq_info *sn_irq_info;
|
||||
unsigned int bus_no, devfn;
|
||||
struct pci_bus *host_pci_bus;
|
||||
struct pci_dev *host_pci_dev;
|
||||
unsigned int bus_no, devfn;
|
||||
|
||||
pci_dev_get(dev); /* for the sysdata pointer */
|
||||
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
|
||||
if (!pcidev_info)
|
||||
BUG(); /* Cannot afford to run out of memory */
|
||||
|
||||
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
|
||||
if (!sn_irq_info)
|
||||
BUG(); /* Cannot afford to run out of memory */
|
||||
|
||||
/* Call to retrieve pci device information needed by kernel. */
|
||||
status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
|
||||
dev->devfn,
|
||||
(u64) __pa(pcidev_info),
|
||||
(u64) __pa(sn_irq_info));
|
||||
if (status)
|
||||
BUG(); /* Cannot get platform pci device information */
|
||||
|
||||
/* Add pcidev_info to list in pci_controller.platform_data */
|
||||
list_add_tail(&pcidev_info->pdi_list,
|
||||
&(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
|
||||
|
||||
if (SN_ACPI_BASE_SUPPORT())
|
||||
sn_acpi_slot_fixup(dev, pcidev_info);
|
||||
else
|
||||
sn_more_slot_fixup(dev, pcidev_info);
|
||||
/*
|
||||
* Using the PROMs values for the PCI host bus, get the Linux
|
||||
* PCI host_pci_dev struct and set up host bus linkages
|
||||
* PCI host_pci_dev struct and set up host bus linkages
|
||||
*/
|
||||
|
||||
bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
|
||||
|
@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
|
|||
sprintf(address, "%s^%d", address, geo_slot(geoid));
|
||||
}
|
||||
|
||||
/*
|
||||
* sn_pci_fixup_bus() - Perform SN specific setup of software structs
|
||||
* (pcibus_bussoft, pcidev_info) and hardware
|
||||
* registers, for the specified bus and devices under it.
|
||||
*/
|
||||
void __devinit
|
||||
sn_pci_fixup_bus(struct pci_bus *bus)
|
||||
{
|
||||
|
@ -519,6 +469,15 @@ sn_io_early_init(void)
|
|||
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
|
||||
return 0;
|
||||
|
||||
/* we set the acpi revision to that of the DSDT table OEM rev. */
|
||||
{
|
||||
struct acpi_table_header *header = NULL;
|
||||
|
||||
acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
|
||||
BUG_ON(header == NULL);
|
||||
sn_acpi_rev = header->oem_revision;
|
||||
}
|
||||
|
||||
/*
|
||||
* prime sn_pci_provider[]. Individial provider init routines will
|
||||
* override their respective default entries.
|
||||
|
@ -544,8 +503,12 @@ sn_io_early_init(void)
|
|||
register_sn_procfs();
|
||||
#endif
|
||||
|
||||
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
|
||||
acpi_gbl_DSDT->oem_revision);
|
||||
{
|
||||
struct acpi_table_header *header;
|
||||
(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
|
||||
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
|
||||
header->oem_revision);
|
||||
}
|
||||
if (SN_ACPI_BASE_SUPPORT())
|
||||
sn_io_acpi_init();
|
||||
else
|
||||
|
@ -605,7 +568,6 @@ sn_io_late_init(void)
|
|||
|
||||
fs_initcall(sn_io_late_init);
|
||||
|
||||
EXPORT_SYMBOL(sn_pci_fixup_slot);
|
||||
EXPORT_SYMBOL(sn_pci_unfixup_slot);
|
||||
EXPORT_SYMBOL(sn_bus_store_sysdata);
|
||||
EXPORT_SYMBOL(sn_bus_free_sysdata);
|
||||
|
|
|
@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
|
|||
return ret_stuff.v0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieve the pci device information given the bus and device|function number.
|
||||
*/
|
||||
static inline u64
|
||||
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
|
||||
u64 sn_irq_info)
|
||||
{
|
||||
struct ia64_sal_retval ret_stuff;
|
||||
ret_stuff.status = 0;
|
||||
ret_stuff.v0 = 0;
|
||||
|
||||
SAL_CALL_NOLOCK(ret_stuff,
|
||||
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
|
||||
(u64) segment, (u64) bus_number, (u64) devfn,
|
||||
(u64) pci_dev,
|
||||
sn_irq_info, 0, 0);
|
||||
return ret_stuff.v0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* sn_fixup_ionodes() - This routine initializes the HUB data structure for
|
||||
|
@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
|
|||
}
|
||||
|
||||
/*
|
||||
* sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
|
||||
* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
|
||||
* and need to convert the pci_dev->resource
|
||||
* 'start' and 'end' addresses to mapped addresses,
|
||||
* and setup the pci_controller->window array entries.
|
||||
*/
|
||||
void
|
||||
sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
|
||||
sn_io_slot_fixup(struct pci_dev *dev)
|
||||
{
|
||||
unsigned int count = 0;
|
||||
int idx;
|
||||
s64 pci_addrs[PCI_ROM_RESOURCE + 1];
|
||||
unsigned long addr, end, size, start;
|
||||
struct pcidev_info *pcidev_info;
|
||||
struct sn_irq_info *sn_irq_info;
|
||||
int status;
|
||||
|
||||
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
|
||||
if (!pcidev_info)
|
||||
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
|
||||
|
||||
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
|
||||
if (!sn_irq_info)
|
||||
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
|
||||
|
||||
/* Call to retrieve pci device information needed by kernel. */
|
||||
status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
|
||||
(u64) dev->bus->number,
|
||||
dev->devfn,
|
||||
(u64) __pa(pcidev_info),
|
||||
(u64) __pa(sn_irq_info));
|
||||
|
||||
if (status)
|
||||
BUG(); /* Cannot get platform pci device information */
|
||||
|
||||
|
||||
/* Copy over PIO Mapped Addresses */
|
||||
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
|
||||
|
@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
|
|||
*/
|
||||
if (count > 0)
|
||||
sn_pci_window_fixup(dev, count, pci_addrs);
|
||||
|
||||
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(sn_io_slot_fixup);
|
||||
|
||||
/*
|
||||
* sn_pci_controller_fixup() - This routine sets up a bus's resources
|
||||
* consistent with the Linux PCI abstraction layer.
|
||||
|
@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
|
|||
{
|
||||
struct pci_dev *pci_dev = NULL;
|
||||
struct pcibus_bussoft *prom_bussoft_ptr;
|
||||
extern void sn_common_bus_fixup(struct pci_bus *,
|
||||
struct pcibus_bussoft *);
|
||||
|
||||
|
||||
if (!bus->parent) { /* If root bus */
|
||||
prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
|
||||
|
@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
|
|||
prom_bussoft_ptr->bs_legacy_mem);
|
||||
}
|
||||
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
|
||||
sn_pci_fixup_slot(pci_dev);
|
||||
sn_io_slot_fixup(pci_dev);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/*
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
|
@ -26,9 +26,10 @@
|
|||
* @port: port to convert
|
||||
*
|
||||
* Legacy in/out instructions are converted to ld/st instructions
|
||||
* on IA64. This routine will convert a port number into a valid
|
||||
* on IA64. This routine will convert a port number into a valid
|
||||
* SN i/o address. Used by sn_in*() and sn_out*().
|
||||
*/
|
||||
|
||||
void *sn_io_addr(unsigned long port)
|
||||
{
|
||||
if (!IS_RUNNING_ON_SIMULATOR()) {
|
||||
|
|
|
@ -20,7 +20,8 @@
|
|||
#include "xtalk/hubdev.h"
|
||||
|
||||
int
|
||||
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
|
||||
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
|
||||
char **ssdt)
|
||||
{
|
||||
struct ia64_sal_retval ret_stuff;
|
||||
u64 busnum;
|
||||
|
@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
|
|||
segment = soft->pbi_buscommon.bs_persist_segment;
|
||||
busnum = soft->pbi_buscommon.bs_persist_busnum;
|
||||
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
|
||||
busnum, (u64) device, (u64) resp, 0, 0, 0);
|
||||
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
|
||||
0, 0);
|
||||
|
||||
return (int)ret_stuff.v0;
|
||||
}
|
||||
|
|
|
@@ -32,7 +32,7 @@ static void via_bugs(void)

 static int nvidia_hpet_detected __initdata;

-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 	nvidia_hpet_detected = 1;
 	return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
 		return;

 	nvidia_hpet_detected = 0;
-	acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+	acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
 	if (nvidia_hpet_detected == 0) {
 		acpi_skip_timer_override = 1;
 		printk(KERN_INFO "Nvidia board "
|
|
@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
|
|||
* Some x86_64 machines use physical APIC mode regardless of how many
|
||||
* procs/clusters are present (x86_64 ES7000 is an example).
|
||||
*/
|
||||
if (acpi_fadt.revision > FADT2_REVISION_ID)
|
||||
if (acpi_fadt.force_apic_physical_destination_mode) {
|
||||
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
|
||||
if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
|
||||
genapic = &apic_cluster;
|
||||
goto print;
|
||||
}
|
||||
|
|
|
@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
|
|||
return gsi;
|
||||
|
||||
/* Don't set up the ACPI SCI because it's already set up */
|
||||
if (acpi_fadt.sci_int == gsi)
|
||||
if (acpi_gbl_FADT.sci_interrupt == gsi)
|
||||
return gsi;
|
||||
|
||||
ioapic = mp_find_ioapic(gsi);
|
||||
|
|
|
@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
|
|||
{
|
||||
unsigned int year, mon, day, hour, min, sec;
|
||||
unsigned long flags;
|
||||
unsigned extyear = 0;
|
||||
unsigned century = 0;
|
||||
|
||||
spin_lock_irqsave(&rtc_lock, flags);
|
||||
|
||||
|
@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
|
|||
mon = CMOS_READ(RTC_MONTH);
|
||||
year = CMOS_READ(RTC_YEAR);
|
||||
#ifdef CONFIG_ACPI
|
||||
if (acpi_fadt.revision >= FADT2_REVISION_ID &&
|
||||
acpi_fadt.century)
|
||||
extyear = CMOS_READ(acpi_fadt.century);
|
||||
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
|
||||
acpi_gbl_FADT.century)
|
||||
century = CMOS_READ(acpi_gbl_FADT.century);
|
||||
#endif
|
||||
} while (sec != CMOS_READ(RTC_SECONDS));
|
||||
|
||||
|
@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
|
|||
BCD_TO_BIN(mon);
|
||||
BCD_TO_BIN(year);
|
||||
|
||||
if (extyear) {
|
||||
BCD_TO_BIN(extyear);
|
||||
year += extyear;
|
||||
printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
|
||||
if (century) {
|
||||
BCD_TO_BIN(century);
|
||||
year += century * 100;
|
||||
printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
|
||||
} else {
|
||||
/*
|
||||
* x86-64 systems only exists since 2002.
|
||||
|
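Aside (illustrative, not part of the patch): the century byte read from CMOS in the hunk above is BCD-encoded, which is why it goes through BCD_TO_BIN() before being multiplied by 100. A standalone sketch of that conversion, with a made-up helper name:

/* Sketch only -- bcd_to_bin_example is invented for illustration;
 * the kernel code above uses the BCD_TO_BIN() macro instead. */
#include <stdio.h>

static unsigned int bcd_to_bin_example(unsigned int bcd)
{
	/* Low nibble holds the ones digit, high nibble the tens digit. */
	return (bcd & 0x0f) + (bcd >> 4) * 10;
}

int main(void)
{
	unsigned int century = bcd_to_bin_example(0x20);	/* -> 20 */
	printf("year += %u\n", century * 100);			/* -> 2000 */
	return 0;
}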
@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
|
|||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
|
||||
#ifdef CONFIG_ACPI
|
||||
/* But TSC doesn't tick in C3 so don't use it there */
|
||||
if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
|
||||
if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
|
|
|
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
 static __init int slit_valid(struct acpi_table_slit *slit)
 {
 	int i, j;
-	int d = slit->localities;
+	int d = slit->locality_count;
 	for (i = 0; i < d; i++) {
 		for (j = 0; j < d; j++) {
 			u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)

 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
 		bad_srat();
 		return;
 	}
-	if (pa->flags.enabled == 0)
+	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 		return;
-	pxm = pa->proximity_domain;
+	pxm = pa->proximity_domain_lo;
 	node = setup_node(pxm);
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
|
|||
/* Looks good */
|
||||
|
||||
if (nd->start == nd->end) {
|
||||
nd->start = start;
|
||||
nd->end = end;
|
||||
nd->start = start;
|
||||
nd->end = end;
|
||||
changed = 1;
|
||||
} else {
|
||||
if (nd->start == end) {
|
||||
nd->start = start;
|
||||
} else {
|
||||
if (nd->start == end) {
|
||||
nd->start = start;
|
||||
changed = 1;
|
||||
}
|
||||
if (nd->end == start) {
|
||||
nd->end = end;
|
||||
if (nd->end == start) {
|
||||
nd->end = end;
|
||||
changed = 1;
|
||||
}
|
||||
if (!changed)
|
||||
printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
|
||||
}
|
||||
}
|
||||
|
||||
ret = update_end_of_memory(nd->end);
|
||||
|
||||
|
@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
|
|||
|
||||
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
|
||||
void __init
|
||||
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
|
||||
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
|
||||
{
|
||||
struct bootnode *nd, oldnode;
|
||||
unsigned long start, end;
|
||||
|
@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
|
|||
|
||||
if (srat_disabled())
|
||||
return;
|
||||
if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
|
||||
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
|
||||
bad_srat();
|
||||
return;
|
||||
}
|
||||
if (ma->flags.enabled == 0)
|
||||
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
|
||||
return;
|
||||
if (ma->flags.hot_pluggable && !save_add_info())
|
||||
|
||||
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
|
||||
return;
|
||||
start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
|
||||
end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
|
||||
start = ma->base_address;
|
||||
end = start + ma->length;
|
||||
pxm = ma->proximity_domain;
|
||||
node = setup_node(pxm);
|
||||
if (node < 0) {
|
||||
|
@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
|
|||
push_node_boundaries(node, nd->start >> PAGE_SHIFT,
|
||||
nd->end >> PAGE_SHIFT);
|
||||
|
||||
if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
|
||||
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
|
||||
(reserve_hotadd(node, start, end) < 0)) {
|
||||
/* Ignore hotadd region. Undo damage */
|
||||
printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
|
||||
*nd = oldnode;
|
||||
|
@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
|
|||
|
||||
/* First clean up the node list */
|
||||
for (i = 0; i < MAX_NUMNODES; i++) {
|
||||
cutoff_node(i, start, end);
|
||||
cutoff_node(i, start, end);
|
||||
if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
|
||||
unparse_node(i);
|
||||
node_set_offline(i);
|
||||
|
@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
|
|||
if (!node_online(i))
|
||||
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
if (cpu_to_node[i] == NUMA_NO_NODE)
|
||||
continue;
|
||||
if (!node_isset(cpu_to_node[i], nodes_parsed))
|
||||
|
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)

 	if (!acpi_slit)
 		return a == b ? 10 : 20;
-	index = acpi_slit->localities * node_to_pxm(a);
+	index = acpi_slit->locality_count * node_to_pxm(a);
 	return acpi_slit->entry[index + node_to_pxm(b)];
 }

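Aside on the lookup above (illustrative only): the SLIT distance matrix is a flat N x N byte array, so __node_distance() indexes it row-major with locality_count as the stride. A small standalone sketch with made-up names and data:

/* Sketch only -- slit_entry, locality_count and node_distance_example
 * are invented here; the kernel uses acpi_slit->entry[] as shown above. */
#include <stdio.h>

static unsigned char slit_entry[] = {	/* 3x3 example distance matrix */
	10, 20, 20,
	20, 10, 20,
	20, 20, 10,
};
static const int locality_count = 3;

static int node_distance_example(int a, int b)
{
	return slit_entry[locality_count * a + b];	/* row-major lookup */
}

int main(void)
{
	printf("distance(0,2) = %d\n", node_distance_example(0, 2));	/* -> 20 */
	return 0;
}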
@ -1,6 +1,6 @@
|
|||
/*
|
||||
* mmconfig.c - Low-level direct PCI config space access via MMCONFIG
|
||||
*
|
||||
*
|
||||
* This is an 64bit optimized version that always keeps the full mmconfig
|
||||
* space mapped. This allows lockless config space operation.
|
||||
*/
|
||||
|
@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
|
|||
|
||||
/* Static virtual mapping of the MMCONFIG aperture */
|
||||
struct mmcfg_virt {
|
||||
struct acpi_table_mcfg_config *cfg;
|
||||
struct acpi_mcfg_allocation *cfg;
|
||||
char __iomem *virt;
|
||||
};
|
||||
static struct mmcfg_virt *pci_mmcfg_virt;
|
||||
|
@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
|
|||
static char __iomem *get_virt(unsigned int seg, unsigned bus)
|
||||
{
|
||||
int cfg_num = -1;
|
||||
struct acpi_table_mcfg_config *cfg;
|
||||
struct acpi_mcfg_allocation *cfg;
|
||||
|
||||
while (1) {
|
||||
++cfg_num;
|
||||
if (cfg_num >= pci_mmcfg_config_num)
|
||||
break;
|
||||
cfg = pci_mmcfg_virt[cfg_num].cfg;
|
||||
if (cfg->pci_segment_group_number != seg)
|
||||
if (cfg->pci_segment != seg)
|
||||
continue;
|
||||
if ((cfg->start_bus_number <= bus) &&
|
||||
(cfg->end_bus_number >= bus))
|
||||
|
@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
|
|||
this applies to all busses. */
|
||||
cfg = &pci_mmcfg_config[0];
|
||||
if (pci_mmcfg_config_num == 1 &&
|
||||
cfg->pci_segment_group_number == 0 &&
|
||||
cfg->pci_segment == 0 &&
|
||||
(cfg->start_bus_number | cfg->end_bus_number) == 0)
|
||||
return pci_mmcfg_virt[0].virt;
|
||||
|
||||
|
@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
|
|||
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
|
||||
return;
|
||||
|
||||
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
|
||||
acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
|
||||
if ((pci_mmcfg_config_num == 0) ||
|
||||
(pci_mmcfg_config == NULL) ||
|
||||
(pci_mmcfg_config[0].base_address == 0))
|
||||
(pci_mmcfg_config[0].address == 0))
|
||||
return;
|
||||
|
||||
/* Only do this check when type 1 works. If it doesn't work
|
||||
assume we run on a Mac and always use MCFG */
|
||||
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
|
||||
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
|
||||
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
|
||||
pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
|
||||
E820_RESERVED)) {
|
||||
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
|
||||
pci_mmcfg_config[0].base_address);
|
||||
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
|
||||
(unsigned long)pci_mmcfg_config[0].address);
|
||||
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
|
||||
return;
|
||||
}
|
||||
|
@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
|
|||
}
|
||||
for (i = 0; i < pci_mmcfg_config_num; ++i) {
|
||||
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
|
||||
pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
|
||||
pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
|
||||
MMCONFIG_APER_MAX);
|
||||
if (!pci_mmcfg_virt[i].virt) {
|
||||
printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
|
||||
"segment %d\n",
|
||||
pci_mmcfg_config[i].pci_segment_group_number);
|
||||
pci_mmcfg_config[i].pci_segment);
|
||||
return;
|
||||
}
|
||||
printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
|
||||
printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
|
||||
(unsigned long)pci_mmcfg_config[i].address);
|
||||
}
|
||||
|
||||
unreachable_devices();
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#
|
||||
|
||||
menu "ACPI (Advanced Configuration and Power Interface) Support"
|
||||
depends on !X86_NUMAQ
|
||||
depends on !X86_VISWS
|
||||
depends on !IA64_HP_SIM
|
||||
depends on IA64 || X86
|
||||
|
@ -77,6 +78,20 @@ config ACPI_SLEEP_PROC_SLEEP
|
|||
Create /proc/acpi/sleep
|
||||
Deprecated by /sys/power/state
|
||||
|
||||
config ACPI_PROCFS
|
||||
bool "Procfs interface (deprecated)"
|
||||
depends on ACPI
|
||||
default y
|
||||
---help---
|
||||
Procfs interface for ACPI is made optional for back-compatible.
|
||||
As the same functions are duplicated in sysfs interface
|
||||
and this proc interface will be removed some time later,
|
||||
it's marked as deprecated.
|
||||
( /proc/acpi/debug_layer && debug_level are deprecated by
|
||||
/sys/module/acpi/parameters/debug_layer && debug_level.
|
||||
/proc/acpi/info is deprecated by
|
||||
/sys/module/acpi/parameters/acpica_version )
|
||||
|
||||
config ACPI_AC
|
||||
tristate "AC Adapter"
|
||||
depends on X86
|
||||
|
@ -107,7 +122,7 @@ config ACPI_BUTTON
|
|||
|
||||
config ACPI_VIDEO
|
||||
tristate "Video"
|
||||
depends on X86
|
||||
depends on X86 && BACKLIGHT_CLASS_DEVICE
|
||||
help
|
||||
This driver implement the ACPI Extensions For Display Adapters
|
||||
for integrated graphics devices on motherboard, as specified in
|
||||
|
@ -139,6 +154,13 @@ config ACPI_DOCK
|
|||
help
|
||||
This driver adds support for ACPI controlled docking stations
|
||||
|
||||
config ACPI_BAY
|
||||
tristate "Removable Drive Bay (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
help
|
||||
This driver adds support for ACPI controlled removable drive
|
||||
bays such as the IBM ultrabay or the Dell Module Bay.
|
||||
|
||||
config ACPI_PROCESSOR
|
||||
tristate "Processor"
|
||||
default y
|
||||
|
@ -186,19 +208,22 @@ config ACPI_ASUS
|
|||
|
||||
Note: display switching code is currently considered EXPERIMENTAL,
|
||||
toying with these values may even lock your machine.
|
||||
|
||||
|
||||
All settings are changed via /proc/acpi/asus directory entries. Owner
|
||||
and group for these entries can be set with asus_uid and asus_gid
|
||||
parameters.
|
||||
|
||||
|
||||
More information and a userspace daemon for handling the extra buttons
|
||||
at <http://sourceforge.net/projects/acpi4asus/>.
|
||||
|
||||
|
||||
If you have an ACPI-compatible ASUS laptop, say Y or M here. This
|
||||
driver is still under development, so if your laptop is unsupported or
|
||||
something works not quite as expected, please use the mailing list
|
||||
available on the above page (acpi4asus-user@lists.sourceforge.net)
|
||||
|
||||
available on the above page (acpi4asus-user@lists.sourceforge.net).
|
||||
|
||||
NOTE: This driver is deprecated and will probably be removed soon,
|
||||
use asus-laptop instead.
|
||||
|
||||
config ACPI_IBM
|
||||
tristate "IBM ThinkPad Laptop Extras"
|
||||
depends on X86
|
||||
|
|
|
@ -37,13 +37,15 @@ endif
|
|||
|
||||
obj-y += sleep/
|
||||
obj-y += bus.o glue.o
|
||||
obj-y += scan.o
|
||||
obj-$(CONFIG_ACPI_AC) += ac.o
|
||||
obj-$(CONFIG_ACPI_BATTERY) += battery.o
|
||||
obj-$(CONFIG_ACPI_BUTTON) += button.o
|
||||
obj-$(CONFIG_ACPI_EC) += ec.o
|
||||
obj-$(CONFIG_ACPI_FAN) += fan.o
|
||||
obj-$(CONFIG_ACPI_DOCK) += dock.o
|
||||
obj-$(CONFIG_ACPI_VIDEO) += video.o
|
||||
obj-$(CONFIG_ACPI_BAY) += bay.o
|
||||
obj-$(CONFIG_ACPI_VIDEO) += video.o
|
||||
obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
|
||||
obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
|
||||
obj-$(CONFIG_ACPI_POWER) += power.o
|
||||
|
@ -56,7 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
|
|||
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
|
||||
obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
|
||||
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
|
||||
obj-y += scan.o motherboard.o
|
||||
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
|
||||
obj-y += cm_sbs.o
|
||||
obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
|
||||
|
|
|
@@ -26,7 +26,7 @@
  *  Pontus Fuchs   - Helper functions, cleanup
  *  Johann Wiesner - Small compile fixes
  *  John Belmonte  - ACPI code for Toshiba laptop was a good starting point.
- *  Éric Burghard  - LED display support for W1N
+ *  �ic Burghard  - LED display support for W1N
  *
  */

||||
|
@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
|
|||
static int asus_hotk_get_info(void)
|
||||
{
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *model = NULL;
|
||||
int bsts_result;
|
||||
char *string = NULL;
|
||||
|
@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
|
|||
* HID), this bit will be moved. A global variable asus_info contains
|
||||
* the DSDT header.
|
||||
*/
|
||||
status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
|
||||
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
|
||||
if (ACPI_FAILURE(status))
|
||||
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
|
||||
else
|
||||
asus_info = dsdt.pointer;
|
||||
|
||||
/* We have to write 0 on init this far for all ASUS models */
|
||||
if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
|
||||
|
@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
|
|||
acpi_bus_unregister_driver(&asus_hotk_driver);
|
||||
remove_proc_entry(PROC_ASUS, acpi_root_dir);
|
||||
|
||||
kfree(asus_info);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
|
|||
|
||||
static int acpi_battery_add(struct acpi_device *device);
|
||||
static int acpi_battery_remove(struct acpi_device *device, int type);
|
||||
static int acpi_battery_resume(struct acpi_device *device, int status);
|
||||
static int acpi_battery_resume(struct acpi_device *device);
|
||||
|
||||
static struct acpi_driver acpi_battery_driver = {
|
||||
.name = ACPI_BATTERY_DRIVER_NAME,
|
||||
|
@ -753,7 +753,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
|
|||
}
|
||||
|
||||
/* this is needed to learn about changes made in suspended state */
|
||||
static int acpi_battery_resume(struct acpi_device *device, int state)
|
||||
static int acpi_battery_resume(struct acpi_device *device)
|
||||
{
|
||||
struct acpi_battery *battery;
|
||||
|
||||
|
|
490
drivers/acpi/bay.c
Normal file
|
@ -0,0 +1,490 @@
|
|||
/*
|
||||
* bay.c - ACPI removable drive bay driver
|
||||
*
|
||||
* Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
|
||||
*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or (at
|
||||
* your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
|
||||
*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#define ACPI_BAY_DRIVER_NAME "ACPI Removable Drive Bay Driver"
|
||||
|
||||
ACPI_MODULE_NAME("bay")
|
||||
MODULE_AUTHOR("Kristen Carlson Accardi");
|
||||
MODULE_DESCRIPTION(ACPI_BAY_DRIVER_NAME);
|
||||
MODULE_LICENSE("GPL");
|
||||
#define ACPI_BAY_CLASS "bay"
|
||||
#define ACPI_BAY_COMPONENT 0x10000000
|
||||
#define _COMPONENT ACPI_BAY_COMPONENT
|
||||
#define bay_dprintk(h,s) {\
|
||||
char prefix[80] = {'\0'};\
|
||||
struct acpi_buffer buffer = {sizeof(prefix), prefix};\
|
||||
acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
|
||||
printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
|
||||
static void bay_notify(acpi_handle handle, u32 event, void *data);
|
||||
static int acpi_bay_add(struct acpi_device *device);
|
||||
static int acpi_bay_remove(struct acpi_device *device, int type);
|
||||
|
||||
static struct acpi_driver acpi_bay_driver = {
|
||||
.name = ACPI_BAY_DRIVER_NAME,
|
||||
.class = ACPI_BAY_CLASS,
|
||||
.ids = ACPI_BAY_HID,
|
||||
.ops = {
|
||||
.add = acpi_bay_add,
|
||||
.remove = acpi_bay_remove,
|
||||
},
|
||||
};
|
||||
|
||||
struct bay {
|
||||
acpi_handle handle;
|
||||
char *name;
|
||||
struct list_head list;
|
||||
struct platform_device *pdev;
|
||||
};
|
||||
|
||||
static LIST_HEAD(drive_bays);
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
* Drive Bay functions *
|
||||
*****************************************************************************/
|
||||
/**
|
||||
* is_ejectable - see if a device is ejectable
|
||||
* @handle: acpi handle of the device
|
||||
*
|
||||
* If an acpi object has a _EJ0 method, then it is ejectable
|
||||
*/
|
||||
static int is_ejectable(acpi_handle handle)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_handle tmp;
|
||||
|
||||
status = acpi_get_handle(handle, "_EJ0", &tmp);
|
||||
if (ACPI_FAILURE(status))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* bay_present - see if the bay device is present
|
||||
* @bay: the drive bay
|
||||
*
|
||||
* execute the _STA method.
|
||||
*/
|
||||
static int bay_present(struct bay *bay)
|
||||
{
|
||||
unsigned long sta;
|
||||
acpi_status status;
|
||||
|
||||
if (bay) {
|
||||
status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
|
||||
if (ACPI_SUCCESS(status) && sta)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* eject_device - respond to an eject request
|
||||
* @handle - the device to eject
|
||||
*
|
||||
* Call this devices _EJ0 method.
|
||||
*/
|
||||
static void eject_device(acpi_handle handle)
|
||||
{
|
||||
struct acpi_object_list arg_list;
|
||||
union acpi_object arg;
|
||||
|
||||
bay_dprintk(handle, "Ejecting device");
|
||||
|
||||
arg_list.count = 1;
|
||||
arg_list.pointer = &arg;
|
||||
arg.type = ACPI_TYPE_INTEGER;
|
||||
arg.integer.value = 1;
|
||||
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
|
||||
&arg_list, NULL)))
|
||||
pr_debug("Failed to evaluate _EJ0!\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* show_present - read method for "present" file in sysfs
|
||||
*/
|
||||
static ssize_t show_present(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct bay *bay = dev_get_drvdata(dev);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
|
||||
|
||||
}
|
||||
DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
|
||||
|
||||
/*
|
||||
* write_eject - write method for "eject" file in sysfs
|
||||
*/
|
||||
static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct bay *bay = dev_get_drvdata(dev);
|
||||
|
||||
if (!count)
|
||||
return -EINVAL;
|
||||
|
||||
eject_device(bay->handle);
|
||||
return count;
|
||||
}
|
||||
DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
|
||||
|
||||
/**
|
||||
* is_ata - see if a device is an ata device
|
||||
* @handle: acpi handle of the device
|
||||
*
|
||||
* If an acpi object has one of 4 ATA ACPI methods defined,
|
||||
* then it is an ATA device
|
||||
*/
|
||||
static int is_ata(acpi_handle handle)
|
||||
{
|
||||
acpi_handle tmp;
|
||||
|
||||
if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
|
||||
(ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
|
||||
(ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
|
||||
(ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* parent_is_ata(acpi_handle handle)
|
||||
*
|
||||
*/
|
||||
static int parent_is_ata(acpi_handle handle)
|
||||
{
|
||||
acpi_handle phandle;
|
||||
|
||||
if (acpi_get_parent(handle, &phandle))
|
||||
return 0;
|
||||
|
||||
return is_ata(phandle);
|
||||
}
|
||||
|
||||
/**
|
||||
* is_ejectable_bay - see if a device is an ejectable drive bay
|
||||
* @handle: acpi handle of the device
|
||||
*
|
||||
* If an acpi object is ejectable and has one of the ACPI ATA
|
||||
* methods defined, then we can safely call it an ejectable
|
||||
* drive bay
|
||||
*/
|
||||
static int is_ejectable_bay(acpi_handle handle)
|
||||
{
|
||||
if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* eject_removable_drive - try to eject this drive
|
||||
* @dev : the device structure of the drive
|
||||
*
|
||||
* If a device is a removable drive that requires an _EJ0 method
|
||||
* to be executed in order to safely remove from the system, do
|
||||
* it. ATM - always returns success
|
||||
*/
|
||||
int eject_removable_drive(struct device *dev)
|
||||
{
|
||||
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
|
||||
|
||||
if (handle) {
|
||||
bay_dprintk(handle, "Got device handle");
|
||||
if (is_ejectable_bay(handle))
|
||||
eject_device(handle);
|
||||
} else {
|
||||
printk("No acpi handle for device\n");
|
||||
}
|
||||
|
||||
/* should I return an error code? */
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eject_removable_drive);
|
||||
|
||||
static int acpi_bay_add(struct acpi_device *device)
|
||||
{
|
||||
bay_dprintk(device->handle, "adding bay device");
|
||||
strcpy(acpi_device_name(device), "Dockable Bay");
|
||||
strcpy(acpi_device_class(device), "bay");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int acpi_bay_add_fs(struct bay *bay)
|
||||
{
|
||||
int ret;
|
||||
struct device *dev = &bay->pdev->dev;
|
||||
|
||||
ret = device_create_file(dev, &dev_attr_present);
|
||||
if (ret)
|
||||
goto add_fs_err;
|
||||
ret = device_create_file(dev, &dev_attr_eject);
|
||||
if (ret) {
|
||||
device_remove_file(dev, &dev_attr_present);
|
||||
goto add_fs_err;
|
||||
}
|
||||
return 0;
|
||||
|
||||
add_fs_err:
|
||||
bay_dprintk(bay->handle, "Error adding sysfs files\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void acpi_bay_remove_fs(struct bay *bay)
|
||||
{
|
||||
struct device *dev = &bay->pdev->dev;
|
||||
|
||||
/* cleanup sysfs */
|
||||
device_remove_file(dev, &dev_attr_present);
|
||||
device_remove_file(dev, &dev_attr_eject);
|
||||
}
|
||||
|
||||
static int bay_is_dock_device(acpi_handle handle)
|
||||
{
|
||||
acpi_handle parent;
|
||||
|
||||
acpi_get_parent(handle, &parent);
|
||||
|
||||
/* if the device or it's parent is dependent on the
|
||||
* dock, then we are a dock device
|
||||
*/
|
||||
return (is_dock_device(handle) || is_dock_device(parent));
|
||||
}
|
||||
|
||||
static int bay_add(acpi_handle handle, int id)
{
	acpi_status status;
	struct bay *new_bay;
	struct platform_device *pdev;
	struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);

	bay_dprintk(handle, "Adding notify handler");

	/*
	 * Initialize bay device structure
	 */
	new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
	INIT_LIST_HEAD(&new_bay->list);
	new_bay->handle = handle;
	new_bay->name = (char *)nbuffer.pointer;

	/* initialize platform device stuff */
	pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
	if (pdev == NULL) {
		printk(KERN_ERR PREFIX "Error registering bay device\n");
		goto bay_add_err;
	}
	new_bay->pdev = pdev;
	platform_set_drvdata(pdev, new_bay);

	if (acpi_bay_add_fs(new_bay)) {
		platform_device_unregister(new_bay->pdev);
		goto bay_add_err;
	}

	/* register for events on this device */
	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     bay_notify, new_bay);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
	}

	/* if we are on a dock station, we should register for dock
	 * notifications.
	 */
	if (bay_is_dock_device(handle)) {
		bay_dprintk(handle, "Is dependent on dock\n");
		register_hotplug_dock_device(handle, bay_notify, new_bay);
	}
	list_add(&new_bay->list, &drive_bays);
	printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
	return 0;

bay_add_err:
	kfree(new_bay->name);
	kfree(new_bay);
	return -ENODEV;
}

static int acpi_bay_remove(struct acpi_device *device, int type)
{
	/*** FIXME: do something here */
	return 0;
}

/**
 * bay_create_acpi_device - add new devices to acpi
 * @handle - handle of the device to add
 *
 * This function will create a new acpi_device for the given
 * handle if one does not exist already.  This should cause
 * acpi to scan for drivers for the given devices, and call
 * matching driver's add routine.
 *
 * Returns a pointer to the acpi_device corresponding to the handle.
 */
static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
{
	struct acpi_device *device = NULL;
	struct acpi_device *parent_device;
	acpi_handle parent;
	int ret;

	bay_dprintk(handle, "Trying to get device");
	if (acpi_bus_get_device(handle, &device)) {
		/*
		 * no device created for this object,
		 * so we should create one.
		 */
		bay_dprintk(handle, "No device for handle");
		acpi_get_parent(handle, &parent);
		if (acpi_bus_get_device(parent, &parent_device))
			parent_device = NULL;

		ret = acpi_bus_add(&device, parent_device, handle,
				   ACPI_BUS_TYPE_DEVICE);
		if (ret) {
			pr_debug("error adding bus, %x\n", -ret);
			return NULL;
		}
	}
	return device;
}

/**
 * bay_notify - act upon an acpi bay notification
 * @handle: the bay handle
 * @event: the acpi event
 * @data: our driver data struct
 *
 */
static void bay_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *dev;

	bay_dprintk(handle, "Bay event");

	switch(event) {
	case ACPI_NOTIFY_BUS_CHECK:
		printk("Bus Check\n");
	case ACPI_NOTIFY_DEVICE_CHECK:
		printk("Device Check\n");
		dev = bay_create_acpi_device(handle);
		if (dev)
			acpi_bus_generate_event(dev, event, 0);
		else
			printk("No device for generating event\n");
		/* wouldn't it be a good idea to just rescan SATA
		 * right here?
		 */
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		printk("Eject request\n");
		dev = bay_create_acpi_device(handle);
		if (dev)
			acpi_bus_generate_event(dev, event, 0);
		else
			printk("No device for generating event\n");

		/* wouldn't it be a good idea to just call the
		 * eject_device here if we were a SATA device?
		 */
		break;
	default:
		printk("unknown event %d\n", event);
	}
}

static acpi_status
find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	int *count = (int *)context;

	/*
	 * there could be more than one ejectable bay.
	 * so, just return AE_OK always so that every object
	 * will be checked.
	 */
	if (is_ejectable_bay(handle)) {
		bay_dprintk(handle, "found ejectable bay");
		if (!bay_add(handle, *count))
			(*count)++;
	}
	return AE_OK;
}

static int __init bay_init(void)
{
	int bays = 0;

	INIT_LIST_HEAD(&drive_bays);

	/* look for dockable drive bays */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX, find_bay, &bays, NULL);

	if (bays)
		if (acpi_bus_register_driver(&acpi_bay_driver) < 0)
			printk(KERN_ERR "Unable to register bay driver\n");

	if (!bays)
		return -ENODEV;

	return 0;
}

static void __exit bay_exit(void)
{
	struct bay *bay, *tmp;

	list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
		if (is_dock_device(bay->handle))
			unregister_hotplug_dock_device(bay->handle);
		acpi_bay_remove_fs(bay);
		acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
					   bay_notify);
		platform_device_unregister(bay->pdev);
		kfree(bay->name);
		kfree(bay);
	}

	acpi_bus_unregister_driver(&acpi_bay_driver);
}

postcore_initcall(bay_init);
module_exit(bay_exit);

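/*
 * Note: dev_attr_present and dev_attr_eject, used by acpi_bay_add_fs()
 * above, are defined earlier in bay.c and are not part of this excerpt.
 * The sketch below only illustrates the general shape of such an "eject"
 * store callback; the function name is assumed, not taken from the driver.
 * It relies on the drvdata set with platform_set_drvdata() in bay_add()
 * and on eject_device(), defined elsewhere in the file, and would be wired
 * up roughly as: DEVICE_ATTR(eject, S_IWUSR, NULL, bay_eject_store);
 */
static ssize_t bay_eject_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct bay *bay = dev_get_drvdata(dev);

	if (!count || !bay)
		return -EINVAL;

	eject_device(bay->handle);	/* execute _EJ0 for this bay */
	return count;
}
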
@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
	char oem_id[7];
	char oem_table_id[9];
	u32 oem_revision;
	acpi_table_type table;
	char *table;
	enum acpi_blacklist_predicates oem_revision_predicate;
	char *reason;
	u32 is_critical_error;

@ -56,18 +56,18 @@ struct acpi_blacklist_item {
|
|||
*/
|
||||
static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
|
||||
/* Compaq Presario 1700 */
|
||||
{"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal,
|
||||
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
|
||||
"Multiple problems", 1},
|
||||
/* Sony FX120, FX140, FX150? */
|
||||
{"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal,
|
||||
{"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
|
||||
"ACPI driver problem", 1},
|
||||
/* Compaq Presario 800, Insyde BIOS */
|
||||
{"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal,
|
||||
{"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
|
||||
"Does not use _REG to protect EC OpRegions", 1},
|
||||
/* IBM 600E - _ADR should return 7, but it returns 1 */
|
||||
{"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal,
|
||||
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
|
||||
"Incorrect _ADR", 1},
|
||||
{"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions,
|
||||
{"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
|
||||
"Bogus PCI routing", 1},
|
||||
|
||||
{""}
|
||||
|
@ -79,7 +79,7 @@ static int __init blacklist_by_year(void)
|
|||
{
|
||||
int year = dmi_get_year(DMI_BIOS_DATE);
|
||||
/* Doesn't exist? Likely an old system */
|
||||
if (year == -1)
|
||||
if (year == -1)
|
||||
return 1;
|
||||
/* 0? Likely a buggy new BIOS */
|
||||
if (year == 0)
|
||||
|
@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
|
|||
{
|
||||
int i = 0;
|
||||
int blacklisted = 0;
|
||||
struct acpi_table_header *table_header;
|
||||
struct acpi_table_header table_header;
|
||||
|
||||
while (acpi_blacklist[i].oem_id[0] != '\0') {
|
||||
if (acpi_get_table_header_early
|
||||
(acpi_blacklist[i].table, &table_header)) {
|
||||
if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) {
|
||||
if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (strncmp
|
||||
(acpi_blacklist[i].oem_table_id, table_header->oem_table_id,
|
||||
(acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
|
||||
8)) {
|
||||
i++;
|
||||
continue;
|
||||
|
@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
|
|||
if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
|
||||
|| (acpi_blacklist[i].oem_revision_predicate ==
|
||||
less_than_or_equal
|
||||
&& table_header->oem_revision <=
|
||||
&& table_header.oem_revision <=
|
||||
acpi_blacklist[i].oem_revision)
|
||||
|| (acpi_blacklist[i].oem_revision_predicate ==
|
||||
greater_than_or_equal
|
||||
&& table_header->oem_revision >=
|
||||
&& table_header.oem_revision >=
|
||||
acpi_blacklist[i].oem_revision)
|
||||
|| (acpi_blacklist[i].oem_revision_predicate == equal
|
||||
&& table_header->oem_revision ==
|
||||
&& table_header.oem_revision ==
|
||||
acpi_blacklist[i].oem_revision)) {
|
||||
|
||||
printk(KERN_ERR PREFIX
|
||||
|
|
|
@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
|
|||
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
|
||||
#endif
|
||||
|
||||
struct fadt_descriptor acpi_fadt;
|
||||
EXPORT_SYMBOL(acpi_fadt);
|
||||
|
||||
struct acpi_device *acpi_root;
|
||||
struct proc_dir_entry *acpi_root_dir;
|
||||
EXPORT_SYMBOL(acpi_root_dir);
|
||||
|
@ -195,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
|
|||
|
||||
if (!device->flags.power_manageable) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
|
||||
device->kobj.name));
|
||||
device->dev.kobj.name));
|
||||
return -ENODEV;
|
||||
}
|
||||
/*
|
||||
|
@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
acpi_native_uint acpi_gbl_permanent_mmap;
|
||||
|
||||
|
||||
void __init acpi_early_init(void)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
|
||||
|
||||
|
||||
if (acpi_disabled)
|
||||
return;
|
||||
|
@ -597,6 +595,15 @@ void __init acpi_early_init(void)
|
|||
if (!acpi_strict)
|
||||
acpi_gbl_enable_interpreter_slack = TRUE;
|
||||
|
||||
acpi_gbl_permanent_mmap = 1;
|
||||
|
||||
status = acpi_reallocate_root_table();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR PREFIX
|
||||
"Unable to reallocate ACPI tables\n");
|
||||
goto error0;
|
||||
}
|
||||
|
||||
status = acpi_initialize_subsystem();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR PREFIX
|
||||
|
@ -611,32 +618,25 @@ void __init acpi_early_init(void)
|
|||
goto error0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a separate copy of the FADT for use by other drivers.
|
||||
*/
|
||||
status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
|
||||
goto error0;
|
||||
}
|
||||
#ifdef CONFIG_X86
|
||||
if (!acpi_ioapic) {
|
||||
extern acpi_interrupt_flags acpi_sci_flags;
|
||||
extern u8 acpi_sci_flags;
|
||||
|
||||
/* compatible (0) means level (3) */
|
||||
if (acpi_sci_flags.trigger == 0)
|
||||
acpi_sci_flags.trigger = 3;
|
||||
|
||||
if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
|
||||
acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
|
||||
acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
|
||||
}
|
||||
/* Set PIC-mode SCI trigger type */
|
||||
acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
|
||||
acpi_sci_flags.trigger);
|
||||
acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
|
||||
(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
|
||||
} else {
|
||||
extern int acpi_sci_override_gsi;
|
||||
/*
|
||||
* now that acpi_fadt is initialized,
|
||||
* now that acpi_gbl_FADT is initialized,
|
||||
* update it with result from INT_SRC_OVR parsing
|
||||
*/
|
||||
acpi_fadt.sci_int = acpi_sci_override_gsi;
|
||||
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -75,7 +75,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
|
|||
static struct acpi_driver acpi_button_driver = {
|
||||
.name = ACPI_BUTTON_DRIVER_NAME,
|
||||
.class = ACPI_BUTTON_CLASS,
|
||||
.ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E",
|
||||
.ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
|
||||
.ops = {
|
||||
.add = acpi_button_add,
|
||||
.remove = acpi_button_remove,
|
||||
|
|
|
@ -167,7 +167,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
|
|||
if (ACPI_FAILURE(status) || !device) {
|
||||
result = container_device_add(&device, handle);
|
||||
if (!result)
|
||||
kobject_uevent(&device->kobj,
|
||||
kobject_uevent(&device->dev.kobj,
|
||||
KOBJ_ONLINE);
|
||||
else
|
||||
printk("Failed to add container\n");
|
||||
|
@ -175,13 +175,13 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
|
|||
} else {
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
/* device exist and this is a remove request */
|
||||
kobject_uevent(&device->kobj, KOBJ_OFFLINE);
|
||||
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ACPI_NOTIFY_EJECT_REQUEST:
|
||||
if (!acpi_bus_get_device(handle, &device) && device) {
|
||||
kobject_uevent(&device->kobj, KOBJ_OFFLINE);
|
||||
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -13,14 +13,11 @@
|
|||
|
||||
#define _COMPONENT ACPI_SYSTEM_COMPONENT
|
||||
ACPI_MODULE_NAME("debug")
|
||||
#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
|
||||
#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
|
||||
|
||||
#ifdef MODULE_PARAM_PREFIX
|
||||
#undef MODULE_PARAM_PREFIX
|
||||
#endif
|
||||
#define MODULE_PARAM_PREFIX
|
||||
module_param(acpi_dbg_layer, uint, 0400);
|
||||
module_param(acpi_dbg_level, uint, 0400);
|
||||
#define MODULE_PARAM_PREFIX "acpi."
|
||||
|
||||
struct acpi_dlayer {
|
||||
const char *name;
|
||||
|
@ -86,6 +83,60 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
|
|||
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
|
||||
};
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
FS Interface (/sys)
|
||||
-------------------------------------------------------------------------- */
|
||||
static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
|
||||
int result = 0;
|
||||
int i;
|
||||
|
||||
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
|
||||
|
||||
for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) {
|
||||
result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
|
||||
acpi_debug_layers[i].name,
|
||||
acpi_debug_layers[i].value,
|
||||
(acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
|
||||
}
|
||||
result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
|
||||
ACPI_ALL_DRIVERS,
|
||||
(acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
|
||||
ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
|
||||
ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
|
||||
result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
|
||||
int result = 0;
|
||||
int i;
|
||||
|
||||
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
|
||||
result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
|
||||
acpi_debug_levels[i].name,
|
||||
acpi_debug_levels[i].value,
|
||||
(acpi_dbg_level & acpi_debug_levels[i].
|
||||
value) ? '*' : ' ');
|
||||
}
|
||||
result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
|
||||
acpi_dbg_level);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
|
||||
module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
FS Interface (/proc)
|
||||
-------------------------------------------------------------------------- */
|
||||
#ifdef CONFIG_ACPI_PROCFS
|
||||
#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
|
||||
#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
|
||||
|
||||
static int
|
||||
acpi_system_read_debug(char *page,
|
||||
char **start, off_t off, int count, int *eof, void *data)
|
||||
|
@ -221,3 +272,4 @@ static int __init acpi_debug_init(void)
|
|||
}
|
||||
|
||||
subsys_initcall(acpi_debug_init);
|
||||
#endif
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
|
|||
}
|
||||
}
|
||||
|
||||
/* We could put the returned object (Node) on the object stack for later,
|
||||
/*
|
||||
* We could put the returned object (Node) on the object stack for later,
|
||||
* but for now, we will put it in the "op" object that the parser uses,
|
||||
* so we can get it again at the end of this scope
|
||||
*/
|
||||
|
@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
|
|||
|
||||
/* Third arg is the bank_value */
|
||||
|
||||
/* TBD: This arg is a term_arg, not a constant, and must be evaluated */
|
||||
|
||||
arg = arg->common.next;
|
||||
info.bank_value = (u32) arg->common.value.integer;
|
||||
|
||||
/* Currently, only the following constants are supported */
|
||||
|
||||
switch (arg->common.aml_opcode) {
|
||||
case AML_ZERO_OP:
|
||||
info.bank_value = 0;
|
||||
break;
|
||||
|
||||
case AML_ONE_OP:
|
||||
info.bank_value = 1;
|
||||
break;
|
||||
|
||||
case AML_BYTE_OP:
|
||||
case AML_WORD_OP:
|
||||
case AML_DWORD_OP:
|
||||
case AML_QWORD_OP:
|
||||
info.bank_value = (u32) arg->common.value.integer;
|
||||
break;
|
||||
|
||||
default:
|
||||
info.bank_value = 0;
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Non-constant BankValue for BankField is not implemented"));
|
||||
}
|
||||
|
||||
/* Fourth arg is the field flags */
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -44,6 +44,7 @@
|
|||
#include <acpi/acpi.h>
|
||||
#include <acpi/acdispat.h>
|
||||
#include <acpi/acnamesp.h>
|
||||
#include <acpi/actables.h>
|
||||
|
||||
#define _COMPONENT ACPI_DISPATCHER
|
||||
ACPI_MODULE_NAME("dsinit")
|
||||
|
@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
|
|||
* We are only interested in NS nodes owned by the table that
|
||||
* was just loaded
|
||||
*/
|
||||
if (node->owner_id != info->table_desc->owner_id) {
|
||||
if (node->owner_id != info->owner_id) {
|
||||
return (AE_OK);
|
||||
}
|
||||
|
||||
|
@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
|
|||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
|
||||
acpi_ds_initialize_objects(acpi_native_uint table_index,
|
||||
struct acpi_namespace_node * start_node)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_init_walk_info info;
|
||||
struct acpi_table_header *table;
|
||||
acpi_owner_id owner_id;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ds_initialize_objects);
|
||||
|
||||
status = acpi_tb_get_owner_id(table_index, &owner_id);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
|
||||
"**** Starting initialization of namespace objects ****\n"));
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
|
||||
|
@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
|
|||
info.op_region_count = 0;
|
||||
info.object_count = 0;
|
||||
info.device_count = 0;
|
||||
info.table_desc = table_desc;
|
||||
info.table_index = table_index;
|
||||
info.owner_id = owner_id;
|
||||
|
||||
/* Walk entire namespace from the supplied root */
|
||||
|
||||
|
@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
|
|||
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
|
||||
}
|
||||
|
||||
status = acpi_get_table_by_index(table_index, &table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
|
||||
"\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
|
||||
table_desc->pointer->signature,
|
||||
table_desc->owner_id, info.object_count,
|
||||
table->signature, owner_id, info.object_count,
|
||||
info.device_count, info.method_count,
|
||||
info.op_region_count));
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
|
|||
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
|
||||
"Execute method %p, currentstate=%p\n",
|
||||
"Calling method %p, currentstate=%p\n",
|
||||
this_walk_state->prev_op, this_walk_state));
|
||||
|
||||
/*
|
||||
|
@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Parse the method. All "normal" methods are parsed for each execution.
|
||||
* Internal methods (_OSI, etc.) do not require parsing.
|
||||
*/
|
||||
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
|
||||
|
||||
/* Create a new walk state for the parse */
|
||||
|
||||
next_walk_state =
|
||||
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
|
||||
obj_desc, NULL);
|
||||
if (!next_walk_state) {
|
||||
status = AE_NO_MEMORY;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Create and init a parse tree root */
|
||||
|
||||
op = acpi_ps_create_scope_op();
|
||||
if (!op) {
|
||||
status = AE_NO_MEMORY;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
|
||||
obj_desc->method.aml_start,
|
||||
obj_desc->method.aml_length,
|
||||
NULL, 1);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ps_delete_parse_tree(op);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Begin AML parse (deletes next_walk_state) */
|
||||
|
||||
status = acpi_ps_parse_aml(next_walk_state);
|
||||
acpi_ps_delete_parse_tree(op);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
/* 2) Begin method execution. Create a new walk state */
|
||||
/* Begin method parse/execution. Create a new walk state */
|
||||
|
||||
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
|
||||
NULL, obj_desc, thread);
|
||||
|
@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
|
|||
|
||||
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
|
||||
obj_desc->method.aml_start,
|
||||
obj_desc->method.aml_length, info, 3);
|
||||
obj_desc->method.aml_length, info,
|
||||
ACPI_IMODE_EXECUTE);
|
||||
|
||||
ACPI_FREE(info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
|
|||
this_walk_state->num_operands = 0;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
|
||||
"Starting nested execution, newstate=%p\n",
|
||||
next_walk_state));
|
||||
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
|
||||
method_node->name.ascii, next_walk_state));
|
||||
|
||||
/* Invoke an internal method if necessary */
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
|
|||
}
|
||||
|
||||
obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
|
||||
op->common.node = (struct acpi_namespace_node *)obj_desc;
|
||||
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
|
@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
|
|||
*
|
||||
* PARAMETERS: walk_state - Current walk state
|
||||
* Op - Parser object to be translated
|
||||
* package_length - Number of elements in the package
|
||||
* element_count - Number of elements in the package - this is
|
||||
* the num_elements argument to Package()
|
||||
* obj_desc_ptr - Where the ACPI internal object is returned
|
||||
*
|
||||
* RETURN: Status
|
||||
|
@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
|
|||
* DESCRIPTION: Translate a parser Op package object to the equivalent
|
||||
* namespace object
|
||||
*
|
||||
* NOTE: The number of elements in the package will be always be the num_elements
|
||||
* count, regardless of the number of elements in the package list. If
|
||||
* num_elements is smaller, only that many package list elements are used.
|
||||
* if num_elements is larger, the Package object is padded out with
|
||||
* objects of type Uninitialized (as per ACPI spec.)
|
||||
*
|
||||
* Even though the ASL compilers do not allow num_elements to be smaller
|
||||
* than the Package list length (for the fixed length package opcode), some
|
||||
* BIOS code modifies the AML on the fly to adjust the num_elements, and
|
||||
* this code compensates for that. This also provides compatibility with
|
||||
* other AML interpreters.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
|
||||
union acpi_parse_object *op,
|
||||
u32 package_length,
|
||||
u32 element_count,
|
||||
union acpi_operand_object **obj_desc_ptr)
|
||||
{
|
||||
union acpi_parse_object *arg;
|
||||
union acpi_parse_object *parent;
|
||||
union acpi_operand_object *obj_desc = NULL;
|
||||
u32 package_list_length;
|
||||
acpi_status status = AE_OK;
|
||||
acpi_native_uint i;
|
||||
|
||||
|
@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
|
|||
obj_desc->package.node = parent->common.node;
|
||||
}
|
||||
|
||||
obj_desc->package.count = package_length;
|
||||
|
||||
/* Count the number of items in the package list */
|
||||
|
||||
arg = op->common.value.arg;
|
||||
arg = arg->common.next;
|
||||
for (package_list_length = 0; arg; package_list_length++) {
|
||||
arg = arg->common.next;
|
||||
}
|
||||
|
||||
/*
|
||||
* The package length (number of elements) will be the greater
|
||||
* of the specified length and the length of the initializer list
|
||||
*/
|
||||
if (package_list_length > package_length) {
|
||||
obj_desc->package.count = package_list_length;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate the pointer array (array of pointers to the
|
||||
* individual objects). Add an extra pointer slot so
|
||||
* that the list is always null terminated.
|
||||
* Allocate the element array (array of pointers to the individual
|
||||
* objects) based on the num_elements parameter. Add an extra pointer slot
|
||||
* so that the list is always null terminated.
|
||||
*/
|
||||
obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
|
||||
obj_desc->package.
|
||||
count +
|
||||
element_count +
|
||||
1) * sizeof(void *));
|
||||
|
||||
if (!obj_desc->package.elements) {
|
||||
|
@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
|
|||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
obj_desc->package.count = element_count;
|
||||
|
||||
/*
|
||||
* Initialize all elements of the package
|
||||
* Initialize the elements of the package, up to the num_elements count.
|
||||
* Package is automatically padded with uninitialized (NULL) elements
|
||||
* if num_elements is greater than the package list length. Likewise,
|
||||
* Package is truncated if num_elements is less than the list length.
|
||||
*/
|
||||
arg = op->common.value.arg;
|
||||
arg = arg->common.next;
|
||||
for (i = 0; arg; i++) {
|
||||
for (i = 0; arg && (i < element_count); i++) {
|
||||
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
|
||||
|
||||
/* Object (package or buffer) is already built */
|
||||
/* This package element is already built, just get it */
|
||||
|
||||
obj_desc->package.elements[i] =
|
||||
ACPI_CAST_PTR(union acpi_operand_object,
|
||||
|
@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
|
|||
arg = arg->common.next;
|
||||
}
|
||||
|
||||
if (!arg) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Package List length larger than NumElements count (%X), truncated\n",
|
||||
element_count));
|
||||
}
|
||||
|
||||
obj_desc->package.flags |= AOPOBJ_DATA_VALID;
|
||||
op->common.node = (struct acpi_namespace_node *)obj_desc;
|
||||
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
|
|||
/*
|
||||
* Defer evaluation of Buffer term_arg operand
|
||||
*/
|
||||
obj_desc->buffer.node = (struct acpi_namespace_node *)
|
||||
walk_state->operands[0];
|
||||
obj_desc->buffer.node =
|
||||
ACPI_CAST_PTR(struct acpi_namespace_node,
|
||||
walk_state->operands[0]);
|
||||
obj_desc->buffer.aml_start = op->named.data;
|
||||
obj_desc->buffer.aml_length = op->named.length;
|
||||
break;
|
||||
|
@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
|
|||
/*
|
||||
* Defer evaluation of Package term_arg operand
|
||||
*/
|
||||
obj_desc->package.node = (struct acpi_namespace_node *)
|
||||
walk_state->operands[0];
|
||||
obj_desc->package.node =
|
||||
ACPI_CAST_PTR(struct acpi_namespace_node,
|
||||
walk_state->operands[0]);
|
||||
obj_desc->package.aml_start = op->named.data;
|
||||
obj_desc->package.aml_length = op->named.length;
|
||||
break;
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
|
|||
}
|
||||
|
||||
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
|
||||
aml_length, NULL, 1);
|
||||
aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ds_delete_walk_state(walk_state);
|
||||
goto cleanup;
|
||||
|
@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
|
|||
/* Execute the opcode and arguments */
|
||||
|
||||
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
|
||||
aml_length, NULL, 3);
|
||||
aml_length, NULL, ACPI_IMODE_EXECUTE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ds_delete_walk_state(walk_state);
|
||||
goto cleanup;
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
|
|||
if (!op) {
|
||||
status = acpi_ds_load2_begin_op(walk_state, out_op);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
op = *out_op;
|
||||
|
@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
|
|||
|
||||
status = acpi_ds_scope_stack_pop(walk_state);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
goto error_exit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
|
|||
|
||||
status = acpi_ds_result_stack_push(walk_state);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
goto error_exit;
|
||||
}
|
||||
|
||||
status = acpi_ds_exec_begin_control_op(walk_state, op);
|
||||
|
@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
|
|||
/* Nothing to do here during method execution */
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
|
||||
error_exit:
|
||||
status = acpi_ds_method_error(status, walk_state);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*****************************************************************************
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
|
|||
* one of the opcodes that actually opens a scope
|
||||
*/
|
||||
switch (node->type) {
|
||||
case ACPI_TYPE_ANY:
|
||||
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
|
||||
case ACPI_TYPE_DEVICE:
|
||||
case ACPI_TYPE_POWER:
|
||||
|
@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
|
|||
acpi_status status;
|
||||
acpi_object_type object_type;
|
||||
char *buffer_ptr;
|
||||
u32 flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
|
||||
|
||||
|
@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
|
|||
* one of the opcodes that actually opens a scope
|
||||
*/
|
||||
switch (node->type) {
|
||||
case ACPI_TYPE_ANY:
|
||||
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
|
||||
case ACPI_TYPE_DEVICE:
|
||||
case ACPI_TYPE_POWER:
|
||||
|
@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
|
|||
break;
|
||||
}
|
||||
|
||||
/* Add new entry into namespace */
|
||||
flags = ACPI_NS_NO_UPSEARCH;
|
||||
if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
|
||||
|
||||
/* Execution mode, node cannot already exist, node is temporary */
|
||||
|
||||
flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
|
||||
}
|
||||
|
||||
/* Add new entry or lookup existing entry */
|
||||
|
||||
status =
|
||||
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
|
||||
object_type, ACPI_IMODE_LOAD_PASS2,
|
||||
ACPI_NS_NO_UPSEARCH, walk_state, &(node));
|
||||
object_type, ACPI_IMODE_LOAD_PASS2, flags,
|
||||
walk_state, &node);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -615,20 +615,28 @@ static acpi_status
|
|||
find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_handle tmp;
|
||||
acpi_handle tmp, parent;
|
||||
struct dock_station *ds = context;
|
||||
struct dock_dependent_device *dd;
|
||||
|
||||
status = acpi_bus_get_ejd(handle, &tmp);
|
||||
if (ACPI_FAILURE(status))
|
||||
return AE_OK;
|
||||
if (ACPI_FAILURE(status)) {
|
||||
/* try the parent device as well */
|
||||
status = acpi_get_parent(handle, &parent);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto fdd_out;
|
||||
/* see if parent is dependent on dock */
|
||||
status = acpi_bus_get_ejd(parent, &tmp);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto fdd_out;
|
||||
}
|
||||
|
||||
if (tmp == ds->handle) {
|
||||
dd = alloc_dock_dependent_device(handle);
|
||||
if (dd)
|
||||
add_dock_dependent_device(ds, dd);
|
||||
}
|
||||
|
||||
fdd_out:
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
|
|||
acpi_status status;
|
||||
struct acpi_table_ecdt *ecdt_ptr;
|
||||
|
||||
status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING,
|
||||
(struct acpi_table_header **)
|
||||
&ecdt_ptr);
|
||||
status = acpi_get_table(ACPI_SIG_ECDT, 1,
|
||||
(struct acpi_table_header **)&ecdt_ptr);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -ENODEV;
|
||||
|
||||
|
@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
|
|||
if (acpi_ec_mode == EC_INTR) {
|
||||
init_waitqueue_head(&ec_ecdt->wait);
|
||||
}
|
||||
ec_ecdt->command_addr = ecdt_ptr->ec_control.address;
|
||||
ec_ecdt->data_addr = ecdt_ptr->ec_data.address;
|
||||
ec_ecdt->gpe = ecdt_ptr->gpe_bit;
|
||||
ec_ecdt->command_addr = ecdt_ptr->control.address;
|
||||
ec_ecdt->data_addr = ecdt_ptr->data.address;
|
||||
ec_ecdt->gpe = ecdt_ptr->gpe;
|
||||
/* use the GL just to be safe */
|
||||
ec_ecdt->global_lock = TRUE;
|
||||
ec_ecdt->uid = ecdt_ptr->uid;
|
||||
|
||||
status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle);
|
||||
status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto error;
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_initialize_events);
|
||||
|
||||
/* Make sure we have ACPI tables */
|
||||
|
||||
if (!acpi_gbl_DSDT) {
|
||||
ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
|
||||
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the Fixed and General Purpose Events. This is done prior to
|
||||
* enabling SCIs to prevent interrupts from occurring before the handlers are
|
||||
|
@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
|
|||
if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
|
||||
status =
|
||||
acpi_set_register(acpi_gbl_fixed_event_info[i].
|
||||
enable_register_id, 0,
|
||||
ACPI_MTX_LOCK);
|
||||
enable_register_id, 0);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
|
@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
|
|||
/* Clear the status bit */
|
||||
|
||||
(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
|
||||
status_register_id, 1, ACPI_MTX_DO_NOT_LOCK);
|
||||
status_register_id, 1);
|
||||
|
||||
/*
|
||||
* Make sure we've got a handler. If not, report an error.
|
||||
|
@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
|
|||
*/
|
||||
if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
|
||||
(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
|
||||
enable_register_id, 0,
|
||||
ACPI_MTX_DO_NOT_LOCK);
|
||||
enable_register_id, 0);
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"No installed handler for fixed event [%08X]",
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
|
|||
if (!gpe_register_info) {
|
||||
return_ACPI_STATUS(AE_NOT_EXIST);
|
||||
}
|
||||
register_bit = gpe_event_info->register_bit;
|
||||
register_bit = (u8)
|
||||
(1 <<
|
||||
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
|
||||
|
||||
/* 1) Disable case. Simply clear all enable bits */
|
||||
|
||||
|
@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
|
|||
|
||||
/* Examine one GPE bit */
|
||||
|
||||
if (enabled_status_byte &
|
||||
acpi_gbl_decode_to8bit[j]) {
|
||||
if (enabled_status_byte & (1 << j)) {
|
||||
/*
|
||||
* Found an active GPE. Dispatch the event to a handler
|
||||
* or method.
|
||||
|
@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
|
|||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"While evaluating GPE method [%4.4s]",
|
||||
"while evaluating GPE method [%4.4s]",
|
||||
acpi_ut_get_node_name
|
||||
(local_gpe_event_info.dispatch.
|
||||
method_node)));
|
||||
|
@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
|
||||
|
||||
acpi_gpe_count++;
|
||||
|
||||
/*
|
||||
* If edge-triggered, clear the GPE status bit now. Note that
|
||||
* level-triggered events are cleared after the GPE is serviced.
|
||||
|
@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
}
|
||||
}
|
||||
|
||||
/* Save current system state */
|
||||
|
||||
if (acpi_gbl_system_awake_and_running) {
|
||||
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
|
||||
} else {
|
||||
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
|
||||
if (!acpi_gbl_system_awake_and_running) {
|
||||
/*
|
||||
* We just woke up because of a wake GPE. Disable any further GPEs
|
||||
* until we are fully up and running (Only wake GPEs should be enabled
|
||||
* at this time, but we just brute-force disable them all.)
|
||||
* 1) We must disable this particular wake GPE so it won't fire again
|
||||
* 2) We want to disable all wake GPEs, since we are now awake
|
||||
*/
|
||||
(void)acpi_hw_disable_all_gpes();
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispatch the GPE to either an installed handler, or the control
|
||||
* method associated with this GPE (_Lxx or _Exx).
|
||||
* If a handler exists, we invoke it and do not attempt to run the method.
|
||||
* If there is neither a handler nor a method, we disable the level to
|
||||
* prevent further events from coming in here.
|
||||
* Dispatch the GPE to either an installed handler, or the control method
|
||||
* associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
|
||||
* it and do not attempt to run the method. If there is neither a handler
|
||||
* nor a method, we disable this GPE to prevent further such pointless
|
||||
* events from firing.
|
||||
*/
|
||||
switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
|
||||
case ACPI_GPE_DISPATCH_HANDLER:
|
||||
|
@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
case ACPI_GPE_DISPATCH_METHOD:
|
||||
|
||||
/*
|
||||
* Disable GPE, so it doesn't keep firing before the method has a
|
||||
* chance to run.
|
||||
* Disable the GPE, so it doesn't keep firing before the method has a
|
||||
* chance to run (it runs asynchronously with interrupts enabled).
|
||||
*/
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
@ -711,7 +717,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
gpe_number));
|
||||
|
||||
/*
|
||||
* Disable the GPE. The GPE will remain disabled until the ACPI
|
||||
* Disable the GPE. The GPE will remain disabled until the ACPI
|
||||
* Core Subsystem is restarted, or a handler is installed.
|
||||
*/
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
|
@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
|
||||
return_UINT32(ACPI_INTERRUPT_HANDLED);
|
||||
}
|
||||
|
||||
#ifdef ACPI_GPE_NOTIFY_CHECK
|
||||
/*******************************************************************************
|
||||
* TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
|
||||
*
|
||||
* FUNCTION: acpi_ev_check_for_wake_only_gpe
|
||||
*
|
||||
* PARAMETERS: gpe_event_info - info for this GPE
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Determine if a GPE is "wake-only".
|
||||
*
|
||||
* Called from Notify() code in interpreter when a "DeviceWake"
|
||||
* Notify comes in.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
|
||||
|
||||
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
|
||||
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
|
||||
/* This must be a wake-only GPE, disable it */
|
||||
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
|
||||
/* Set GPE to wake-only. Do not change wake disabled/enabled status */
|
||||
|
||||
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
|
||||
|
||||
ACPI_INFO((AE_INFO,
|
||||
"GPE %p was updated from wake/run to wake-only",
|
||||
gpe_event_info));
|
||||
|
||||
/* This was a wake-only GPE */
|
||||
|
||||
return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
|
|||
|
||||
/* Install new interrupt handler if not SCI_INT */
|
||||
|
||||
if (interrupt_number != acpi_gbl_FADT->sci_int) {
|
||||
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
|
||||
status = acpi_os_install_interrupt_handler(interrupt_number,
|
||||
acpi_ev_gpe_xrupt_handler,
|
||||
gpe_xrupt);
|
||||
|
@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
|
|||
|
||||
/* We never want to remove the SCI interrupt handler */
|
||||
|
||||
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) {
|
||||
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
|
||||
gpe_xrupt->gpe_block_list_head = NULL;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
|
|||
(u8) (gpe_block->block_base_number +
|
||||
(i * ACPI_GPE_REGISTER_WIDTH));
|
||||
|
||||
ACPI_STORE_ADDRESS(this_register->status_address.address,
|
||||
(gpe_block->block_address.address + i));
|
||||
this_register->status_address.address =
|
||||
gpe_block->block_address.address + i;
|
||||
|
||||
ACPI_STORE_ADDRESS(this_register->enable_address.address,
|
||||
(gpe_block->block_address.address
|
||||
+ i + gpe_block->register_count));
|
||||
this_register->enable_address.address =
|
||||
gpe_block->block_address.address + i +
|
||||
gpe_block->register_count;
|
||||
|
||||
this_register->status_address.address_space_id =
|
||||
gpe_block->block_address.address_space_id;
|
||||
this_register->enable_address.address_space_id =
|
||||
gpe_block->block_address.address_space_id;
|
||||
this_register->status_address.register_bit_width =
|
||||
this_register->status_address.space_id =
|
||||
gpe_block->block_address.space_id;
|
||||
this_register->enable_address.space_id =
|
||||
gpe_block->block_address.space_id;
|
||||
this_register->status_address.bit_width =
|
||||
ACPI_GPE_REGISTER_WIDTH;
|
||||
this_register->enable_address.register_bit_width =
|
||||
this_register->enable_address.bit_width =
|
||||
ACPI_GPE_REGISTER_WIDTH;
|
||||
this_register->status_address.register_bit_offset =
|
||||
this_register->status_address.bit_offset =
|
||||
ACPI_GPE_REGISTER_WIDTH;
|
||||
this_register->enable_address.register_bit_offset =
|
||||
this_register->enable_address.bit_offset =
|
||||
ACPI_GPE_REGISTER_WIDTH;
|
||||
|
||||
/* Init the event_info for each GPE within this register */
|
||||
|
||||
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
|
||||
this_event->register_bit = acpi_gbl_decode_to8bit[j];
|
||||
this_event->gpe_number =
|
||||
(u8) (this_register->base_gpe_number + j);
|
||||
this_event->register_info = this_register;
|
||||
this_event++;
|
||||
}
|
||||
|
@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
|
|||
* If EITHER the register length OR the block address are zero, then that
|
||||
* particular block is not supported.
|
||||
*/
|
||||
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
|
||||
if (acpi_gbl_FADT.gpe0_block_length &&
|
||||
acpi_gbl_FADT.xgpe0_block.address) {
|
||||
|
||||
/* GPE block 0 exists (has both length and address > 0) */
|
||||
|
||||
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
|
||||
register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
|
||||
|
||||
gpe_number_max =
|
||||
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
|
||||
|
@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
|
|||
/* Install GPE Block 0 */
|
||||
|
||||
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT->xgpe0_blk,
|
||||
&acpi_gbl_FADT.xgpe0_block,
|
||||
register_count0, 0,
|
||||
acpi_gbl_FADT->sci_int,
|
||||
acpi_gbl_FADT.sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks[0]);
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
|
|||
}
|
||||
}
|
||||
|
||||
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
|
||||
if (acpi_gbl_FADT.gpe1_block_length &&
|
||||
acpi_gbl_FADT.xgpe1_block.address) {
|
||||
|
||||
/* GPE block 1 exists (has both length and address > 0) */
|
||||
|
||||
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
|
||||
register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
|
||||
|
||||
/* Check for GPE0/GPE1 overlap (if both banks exist) */
|
||||
|
||||
if ((register_count0) &&
|
||||
(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
|
||||
(gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
|
||||
gpe_number_max, acpi_gbl_FADT->gpe1_base,
|
||||
acpi_gbl_FADT->gpe1_base +
|
||||
gpe_number_max, acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 *
|
||||
ACPI_GPE_REGISTER_WIDTH) - 1)));
|
||||
|
||||
|
@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
|
|||
|
||||
status =
|
||||
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT->xgpe1_blk,
|
||||
&acpi_gbl_FADT.xgpe1_block,
|
||||
register_count1,
|
||||
acpi_gbl_FADT->gpe1_base,
|
||||
acpi_gbl_FADT->sci_int,
|
||||
acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.
|
||||
sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks
|
||||
[1]);
|
||||
|
||||
|
@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
|
|||
* GPE0 and GPE1 do not have to be contiguous in the GPE number
|
||||
* space. However, GPE0 always starts at GPE number zero.
|
||||
*/
|
||||
gpe_number_max = acpi_gbl_FADT->gpe1_base +
|
||||
gpe_number_max = acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -63,14 +63,18 @@ static const char *acpi_notify_value_names[] = {
|
|||
};
|
||||
#endif
|
||||
|
||||
/* Pointer to FACS needed for the Global Lock */
|
||||
|
||||
static struct acpi_table_facs *facs = NULL;
|
||||
|
||||
/* Local prototypes */
|
||||
|
||||
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
|
||||
|
||||
static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context);
|
||||
|
||||
static u32 acpi_ev_global_lock_handler(void *context);
|
||||
|
||||
static acpi_status acpi_ev_remove_global_lock_handler(void);
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_is_notify_object
|
||||
|
@ -280,51 +284,21 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
|
|||
acpi_ut_delete_generic_state(notify_info);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_global_lock_thread
|
||||
*
|
||||
* PARAMETERS: Context - From thread interface, not used
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the
|
||||
* Global Lock. Simply signal all threads that are waiting
|
||||
* for the lock.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
/* Signal threads that are waiting for the lock */
|
||||
|
||||
if (acpi_gbl_global_lock_thread_count) {
|
||||
|
||||
/* Send sufficient units to the semaphore */
|
||||
|
||||
status =
|
||||
acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
|
||||
acpi_gbl_global_lock_thread_count);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not signal Global Lock semaphore"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_global_lock_handler
|
||||
*
|
||||
* PARAMETERS: Context - From thread interface, not used
|
||||
*
|
||||
* RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED
|
||||
* RETURN: ACPI_INTERRUPT_HANDLED
|
||||
*
|
||||
* DESCRIPTION: Invoked directly from the SCI handler when a global lock
|
||||
* release interrupt occurs. Grab the global lock and queue
|
||||
* the global lock thread for execution
|
||||
* release interrupt occurs. Attempt to acquire the global lock,
|
||||
* if successful, signal the thread waiting for the lock.
|
||||
*
|
||||
* NOTE: Assumes that the semaphore can be signaled from interrupt level. If
|
||||
* this is not possible for some reason, a separate thread will have to be
|
||||
* scheduled to do this.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
|
@ -333,16 +307,24 @@ static u32 acpi_ev_global_lock_handler(void *context)
|
|||
u8 acquired = FALSE;
|
||||
|
||||
/*
|
||||
* Attempt to get the lock
|
||||
* Attempt to get the lock.
|
||||
*
|
||||
* If we don't get it now, it will be marked pending and we will
|
||||
* take another interrupt when it becomes free.
|
||||
*/
|
||||
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
|
||||
ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
|
||||
if (acquired) {
|
||||
|
||||
/* Got the lock, now wake all threads waiting for it */
|
||||
|
||||
acpi_gbl_global_lock_acquired = TRUE;
|
||||
acpi_ev_global_lock_thread(context);
|
||||
/* Send a unit to the semaphore */
|
||||
|
||||
if (ACPI_FAILURE(acpi_os_signal_semaphore(
|
||||
acpi_gbl_global_lock_semaphore, 1))) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not signal Global Lock semaphore"));
|
||||
}
|
||||
}
|
||||
|
||||
return (ACPI_INTERRUPT_HANDLED);
|
||||
|
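The reworked interrupt handler above leans on ACPI_ACQUIRE_GLOBAL_LOCK. For readers without the macro at hand, here is a stand-alone sketch of the Owned/Pending handshake it performs on the FACS GlobalLock dword, modeled on the sample code in the ACPI specification; the bit assignments and the GCC atomic builtin are assumptions, not lifted from this diff:

#include <stdint.h>

/* Bit 0 = Pending, bit 1 = Owned (per the ACPI spec's FACS definition) */
static int try_acquire_global_lock(volatile uint32_t *lock)
{
	uint32_t old_val, new_val;

	do {
		old_val = *lock;
		/* Always claim ownership; mark Pending if firmware owns it */
		new_val = (old_val & ~1u) | 2u;
		if (old_val & 2u)
			new_val |= 1u;
	} while (!__sync_bool_compare_and_swap(lock, old_val, new_val));

	/* Acquired only if we did not have to set Pending */
	return !(new_val & 1u);
}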
@ -366,6 +348,13 @@ acpi_status acpi_ev_init_global_lock_handler(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
|
||||
|
||||
status =
|
||||
acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
|
||||
(struct acpi_table_header **)&facs);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
acpi_gbl_global_lock_present = TRUE;
|
||||
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
|
||||
acpi_ev_global_lock_handler,
|
||||
|
@ -389,6 +378,31 @@ acpi_status acpi_ev_init_global_lock_handler(void)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_remove_global_lock_handler
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Remove the handler for the Global Lock
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static acpi_status acpi_ev_remove_global_lock_handler(void)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
|
||||
|
||||
acpi_gbl_global_lock_present = FALSE;
|
||||
status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
|
||||
acpi_ev_global_lock_handler);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_acquire_global_lock
|
||||
|
@ -399,6 +413,16 @@ acpi_status acpi_ev_init_global_lock_handler(void)
|
|||
*
|
||||
* DESCRIPTION: Attempt to gain ownership of the Global Lock.
|
||||
*
|
||||
* MUTEX: Interpreter must be locked
|
||||
*
|
||||
* Note: The original implementation allowed multiple threads to "acquire" the
|
||||
* Global Lock, and the OS would hold the lock until the last thread had
|
||||
* released it. However, this could potentially starve the BIOS out of the
|
||||
* lock, especially in the case where there is a tight handshake between the
|
||||
* Embedded Controller driver and the BIOS. Therefore, this implementation
|
||||
* allows only one thread to acquire the HW Global Lock at a time, and makes
|
||||
* the global lock appear as a standard mutex on the OS side.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
|
||||
|
@ -408,53 +432,51 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
|
||||
|
||||
#ifndef ACPI_APPLICATION
|
||||
/* Make sure that we actually have a global lock */
|
||||
|
||||
if (!acpi_gbl_global_lock_present) {
|
||||
return_ACPI_STATUS(AE_NO_GLOBAL_LOCK);
|
||||
/*
|
||||
* Only one thread can acquire the GL at a time, the global_lock_mutex
|
||||
* enforces this. This interface releases the interpreter if we must wait.
|
||||
*/
|
||||
status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* One more thread wants the global lock */
|
||||
|
||||
acpi_gbl_global_lock_thread_count++;
|
||||
|
||||
/*
|
||||
* If we (OS side vs. BIOS side) have the hardware lock already,
|
||||
* we are done
|
||||
* Make sure that a global lock actually exists. If not, just treat
|
||||
* the lock as a standard mutex.
|
||||
*/
|
||||
if (acpi_gbl_global_lock_acquired) {
|
||||
if (!acpi_gbl_global_lock_present) {
|
||||
acpi_gbl_global_lock_acquired = TRUE;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* We must acquire the actual hardware lock */
|
||||
/* Attempt to acquire the actual hardware lock */
|
||||
|
||||
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
|
||||
ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
|
||||
if (acquired) {
|
||||
|
||||
/* We got the lock */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
||||
"Acquired the HW Global Lock\n"));
|
||||
"Acquired hardware Global Lock\n"));
|
||||
|
||||
acpi_gbl_global_lock_acquired = TRUE;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Did not get the lock. The pending bit was set above, and we must now
|
||||
* Did not get the lock. The pending bit was set above, and we must now
|
||||
* wait until we get the global lock released interrupt.
|
||||
*/
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n"));
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
|
||||
|
||||
/*
|
||||
* Acquire the global lock semaphore first.
|
||||
* Since this wait will block, we must release the interpreter
|
||||
* Wait for handshake with the global lock interrupt handler.
|
||||
* This interface releases the interpreter if we must wait.
|
||||
*/
|
||||
status =
|
||||
acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
|
||||
timeout);
|
||||
status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
|
||||
ACPI_WAIT_FOREVER);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
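Pulling the acquire path above together, the new ordering is: take the single global_lock_mutex, fall back to mutex-only behaviour when no hardware lock exists, try the FACS lock, and otherwise sleep on the semaphore until the release interrupt fires. A condensed sketch using only the interfaces visible in this hunk, with error paths trimmed (not a drop-in replacement for the real function):

acpi_status acquire_global_lock_sketch(u16 timeout)
{
	u8 acquired = FALSE;
	acpi_status status;

	/* Only one thread may own the Global Lock; all others queue here */
	status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
	if (ACPI_FAILURE(status))
		return status;

	/* No hardware lock present: the mutex alone is the Global Lock */
	if (!acpi_gbl_global_lock_present) {
		acpi_gbl_global_lock_acquired = TRUE;
		return AE_OK;
	}

	/* Try the FACS lock; on failure the Pending bit is now set */
	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
	if (acquired) {
		acpi_gbl_global_lock_acquired = TRUE;
		return AE_OK;
	}

	/* Handshake with the Global Lock release interrupt handler */
	return acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
					     ACPI_WAIT_FOREVER);
}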
@ -477,38 +499,39 @@ acpi_status acpi_ev_release_global_lock(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_release_global_lock);
|
||||
|
||||
if (!acpi_gbl_global_lock_thread_count) {
|
||||
/* Lock must be already acquired */
|
||||
|
||||
if (!acpi_gbl_global_lock_acquired) {
|
||||
ACPI_WARNING((AE_INFO,
|
||||
"Cannot release HW Global Lock, it has not been acquired"));
|
||||
"Cannot release the ACPI Global Lock, it has not been acquired"));
|
||||
return_ACPI_STATUS(AE_NOT_ACQUIRED);
|
||||
}
|
||||
|
||||
/* One fewer thread has the global lock */
|
||||
if (acpi_gbl_global_lock_present) {
|
||||
|
||||
acpi_gbl_global_lock_thread_count--;
|
||||
if (acpi_gbl_global_lock_thread_count) {
|
||||
/* Allow any thread to release the lock */
|
||||
|
||||
/* There are still some threads holding the lock, cannot release */
|
||||
ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
/*
|
||||
* If the pending bit was set, we must write GBL_RLS to the control
|
||||
* register
|
||||
*/
|
||||
if (pending) {
|
||||
status =
|
||||
acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
|
||||
1);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
||||
"Released hardware Global Lock\n"));
|
||||
}
|
||||
|
||||
/*
|
||||
* No more threads holding lock, we can do the actual hardware
|
||||
* release
|
||||
*/
|
||||
ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, pending);
|
||||
acpi_gbl_global_lock_acquired = FALSE;
|
||||
|
||||
/*
|
||||
* If the pending bit was set, we must write GBL_RLS to the control
|
||||
* register
|
||||
*/
|
||||
if (pending) {
|
||||
status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
|
||||
1, ACPI_MTX_LOCK);
|
||||
}
|
||||
/* Release the local GL mutex */
|
||||
|
||||
acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
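The release path above clears the hardware lock and, when the firmware had marked itself Pending, writes GBL_RLS. A stand-alone sketch of what ACPI_RELEASE_GLOBAL_LOCK boils down to, again modeled on the ACPI specification's sample code (the atomic builtin is an assumption):

#include <stdint.h>

/* Clear Owned and Pending; report whether Pending was set */
static int release_global_lock(volatile uint32_t *lock)
{
	uint32_t old_val;

	do {
		old_val = *lock;
	} while (!__sync_bool_compare_and_swap(lock, old_val, old_val & ~3u));

	/* Non-zero: firmware is waiting, caller must set GBL_RLS */
	return old_val & 1u;
}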
|
@ -558,6 +581,12 @@ void acpi_ev_terminate(void)
|
|||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
|
||||
}
|
||||
|
||||
status = acpi_ev_remove_global_lock_handler();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not remove Global Lock handler"));
|
||||
}
|
||||
}
|
||||
|
||||
/* Deallocate all handler objects installed within GPE info structs */
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|||
u32 bit_width, acpi_integer * value)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_status status2;
|
||||
acpi_adr_space_handler handler;
|
||||
acpi_adr_space_setup region_setup;
|
||||
union acpi_operand_object *handler_desc;
|
||||
|
@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|||
* setup will potentially execute control methods
|
||||
* (e.g., _REG method for this region)
|
||||
*/
|
||||
acpi_ex_exit_interpreter();
|
||||
acpi_ex_relinquish_interpreter();
|
||||
|
||||
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
|
||||
handler_desc->address_space.context,
|
||||
|
@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|||
|
||||
/* Re-enter the interpreter */
|
||||
|
||||
status2 = acpi_ex_enter_interpreter();
|
||||
if (ACPI_FAILURE(status2)) {
|
||||
return_ACPI_STATUS(status2);
|
||||
}
|
||||
acpi_ex_reacquire_interpreter();
|
||||
|
||||
/* Check for failure of the Region Setup */
|
||||
|
||||
|
@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|||
* exit the interpreter because the handler *might* block -- we don't
|
||||
* know what it will do, so we can't hold the lock on the interpreter.
|
||||
*/
|
||||
acpi_ex_exit_interpreter();
|
||||
acpi_ex_relinquish_interpreter();
|
||||
}
|
||||
|
||||
/* Call the handler */
|
||||
|
@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
|||
* We just returned from a non-default handler, we must re-enter the
|
||||
* interpreter
|
||||
*/
|
||||
status2 = acpi_ex_enter_interpreter();
|
||||
if (ACPI_FAILURE(status2)) {
|
||||
return_ACPI_STATUS(status2);
|
||||
}
|
||||
acpi_ex_reacquire_interpreter();
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -48,6 +48,11 @@
|
|||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evrgnini")
|
||||
|
||||
/* Local prototypes */
|
||||
static u8 acpi_ev_match_pci_root_bridge(char *id);
|
||||
|
||||
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_system_memory_region_setup
|
||||
|
@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
|
|||
* DESCRIPTION: Setup a system_memory operation region
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_system_memory_region_setup(acpi_handle handle,
|
||||
u32 function,
|
||||
|
@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
|
|||
union acpi_operand_object *handler_obj;
|
||||
struct acpi_namespace_node *parent_node;
|
||||
struct acpi_namespace_node *pci_root_node;
|
||||
struct acpi_namespace_node *pci_device_node;
|
||||
union acpi_operand_object *region_obj =
|
||||
(union acpi_operand_object *)handle;
|
||||
struct acpi_device_id object_hID;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
|
||||
|
||||
|
@ -215,45 +221,30 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
|
|||
|
||||
pci_root_node = parent_node;
|
||||
while (pci_root_node != acpi_gbl_root_node) {
|
||||
status =
|
||||
acpi_ut_execute_HID(pci_root_node, &object_hID);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
/*
|
||||
* Got a valid _HID string, check if this is a PCI root.
|
||||
* New for ACPI 3.0: check for a PCI Express root also.
|
||||
*/
|
||||
if (!
|
||||
(ACPI_STRNCMP
|
||||
(object_hID.value, PCI_ROOT_HID_STRING,
|
||||
sizeof(PCI_ROOT_HID_STRING)))
|
||||
||
|
||||
!(ACPI_STRNCMP
|
||||
(object_hID.value,
|
||||
PCI_EXPRESS_ROOT_HID_STRING,
|
||||
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
|
||||
|
||||
/* Install a handler for this PCI root bridge */
|
||||
/* Get the _HID/_CID in order to detect a root_bridge */
|
||||
|
||||
status =
|
||||
acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_SAME_HANDLER) {
|
||||
/*
|
||||
* It is OK if the handler is already installed on the root
|
||||
* bridge. Still need to return a context object for the
|
||||
* new PCI_Config operation region, however.
|
||||
*/
|
||||
status = AE_OK;
|
||||
} else {
|
||||
ACPI_EXCEPTION((AE_INFO,
|
||||
status,
|
||||
"Could not install PciConfig handler for Root Bridge %4.4s",
|
||||
acpi_ut_get_node_name
|
||||
(pci_root_node)));
|
||||
}
|
||||
if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
|
||||
|
||||
/* Install a handler for this PCI root bridge */
|
||||
|
||||
status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_SAME_HANDLER) {
|
||||
/*
|
||||
* It is OK if the handler is already installed on the root
|
||||
* bridge. Still need to return a context object for the
|
||||
* new PCI_Config operation region, however.
|
||||
*/
|
||||
status = AE_OK;
|
||||
} else {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not install PciConfig handler for Root Bridge %4.4s",
|
||||
acpi_ut_get_node_name
|
||||
(pci_root_node)));
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
pci_root_node = acpi_ns_get_parent_node(pci_root_node);
|
||||
|
@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
|
|||
/*
|
||||
* For PCI_Config space access, we need the segment, bus,
|
||||
* device and function numbers. Acquire them here.
|
||||
*
|
||||
* Find the parent device object. (This allows the operation region to be
|
||||
* within a subscope under the device, such as a control method.)
|
||||
*/
|
||||
pci_device_node = region_obj->region.node;
|
||||
while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
|
||||
pci_device_node = acpi_ns_get_parent_node(pci_device_node);
|
||||
}
|
||||
|
||||
if (!pci_device_node) {
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the PCI device and function numbers from the _ADR object
|
||||
* contained in the parent's scope.
|
||||
*/
|
||||
status =
|
||||
acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node,
|
||||
acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
|
||||
&pci_value);
|
||||
|
||||
/*
|
||||
|
@ -327,6 +329,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
|
|||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_match_pci_root_bridge
|
||||
*
|
||||
* PARAMETERS: Id - The HID/CID in string format
|
||||
*
|
||||
* RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
|
||||
*
|
||||
* DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static u8 acpi_ev_match_pci_root_bridge(char *id)
|
||||
{
|
||||
|
||||
/*
|
||||
* Check if this is a PCI root.
|
||||
* ACPI 3.0+: check for a PCI Express root also.
|
||||
*/
|
||||
if (!(ACPI_STRNCMP(id,
|
||||
PCI_ROOT_HID_STRING,
|
||||
sizeof(PCI_ROOT_HID_STRING))) ||
|
||||
!(ACPI_STRNCMP(id,
|
||||
PCI_EXPRESS_ROOT_HID_STRING,
|
||||
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_is_pci_root_bridge
|
||||
*
|
||||
* PARAMETERS: Node - Device node being examined
|
||||
*
|
||||
* RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
|
||||
*
|
||||
* DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
|
||||
* examining the _HID and _CID for the device.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_device_id hid;
|
||||
struct acpi_compatible_id_list *cid;
|
||||
acpi_native_uint i;
|
||||
|
||||
/*
|
||||
* Get the _HID and check for a PCI Root Bridge
|
||||
*/
|
||||
status = acpi_ut_execute_HID(node, &hid);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
if (acpi_ev_match_pci_root_bridge(hid.value)) {
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
/*
|
||||
* The _HID did not match.
|
||||
* Get the _CID and check for a PCI Root Bridge
|
||||
*/
|
||||
status = acpi_ut_execute_CID(node, &cid);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
/* Check all _CIDs in the returned list */
|
||||
|
||||
for (i = 0; i < cid->count; i++) {
|
||||
if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
|
||||
ACPI_FREE(cid);
|
||||
return (TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
ACPI_FREE(cid);
|
||||
return (FALSE);
|
||||
}
|
||||
|
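The two new helpers factor the root-bridge test out of the walk loop: check the _HID first, then every _CID entry. For reference, a minimal stand-alone version of the string match; the PNP ID literals are the conventional values behind PCI_ROOT_HID_STRING and PCI_EXPRESS_ROOT_HID_STRING and should be treated as assumptions, since the macros are not expanded in this hunk:

#include <string.h>

static int is_pci_root_bridge_id(const char *id)
{
	/* "PNP0A03": PCI root bridge; "PNP0A08": PCI Express root bridge */
	return strncmp(id, "PNP0A03", sizeof("PNP0A03")) == 0 ||
	       strncmp(id, "PNP0A08", sizeof("PNP0A08")) == 0;
}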
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_pci_bar_region_setup
|
||||
|
@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
|
|||
* a PCI address in the scope of the definition. This address is
|
||||
* required to perform an access to PCI config space.
|
||||
*
|
||||
* MUTEX: Interpreter should be unlocked, because we may run the _REG
|
||||
* method for this region.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -142,9 +142,10 @@ u32 acpi_ev_install_sci_handler(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_install_sci_handler);
|
||||
|
||||
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
|
||||
acpi_ev_sci_xrupt_handler,
|
||||
acpi_gbl_gpe_xrupt_list_head);
|
||||
status =
|
||||
acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
|
||||
acpi_ev_sci_xrupt_handler,
|
||||
acpi_gbl_gpe_xrupt_list_head);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -175,8 +176,9 @@ acpi_status acpi_ev_remove_sci_handler(void)
|
|||
|
||||
/* Just let the OS remove the handler and disable the level */
|
||||
|
||||
status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
|
||||
acpi_ev_sci_xrupt_handler);
|
||||
status =
|
||||
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
|
||||
acpi_ev_sci_xrupt_handler);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
|
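Both SCI hunks show the same table-manager fallout: acpi_gbl_FADT is no longer a pointer into a mapped table but a statically allocated struct copy, and the sci_int field was renamed. The pattern, reduced to one line each way:

/* Before (2006): pointer dereference, old field name */
irq = (u32) acpi_gbl_FADT->sci_int;

/* After (2007): direct struct access, renamed field */
irq = (u32) acpi_gbl_FADT.sci_interrupt;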
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
|
|||
return (AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ex_enter_interpreter();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
}
|
||||
/* Must lock interpreter to prevent race conditions */
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
status = acpi_ev_acquire_global_lock(timeout);
|
||||
acpi_ex_exit_interpreter();
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -44,6 +44,7 @@
|
|||
#include <acpi/acpi.h>
|
||||
#include <acpi/acevents.h>
|
||||
#include <acpi/acnamesp.h>
|
||||
#include <acpi/actables.h>
|
||||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evxfevnt")
|
||||
|
@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(acpi_enable);
|
||||
|
||||
/* Make sure we have the FADT */
|
||||
/* ACPI tables must be present */
|
||||
|
||||
if (!acpi_gbl_FADT) {
|
||||
ACPI_WARNING((AE_INFO, "No FADT information present!"));
|
||||
if (!acpi_tb_tables_loaded()) {
|
||||
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
|
||||
}
|
||||
|
||||
/* Check current mode */
|
||||
|
||||
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
|
||||
"System is already in ACPI mode\n"));
|
||||
|
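With the FADT pointer gone, acpi_enable() now asks the table manager whether any tables are loaded before checking the hardware mode. A simplified sketch of the resulting flow; acpi_hw_set_mode() is not shown in this hunk and is included only as an assumption about the unchanged tail of the function:

acpi_status acpi_enable_sketch(void)
{
	/* ACPI tables must be present */
	if (!acpi_tb_tables_loaded())
		return AE_NO_ACPI_TABLES;

	/* Nothing to do if firmware already handed over control */
	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI)
		return AE_OK;

	/* Otherwise transition from legacy to ACPI mode */
	return acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
}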
@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(acpi_disable);
|
||||
|
||||
if (!acpi_gbl_FADT) {
|
||||
ACPI_WARNING((AE_INFO, "No FADT information present!"));
|
||||
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
|
||||
}
|
||||
|
||||
if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
|
||||
"System is already in legacy (non-ACPI) mode\n"));
|
||||
|
@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
	 */
	status =
	    acpi_set_register(acpi_gbl_fixed_event_info[event].
			      enable_register_id, 1, ACPI_MTX_LOCK);
			      enable_register_id, 1);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
|
||||
|
@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
|
|||
|
||||
status =
|
||||
acpi_get_register(acpi_gbl_fixed_event_info[event].
|
||||
enable_register_id, &value, ACPI_MTX_LOCK);
|
||||
enable_register_id, &value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
|
|||
*/
|
||||
status =
|
||||
acpi_set_register(acpi_gbl_fixed_event_info[event].
|
||||
enable_register_id, 0, ACPI_MTX_LOCK);
|
||||
enable_register_id, 0);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status =
|
||||
acpi_get_register(acpi_gbl_fixed_event_info[event].
|
||||
enable_register_id, &value, ACPI_MTX_LOCK);
|
||||
enable_register_id, &value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
|
|||
*/
|
||||
status =
|
||||
acpi_set_register(acpi_gbl_fixed_event_info[event].
|
||||
status_register_id, 1, ACPI_MTX_LOCK);
|
||||
status_register_id, 1);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
|
|||
|
||||
status =
|
||||
acpi_get_register(acpi_gbl_fixed_event_info[event].
|
||||
status_register_id, event_status, ACPI_MTX_LOCK);
|
||||
status_register_id, event_status);
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
|
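All of the register hunks in this file make the same mechanical change: the ACPI_MTX_LOCK argument disappears because register access is now serialized inside acpi_set_register()/acpi_get_register(). Old and new call shapes side by side:

/* Before (2006): caller passed an explicit locking flag */
status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE, 1, ACPI_MTX_LOCK);

/* After (2007): two arguments; locking handled internally */
status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE, 1);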
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -54,7 +54,7 @@ ACPI_MODULE_NAME("exconfig")
|
|||
|
||||
/* Local prototypes */
|
||||
static acpi_status
|
||||
acpi_ex_add_table(struct acpi_table_header *table,
|
||||
acpi_ex_add_table(acpi_native_uint table_index,
|
||||
struct acpi_namespace_node *parent_node,
|
||||
union acpi_operand_object **ddb_handle);
|
||||
|
||||
|
@ -74,12 +74,11 @@ acpi_ex_add_table(struct acpi_table_header *table,
|
|||
******************************************************************************/
|
||||
|
||||
static acpi_status
|
||||
acpi_ex_add_table(struct acpi_table_header *table,
|
||||
acpi_ex_add_table(acpi_native_uint table_index,
|
||||
struct acpi_namespace_node *parent_node,
|
||||
union acpi_operand_object **ddb_handle)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_table_desc table_info;
|
||||
union acpi_operand_object *obj_desc;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_add_table);
|
||||
|
@ -98,42 +97,16 @@ acpi_ex_add_table(struct acpi_table_header *table,
|
|||
|
||||
/* Install the new table into the local data structures */
|
||||
|
||||
ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc));
|
||||
|
||||
table_info.type = ACPI_TABLE_ID_SSDT;
|
||||
table_info.pointer = table;
|
||||
table_info.length = (acpi_size) table->length;
|
||||
table_info.allocation = ACPI_MEM_ALLOCATED;
|
||||
|
||||
status = acpi_tb_install_table(&table_info);
|
||||
obj_desc->reference.object = table_info.installed_desc;
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_ALREADY_EXISTS) {
|
||||
|
||||
/* Table already exists, just return the handle */
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
goto cleanup;
|
||||
}
|
||||
obj_desc->reference.object = ACPI_CAST_PTR(void, table_index);
|
||||
|
||||
/* Add the table to the namespace */
|
||||
|
||||
status = acpi_ns_load_table(table_info.installed_desc, parent_node);
|
||||
status = acpi_ns_load_table(table_index, parent_node);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
/* Uninstall table on error */
|
||||
|
||||
(void)acpi_tb_uninstall_table(table_info.installed_desc);
|
||||
goto cleanup;
|
||||
acpi_ut_remove_reference(obj_desc);
|
||||
*ddb_handle = NULL;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
|
||||
cleanup:
|
||||
acpi_ut_remove_reference(obj_desc);
|
||||
*ddb_handle = NULL;
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -146,7 +119,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
|
|||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Load an ACPI table
|
||||
* DESCRIPTION: Load an ACPI table from the RSDT/XSDT
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
|
@ -156,33 +129,20 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
{
|
||||
acpi_status status;
|
||||
union acpi_operand_object **operand = &walk_state->operands[0];
|
||||
struct acpi_table_header *table;
|
||||
acpi_native_uint table_index;
|
||||
struct acpi_namespace_node *parent_node;
|
||||
struct acpi_namespace_node *start_node;
|
||||
struct acpi_namespace_node *parameter_node = NULL;
|
||||
union acpi_operand_object *ddb_handle;
|
||||
struct acpi_table_header *table;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_load_table_op);
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* Make sure that the signature does not match one of the tables that
|
||||
* is already loaded.
|
||||
*/
|
||||
status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
|
||||
if (status == AE_OK) {
|
||||
|
||||
/* Signature matched -- don't allow override */
|
||||
|
||||
return_ACPI_STATUS(AE_ALREADY_EXISTS);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Find the ACPI table */
|
||||
/* Find the ACPI table in the RSDT/XSDT */
|
||||
|
||||
status = acpi_tb_find_table(operand[0]->string.pointer,
|
||||
operand[1]->string.pointer,
|
||||
operand[2]->string.pointer, &table);
|
||||
operand[2]->string.pointer, &table_index);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status != AE_NOT_FOUND) {
|
||||
return_ACPI_STATUS(status);
|
||||
|
@ -245,7 +205,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
|
||||
/* Load the table into the namespace */
|
||||
|
||||
status = acpi_ex_add_table(table, parent_node, &ddb_handle);
|
||||
status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -266,9 +226,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
}
|
||||
}
|
||||
|
||||
ACPI_INFO((AE_INFO,
|
||||
"Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
|
||||
table->signature, table->oem_id, table->oem_table_id));
|
||||
status = acpi_get_table_by_index(table_index, &table);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
ACPI_INFO((AE_INFO,
|
||||
"Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
|
||||
table->signature, table->oem_id,
|
||||
table->oem_table_id));
|
||||
}
|
||||
|
||||
*return_desc = ddb_handle;
|
||||
return_ACPI_STATUS(status);
|
||||
|
@ -278,7 +242,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
*
|
||||
* FUNCTION: acpi_ex_load_op
|
||||
*
|
||||
* PARAMETERS: obj_desc - Region or Field where the table will be
|
||||
* PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
|
||||
* obtained
|
||||
* Target - Where a handle to the table will be stored
|
||||
* walk_state - Current state
|
||||
|
@ -287,6 +251,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
*
|
||||
* DESCRIPTION: Load an ACPI table from a field or operation region
|
||||
*
|
||||
* NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
|
||||
* objects before this code is reached.
|
||||
*
|
||||
* If source is an operation region, it must refer to system_memory, as
|
||||
* per the ACPI specification.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
|
@ -294,22 +264,26 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
|
|||
union acpi_operand_object *target,
|
||||
struct acpi_walk_state *walk_state)
|
||||
{
|
||||
acpi_status status;
|
||||
union acpi_operand_object *ddb_handle;
|
||||
union acpi_operand_object *buffer_desc = NULL;
|
||||
struct acpi_table_header *table_ptr = NULL;
|
||||
acpi_physical_address address;
|
||||
struct acpi_table_header table_header;
|
||||
acpi_integer temp;
|
||||
u32 i;
|
||||
struct acpi_table_desc table_desc;
|
||||
acpi_native_uint table_index;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_load_op);
|
||||
|
||||
/* Object can be either an op_region or a Field */
|
||||
ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
|
||||
|
||||
/* Source Object can be either an op_region or a Buffer/Field */
|
||||
|
||||
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
|
||||
case ACPI_TYPE_REGION:
|
||||
|
||||
/* Region must be system_memory (from ACPI spec) */
|
||||
|
||||
if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
|
||||
obj_desc,
|
||||
acpi_ut_get_object_type_name(obj_desc)));
|
||||
|
@ -325,113 +299,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
|
|||
}
|
||||
}
|
||||
|
||||
/* Get the base physical address of the region */
|
||||
|
||||
address = obj_desc->region.address;
|
||||
|
||||
/* Get part of the table header to get the table length */
|
||||
|
||||
table_header.length = 0;
|
||||
for (i = 0; i < 8; i++) {
|
||||
status =
|
||||
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
|
||||
(acpi_physical_address)
|
||||
(i + address), 8,
|
||||
&temp);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Get the one valid byte of the returned 64-bit value */
|
||||
|
||||
ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
|
||||
}
|
||||
|
||||
/* Sanity check the table length */
|
||||
|
||||
if (table_header.length < sizeof(struct acpi_table_header)) {
|
||||
return_ACPI_STATUS(AE_BAD_HEADER);
|
||||
}
|
||||
|
||||
/* Allocate a buffer for the entire table */
|
||||
|
||||
table_ptr = ACPI_ALLOCATE(table_header.length);
|
||||
if (!table_ptr) {
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
}
|
||||
|
||||
/* Get the entire table from the op region */
|
||||
|
||||
for (i = 0; i < table_header.length; i++) {
|
||||
status =
|
||||
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
|
||||
(acpi_physical_address)
|
||||
(i + address), 8,
|
||||
&temp);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Get the one valid byte of the returned 64-bit value */
|
||||
|
||||
ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
|
||||
}
|
||||
table_desc.address = obj_desc->region.address;
|
||||
table_desc.length = obj_desc->region.length;
|
||||
table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_LOCAL_REGION_FIELD:
|
||||
case ACPI_TYPE_LOCAL_BANK_FIELD:
|
||||
case ACPI_TYPE_LOCAL_INDEX_FIELD:
|
||||
case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Field %p %s\n",
|
||||
obj_desc,
|
||||
/* Simply extract the buffer from the buffer object */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
|
||||
"Load from Buffer or Field %p %s\n", obj_desc,
|
||||
acpi_ut_get_object_type_name(obj_desc)));
|
||||
|
||||
/*
|
||||
* The length of the field must be at least as large as the table.
|
||||
* Read the entire field and thus the entire table. Buffer is
|
||||
* allocated during the read.
|
||||
*/
|
||||
status =
|
||||
acpi_ex_read_data_from_field(walk_state, obj_desc,
|
||||
&buffer_desc);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
|
||||
obj_desc->buffer.pointer);
|
||||
table_desc.length = table_desc.pointer->length;
|
||||
table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
|
||||
|
||||
table_ptr = ACPI_CAST_PTR(struct acpi_table_header,
|
||||
buffer_desc->buffer.pointer);
|
||||
|
||||
/* All done with the buffer_desc, delete it */
|
||||
|
||||
buffer_desc->buffer.pointer = NULL;
|
||||
acpi_ut_remove_reference(buffer_desc);
|
||||
|
||||
/* Sanity check the table length */
|
||||
|
||||
if (table_ptr->length < sizeof(struct acpi_table_header)) {
|
||||
status = AE_BAD_HEADER;
|
||||
goto cleanup;
|
||||
}
|
||||
obj_desc->buffer.pointer = NULL;
|
||||
break;
|
||||
|
||||
default:
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
}
|
||||
|
||||
/* The table must be either an SSDT or a PSDT */
|
||||
|
||||
if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) &&
|
||||
(!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
|
||||
table_ptr->signature));
|
||||
status = AE_BAD_SIGNATURE;
|
||||
/*
|
||||
* Install the new table into the local data structures
|
||||
*/
|
||||
status = acpi_tb_add_table(&table_desc, &table_index);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Install the new table into the local data structures */
|
||||
|
||||
status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
|
||||
status =
|
||||
acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
/* On error, table_ptr was deallocated above */
|
||||
|
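The rewritten Load handler no longer copies the table byte-by-byte through the address-space dispatcher; it simply describes the source to the new table manager. A condensed sketch of the switch above, using the field names exactly as they appear in the hunk (not a drop-in replacement for the full function):

switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_REGION:		/* SystemMemory op-region: map in place */
	table_desc.address = obj_desc->region.address;
	table_desc.length = obj_desc->region.length;
	table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
	break;

case ACPI_TYPE_BUFFER:		/* Buffer or resolved region_field */
	table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
					   obj_desc->buffer.pointer);
	table_desc.length = table_desc.pointer->length;
	table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
	obj_desc->buffer.pointer = NULL;	/* ownership moves to the table manager */
	break;

default:
	return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}

/* Hand the descriptor to the table manager, then load the namespace */
status = acpi_tb_add_table(&table_desc, &table_index);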
@ -450,13 +352,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_INFO((AE_INFO,
|
||||
"Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]",
|
||||
table_ptr->oem_id, table_ptr->oem_table_id));
|
||||
|
||||
cleanup:
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_FREE(table_ptr);
|
||||
acpi_tb_delete_table(&table_desc);
|
||||
}
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -477,7 +375,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
|
|||
{
|
||||
acpi_status status = AE_OK;
|
||||
union acpi_operand_object *table_desc = ddb_handle;
|
||||
struct acpi_table_desc *table_info;
|
||||
acpi_native_uint table_index;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_unload_table);
|
||||
|
||||
|
@ -493,19 +391,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
|
|||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Get the actual table descriptor from the ddb_handle */
|
||||
/* Get the table index from the ddb_handle */
|
||||
|
||||
table_info = (struct acpi_table_desc *)table_desc->reference.object;
|
||||
table_index = (acpi_native_uint) table_desc->reference.object;
|
||||
|
||||
/*
|
||||
* Delete the entire namespace under this table Node
|
||||
* (Offset contains the table_id)
|
||||
*/
|
||||
acpi_ns_delete_namespace_by_owner(table_info->owner_id);
|
||||
acpi_tb_delete_namespace_by_owner(table_index);
|
||||
acpi_tb_release_owner_id(table_index);
|
||||
|
||||
/* Delete the table itself */
|
||||
|
||||
(void)acpi_tb_uninstall_table(table_info->installed_desc);
|
||||
acpi_tb_set_table_loaded_flag(table_index, FALSE);
|
||||
|
||||
/* Delete the table descriptor (ddb_handle) */
|
||||
|
||||
|
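Worth noting across the Load/Unload pair: the ddb_handle reference object no longer stores a table descriptor pointer, only the RSDT/XSDT table index, which the unload path turns back into owner-id and loaded-flag operations. The two ends of that contract, lifted from the hunks above (table_desc below is the local alias for ddb_handle in acpi_ex_unload_table):

/* Load side (acpi_ex_add_table): remember only the table index */
obj_desc->reference.object = ACPI_CAST_PTR(void, table_index);

/* Unload side (acpi_ex_unload_table): recover the index and tear down */
table_index = (acpi_native_uint) table_desc->reference.object;
acpi_tb_delete_namespace_by_owner(table_index);
acpi_tb_release_owner_id(table_index);
acpi_tb_set_table_loaded_flag(table_index, FALSE);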
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
|
|||
union acpi_operand_object **operand = &walk_state->operands[0];
|
||||
union acpi_operand_object *obj_desc;
|
||||
struct acpi_namespace_node *node;
|
||||
struct acpi_table_header *table;
|
||||
union acpi_operand_object *region_obj2;
|
||||
acpi_native_uint table_index;
|
||||
struct acpi_table_header *table;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_create_table_region);
|
||||
|
||||
|
@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
|
|||
|
||||
status = acpi_tb_find_table(operand[1]->string.pointer,
|
||||
operand[2]->string.pointer,
|
||||
operand[3]->string.pointer, &table);
|
||||
operand[3]->string.pointer, &table_index);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
|
|||
region_obj2 = obj_desc->common.next_object;
|
||||
region_obj2->extra.region_context = NULL;
|
||||
|
||||
status = acpi_get_table_by_index(table_index, &table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Init the region from the operands */
|
||||
|
||||
obj_desc->region.space_id = REGION_DATA_TABLE;
|
||||
|
@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
|
|||
|
||||
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
|
||||
if (!obj_desc) {
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
status = AE_NO_MEMORY;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Save the method's AML pointer and length */
|
||||
|
@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
|
|||
* Get the sync_level. If method is serialized, a mutex will be
|
||||
* created for this method when it is parsed.
|
||||
*/
|
||||
if (acpi_gbl_all_methods_serialized) {
|
||||
obj_desc->method.sync_level = 0;
|
||||
obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
|
||||
} else if (method_flags & AML_METHOD_SERIALIZED) {
|
||||
if (method_flags & AML_METHOD_SERIALIZED) {
|
||||
/*
|
||||
* ACPI 1.0: sync_level = 0
|
||||
* ACPI 2.0: sync_level = sync_level in method declaration
|
||||
|
@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
|
|||
|
||||
acpi_ut_remove_reference(obj_desc);
|
||||
|
||||
exit:
|
||||
/* Remove a reference to the operand */
|
||||
|
||||
acpi_ut_remove_reference(operand[1]);
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value);
|
|||
|
||||
static void acpi_ex_out_pointer(char *title, void *value);
|
||||
|
||||
static void acpi_ex_out_address(char *title, acpi_physical_address value);
|
||||
|
||||
static void
|
||||
acpi_ex_dump_object(union acpi_operand_object *obj_desc,
|
||||
struct acpi_exdump_info *info);
|
||||
|
@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = {
|
|||
{ACPI_EXD_STRING, 0, NULL}
|
||||
};
|
||||
|
||||
static struct acpi_exdump_info acpi_ex_dump_buffer[4] = {
|
||||
static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
|
||||
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
|
||||
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
|
||||
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
|
||||
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
|
||||
{ACPI_EXD_BUFFER, 0, NULL}
|
||||
};
|
||||
|
||||
|
@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
|
|||
|
||||
static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
|
||||
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
|
||||
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
|
||||
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.length), "Length"},
|
||||
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
|
||||
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
|
||||
{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
|
||||
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
|
||||
"System Notify"},
|
||||
|
@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
|
|||
break;
|
||||
|
||||
case ACPI_EXD_POINTER:
|
||||
case ACPI_EXD_ADDRESS:
|
||||
|
||||
acpi_ex_out_pointer(name,
|
||||
*ACPI_CAST_PTR(void *, target));
|
||||
break;
|
||||
|
||||
case ACPI_EXD_ADDRESS:
|
||||
|
||||
acpi_ex_out_address(name,
|
||||
*ACPI_CAST_PTR
|
||||
(acpi_physical_address, target));
|
||||
break;
|
||||
|
||||
case ACPI_EXD_STRING:
|
||||
|
||||
acpi_ut_print_string(obj_desc->string.pointer,
|
||||
|
@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value)
|
|||
acpi_os_printf("%20s : %p\n", title, value);
|
||||
}
|
||||
|
||||
static void acpi_ex_out_address(char *title, acpi_physical_address value)
|
||||
{
|
||||
|
||||
#if ACPI_MACHINE_WIDTH == 16
|
||||
acpi_os_printf("%20s : %p\n", title, value);
|
||||
#else
|
||||
acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
|
||||
#endif
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_dump_namespace_node
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
|
|||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
|
||||
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
|
||||
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
|
||||
acpi_ut_get_region_name(rgn_desc->region.
|
||||
space_id),
|
||||
rgn_desc->region.space_id,
|
||||
obj_desc->common_field.access_byte_width,
|
||||
obj_desc->common_field.base_byte_offset,
|
||||
field_datum_byte_offset,
|
||||
ACPI_FORMAT_UINT64(address)));
|
||||
field_datum_byte_offset, (void *)address));
|
||||
|
||||
/* Invoke the appropriate address_space/op_region handler */
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -44,6 +44,7 @@
|
|||
|
||||
#include <acpi/acpi.h>
|
||||
#include <acpi/acinterp.h>
|
||||
#include <acpi/acevents.h>
|
||||
|
||||
#define _COMPONENT ACPI_EXECUTER
|
||||
ACPI_MODULE_NAME("exmutex")
|
||||
|
@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
|
|||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Sanity check -- we must have a valid thread ID */
|
||||
/* Sanity check: we must have a valid thread ID */
|
||||
|
||||
if (!walk_state->thread) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
|
@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
|
|||
/* Support for multiple acquires by the owning thread */
|
||||
|
||||
if (obj_desc->mutex.owner_thread) {
|
||||
|
||||
/* Special case for Global Lock, allow all threads */
|
||||
|
||||
if ((obj_desc->mutex.owner_thread->thread_id ==
|
||||
walk_state->thread->thread_id) ||
|
||||
(obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
|
||||
if (obj_desc->mutex.owner_thread->thread_id ==
|
||||
walk_state->thread->thread_id) {
|
||||
/*
|
||||
* The mutex is already owned by this thread,
|
||||
* just increment the acquisition depth
|
||||
* The mutex is already owned by this thread, just increment the
|
||||
* acquisition depth
|
||||
*/
|
||||
obj_desc->mutex.acquisition_depth++;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
}
|
||||
|
||||
/* Acquire the mutex, wait if necessary */
|
||||
/* Acquire the mutex, wait if necessary. Special case for Global Lock */
|
||||
|
||||
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
|
||||
status =
|
||||
acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
|
||||
} else {
|
||||
status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
|
||||
(u16) time_desc->integer.
|
||||
value);
|
||||
}
|
||||
|
||||
status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
/* Includes failure from a timeout on time_desc */
|
||||
|
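The net effect of the exmutex.c changes: the AML Global Lock mutex is detected by comparing against acpi_gbl_global_lock_mutex (rather than the old ACPI_GLOBAL_LOCK sentinel) and routed to the event-layer helpers, while ordinary mutexes go straight to the OS layer. Acquire and release side by side, condensed from this hunk and the release hunk further down:

/* Acquire */
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex)
	status = acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
else
	status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
					   (u16) time_desc->integer.value);

/* Release */
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex)
	status = acpi_ev_release_global_lock();
else
	acpi_os_release_mutex(obj_desc->mutex.os_mutex);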
@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
|
|||
/* Link the mutex to the current thread for force-unlock at method exit */
|
||||
|
||||
acpi_ex_link_mutex(obj_desc, walk_state->thread);
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
|
@ -232,7 +236,7 @@ acpi_status
|
|||
acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
||||
struct acpi_walk_state *walk_state)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_release_mutex);
|
||||
|
||||
|
@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
|
||||
}
|
||||
|
||||
/* Sanity check -- we must have a valid thread ID */
|
||||
/* Sanity check: we must have a valid thread ID */
|
||||
|
||||
if (!walk_state->thread) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
|
@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
*/
|
||||
if ((obj_desc->mutex.owner_thread->thread_id !=
|
||||
walk_state->thread->thread_id)
|
||||
&& (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) {
|
||||
&& (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
|
||||
(unsigned long)walk_state->thread->thread_id,
|
||||
|
@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
}
|
||||
|
||||
/*
|
||||
* The sync level of the mutex must be less than or
|
||||
* equal to the current sync level
|
||||
* The sync level of the mutex must be less than or equal to the current
|
||||
* sync level
|
||||
*/
|
||||
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
|
@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
|
||||
acpi_ex_unlink_mutex(obj_desc);
|
||||
|
||||
/* Release the mutex */
|
||||
/* Release the mutex, special case for Global Lock */
|
||||
|
||||
status = acpi_ex_system_release_mutex(obj_desc);
|
||||
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
|
||||
status = acpi_ev_release_global_lock();
|
||||
} else {
|
||||
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
|
||||
}
|
||||
|
||||
/* Update the mutex and walk state, restore sync_level before acquire */
|
||||
/* Update the mutex and restore sync_level */
|
||||
|
||||
obj_desc->mutex.owner_thread = NULL;
|
||||
walk_state->thread->current_sync_level =
|
||||
|
@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
*
|
||||
* DESCRIPTION: Release all mutexes held by this thread
|
||||
*
|
||||
* NOTE: This function is called as the thread is exiting the interpreter.
|
||||
* Mutexes are not released when an individual control method is exited, but
|
||||
* only when the parent thread actually exits the interpreter. This allows one
|
||||
* method to acquire a mutex, and a different method to release it, as long as
|
||||
* this is performed underneath a single parent control method.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
|
||||
{
|
||||
union acpi_operand_object *next = thread->acquired_mutex_list;
|
||||
union acpi_operand_object *this;
|
||||
acpi_status status;
|
||||
union acpi_operand_object *obj_desc;
|
||||
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
/* Traverse the list of owned mutexes, releasing each one */
|
||||
|
||||
while (next) {
|
||||
this = next;
|
||||
next = this->mutex.next;
|
||||
obj_desc = next;
|
||||
next = obj_desc->mutex.next;
|
||||
|
||||
this->mutex.acquisition_depth = 1;
|
||||
this->mutex.prev = NULL;
|
||||
this->mutex.next = NULL;
|
||||
obj_desc->mutex.prev = NULL;
|
||||
obj_desc->mutex.next = NULL;
|
||||
obj_desc->mutex.acquisition_depth = 0;
|
||||
|
||||
/* Release the mutex */
|
||||
/* Release the mutex, special case for Global Lock */
|
||||
|
||||
status = acpi_ex_system_release_mutex(this);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
continue;
|
||||
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
|
||||
|
||||
/* Ignore errors */
|
||||
|
||||
(void)acpi_ev_release_global_lock();
|
||||
} else {
|
||||
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
|
||||
}
|
||||
|
||||
/* Mark mutex unowned */
|
||||
|
||||
this->mutex.owner_thread = NULL;
|
||||
obj_desc->mutex.owner_thread = NULL;
|
||||
|
||||
/* Update Thread sync_level (Last mutex is the important one) */
|
||||
|
||||
thread->current_sync_level = this->mutex.original_sync_level;
|
||||
thread->current_sync_level =
|
||||
obj_desc->mutex.original_sync_level;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
status = AE_NO_MEMORY;
|
||||
goto cleanup;
|
||||
}
|
||||
#if ACPI_MACHINE_WIDTH != 16
|
||||
return_desc->integer.value = acpi_os_get_timer();
|
||||
#endif
|
||||
break;
|
||||
|
||||
default: /* Unknown opcode */
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function,

/* Create a new mapping starting at the address given */

status = acpi_os_map_memory(address, window_size,
(void **)&mem_info->
mapped_logical_address);
if (ACPI_FAILURE(status)) {
mem_info->mapped_logical_address =
acpi_os_map_memory((acpi_native_uint) address, window_size);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
"Could not map memory at %8.8X%8.8X, size %X",
ACPI_FORMAT_UINT64(address),
(u32) window_size));
mem_info->mapped_length = 0;
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_NO_MEMORY);
}

/* Save the physical address and mapping size */
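The handler above keeps one mapped window (mapped_logical_address / mapped_length) and only calls the OS mapping routine when the requested physical address falls outside that window. A rough standalone sketch of the caching idea, not part of the patch; map_window(), unmap_window() and WINDOW_SIZE are placeholders, not the real acpi_os_map_memory() interface:

#include <stdint.h>
#include <stddef.h>

#define WINDOW_SIZE 4096u               /* illustrative window size */

struct mem_window {
	uint64_t physical_base;         /* start of the currently mapped window */
	size_t   length;                /* 0 means nothing is mapped yet */
	uint8_t *logical_base;          /* CPU-visible address of the window */
};

/* Placeholders for the platform mapping calls. */
extern uint8_t *map_window(uint64_t physical, size_t length);
extern void unmap_window(uint8_t *logical, size_t length);

/* Return a CPU pointer for 'physical', remapping only when it lies outside
 * the cached window - the same idea as the handler in the hunk above. */
static uint8_t *get_logical(struct mem_window *w, uint64_t physical)
{
	if (w->length == 0 ||
	    physical < w->physical_base ||
	    physical >= w->physical_base + w->length) {
		if (w->length)
			unmap_window(w->logical_base, w->length);

		w->logical_base = map_window(physical, WINDOW_SIZE);
		if (!w->logical_base) {
			w->length = 0;
			return NULL;    /* mirrors the AE_NO_MEMORY path */
		}
		w->physical_base = physical;
		w->length = WINDOW_SIZE;
	}
	return w->logical_base + (physical - w->physical_base);
}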
@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function,
*value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
break;

#if ACPI_MACHINE_WIDTH != 16
case 64:
*value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
break;
#endif

default:
/* bit_width was already validated */
break;

@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function,
ACPI_SET32(logical_addr_ptr) = (u32) * value;
break;

#if ACPI_MACHINE_WIDTH != 16
case 64:
ACPI_SET64(logical_addr_ptr) = (u64) * value;
break;
#endif

default:
/* bit_width was already validated */
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
acpi_status status = AE_OK;
union acpi_operand_object *stack_desc;
void *temp_node;
union acpi_operand_object *obj_desc;
union acpi_operand_object *obj_desc = NULL;
u16 opcode;

ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);

@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
status = acpi_ds_get_package_arguments(stack_desc);
break;

/* These cases may never happen here, but just in case.. */

case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:

@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
status =
acpi_ex_read_data_from_field(walk_state, stack_desc,
&obj_desc);

/* Remove a reference to the original operand, then override */

acpi_ut_remove_reference(*stack_ptr);
*stack_ptr = (void *)obj_desc;
break;
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode,
}
goto next_operand;

case ARGI_REGION_OR_FIELD:
case ARGI_REGION_OR_BUFFER: /* Used by Load() only */

/* Need an operand of type REGION or a FIELD in a region */
/* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */

switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_BUFFER:
case ACPI_TYPE_REGION:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:

/* Valid operand */
break;

default:
ACPI_ERROR((AE_INFO,
"Needed [Region/RegionField], found [%s] %p",
"Needed [Region/Buffer], found [%s] %p",
acpi_ut_get_object_type_name
(obj_desc), obj_desc));
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
{
acpi_status status;
acpi_status status2;

ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);

@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)

/* We must wait, so unlock the interpreter */

acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();

status = acpi_os_wait_semaphore(semaphore, 1, timeout);

@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)

/* Reacquire the interpreter */

status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {

/* Report fatal error, could not acquire interpreter */

return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}

return_ACPI_STATUS(status);

@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
acpi_status status;
acpi_status status2;

ACPI_FUNCTION_TRACE(ex_system_wait_mutex);

@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)

/* We must wait, so unlock the interpreter */

acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();

status = acpi_os_acquire_mutex(mutex, timeout);

@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)

/* Reacquire the interpreter */

status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {

/* Report fatal error, could not acquire interpreter */

return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}

return_ACPI_STATUS(status);
@ -209,96 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
|
|||
|
||||
acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
/* Since this thread will sleep, we must release the interpreter */
|
||||
|
||||
acpi_ex_exit_interpreter();
|
||||
acpi_ex_relinquish_interpreter();
|
||||
|
||||
acpi_os_sleep(how_long);
|
||||
|
||||
/* And now we must get the interpreter again */
|
||||
|
||||
status = acpi_ex_enter_interpreter();
|
||||
return (status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_system_acquire_mutex
|
||||
*
|
||||
* PARAMETERS: time_desc - Maximum time to wait for the mutex
|
||||
* obj_desc - The object descriptor for this op
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Provides an access point to perform synchronization operations
|
||||
* within the AML. This function will cause a lock to be generated
|
||||
* for the Mutex pointed to by obj_desc.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
|
||||
union acpi_operand_object * obj_desc)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
|
||||
|
||||
if (!obj_desc) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Support for the _GL_ Mutex object -- go get the global lock */
|
||||
|
||||
if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
|
||||
status =
|
||||
acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
|
||||
(u16) time_desc->integer.value);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_system_release_mutex
|
||||
*
|
||||
* PARAMETERS: obj_desc - The object descriptor for this op
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Provides an access point to perform synchronization operations
|
||||
* within the AML. This operation is a request to release a
|
||||
* previously acquired Mutex. If the Mutex variable is set then
|
||||
* it will be decremented.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_system_release_mutex);
|
||||
|
||||
if (!obj_desc) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Support for the _GL_ Mutex object -- release the global lock */
|
||||
|
||||
if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
|
||||
status = acpi_ev_release_global_lock();
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
acpi_ex_reacquire_interpreter();
|
||||
return (AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
@@ -314,7 +222,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
*
******************************************************************************/

acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)
acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
{
acpi_status status = AE_OK;

@@ -6,7 +6,7 @@
*****************************************************************************/

/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
*
* PARAMETERS: None
*
* RETURN: Status
* RETURN: None
*
* DESCRIPTION: Enter the interpreter execution region. Failure to enter
* the interpreter region is a fatal system error
* DESCRIPTION: Enter the interpreter execution region. Failure to enter
* the interpreter region is a fatal system error. Used in
* conjunction with exit_interpreter.
*
******************************************************************************/

acpi_status acpi_ex_enter_interpreter(void)
void acpi_ex_enter_interpreter(void)
{
acpi_status status;

@@ -91,10 +92,42 @@ acpi_status acpi_ex_enter_interpreter(void)

status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
ACPI_ERROR((AE_INFO,
"Could not acquire AML Interpreter mutex"));
}

return_ACPI_STATUS(status);
return_VOID;
}

/*******************************************************************************
*
* FUNCTION: acpi_ex_reacquire_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
* fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/

void acpi_ex_reacquire_interpreter(void)
{
ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);

/*
* If the global serialized flag is set, do not release the interpreter,
* since it was not actually released by acpi_ex_relinquish_interpreter.
* This forces the interpreter to be single threaded.
*/
if (!acpi_gbl_all_methods_serialized) {
acpi_ex_enter_interpreter();
}

return_VOID;
}

/*******************************************************************************
@@ -105,17 +138,9 @@ acpi_status acpi_ex_enter_interpreter(void)
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region
*
* Cases where the interpreter is unlocked:
* 1) Completion of the execution of a control method
* 2) Method blocked on a Sleep() AML opcode
* 3) Method blocked on an Acquire() AML opcode
* 4) Method blocked on a Wait() AML opcode
* 5) Method blocked to acquire the global lock
* 6) Method blocked to execute a serialized control method that is
* already executing
* 7) About to invoke a user-installed opregion handler
* DESCRIPTION: Exit the interpreter execution region. This is the top level
* routine used to exit the interpreter when all processing has
* been completed.
*
******************************************************************************/

@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)

status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
ACPI_ERROR((AE_INFO,
"Could not release AML Interpreter mutex"));
}

return_VOID;
}

/*******************************************************************************
*
* FUNCTION: acpi_ex_relinquish_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region, from within the
* interpreter - before attempting an operation that will possibly
* block the running thread.
*
* Cases where the interpreter is unlocked internally
* 1) Method to be blocked on a Sleep() AML opcode
* 2) Method to be blocked on an Acquire() AML opcode
* 3) Method to be blocked on a Wait() AML opcode
* 4) Method to be blocked to acquire the global lock
* 5) Method to be blocked waiting to execute a serialized control method
* that is currently executing
* 6) About to invoke a user-installed opregion handler
*
******************************************************************************/

void acpi_ex_relinquish_interpreter(void)
{
ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);

/*
* If the global serialized flag is set, do not release the interpreter.
* This forces the interpreter to be single threaded.
*/
if (!acpi_gbl_all_methods_serialized) {
acpi_ex_exit_interpreter();
}

return_VOID;
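The two new helpers above only drop and retake the interpreter lock when acpi_gbl_all_methods_serialized is clear; in serialized mode the lock is simply kept, so only one method ever runs. A small pthread-based sketch of that guard pattern, not part of the patch; the flag and lock names here are illustrative stand-ins for the ACPICA globals:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;
static bool all_methods_serialized;     /* set once at init time */

static void enter_interpreter(void)  { pthread_mutex_lock(&interpreter_lock); }
static void exit_interpreter(void)   { pthread_mutex_unlock(&interpreter_lock); }

/* Drop the lock before a potentially blocking operation (Sleep, Acquire,
 * Wait, ...) unless serialized mode forces single-threaded execution. */
static void relinquish_interpreter(void)
{
	if (!all_methods_serialized)
		exit_interpreter();
}

/* Retake the lock after the blocking operation completes. */
static void reacquire_interpreter(void)
{
	if (!all_methods_serialized)
		enter_interpreter();
}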
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
*
* RETURN: none
*
* DESCRIPTION: Truncate a number to 32-bits if the currently executing method
* belongs to a 32-bit ACPI table.
* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
* 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/

@@ -48,8 +48,8 @@ MODULE_LICENSE("GPL");

static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
static int acpi_fan_suspend(struct acpi_device *device, int state);
static int acpi_fan_resume(struct acpi_device *device, int state);
static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
static int acpi_fan_resume(struct acpi_device *device);

static struct acpi_driver acpi_fan_driver = {
.name = ACPI_FAN_DRIVER_NAME,

@@ -237,7 +237,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return 0;
}

static int acpi_fan_suspend(struct acpi_device *device, int state)
static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
{
if (!device)
return -EINVAL;

@@ -247,7 +247,7 @@ static int acpi_fan_suspend(struct acpi_device *device, int state)
return AE_OK;
}

static int acpi_fan_resume(struct acpi_device *device, int state)
static int acpi_fan_resume(struct acpi_device *device)
{
int result = 0;
int power_state = 0;
@ -86,129 +86,6 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Get PCI root bridge's handle from its segment and bus number */
|
||||
struct acpi_find_pci_root {
|
||||
unsigned int seg;
|
||||
unsigned int bus;
|
||||
acpi_handle handle;
|
||||
};
|
||||
|
||||
static acpi_status
|
||||
do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
|
||||
{
|
||||
unsigned long *busnr = data;
|
||||
struct acpi_resource_address64 address;
|
||||
|
||||
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
|
||||
resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
|
||||
resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
|
||||
return AE_OK;
|
||||
|
||||
acpi_resource_to_address64(resource, &address);
|
||||
if ((address.address_length > 0) &&
|
||||
(address.resource_type == ACPI_BUS_NUMBER_RANGE))
|
||||
*busnr = address.minimum;
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static int get_root_bridge_busnr(acpi_handle handle)
|
||||
{
|
||||
acpi_status status;
|
||||
unsigned long bus, bbn;
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
|
||||
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
|
||||
|
||||
status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL,
|
||||
&bbn);
|
||||
if (status == AE_NOT_FOUND) {
|
||||
/* Assume bus = 0 */
|
||||
printk(KERN_INFO PREFIX
|
||||
"Assume root bridge [%s] bus is 0\n",
|
||||
(char *)buffer.pointer);
|
||||
status = AE_OK;
|
||||
bbn = 0;
|
||||
}
|
||||
if (ACPI_FAILURE(status)) {
|
||||
bbn = -ENODEV;
|
||||
goto exit;
|
||||
}
|
||||
if (bbn > 0)
|
||||
goto exit;
|
||||
|
||||
/* _BBN in some systems return 0 for all root bridges */
|
||||
bus = -1;
|
||||
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
|
||||
do_root_bridge_busnr_callback, &bus);
|
||||
/* If _CRS failed, we just use _BBN */
|
||||
if (ACPI_FAILURE(status) || (bus == -1))
|
||||
goto exit;
|
||||
/* We select _CRS */
|
||||
if (bbn != bus) {
|
||||
printk(KERN_INFO PREFIX
|
||||
"_BBN and _CRS returns different value for %s. Select _CRS\n",
|
||||
(char *)buffer.pointer);
|
||||
bbn = bus;
|
||||
}
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return (int)bbn;
|
||||
}
|
||||
|
||||
static acpi_status
|
||||
find_pci_rootbridge(acpi_handle handle, u32 lvl, void *context, void **rv)
|
||||
{
|
||||
struct acpi_find_pci_root *find = (struct acpi_find_pci_root *)context;
|
||||
unsigned long seg, bus;
|
||||
acpi_status status;
|
||||
int tmp;
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
|
||||
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
|
||||
|
||||
status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL, &seg);
|
||||
if (status == AE_NOT_FOUND) {
|
||||
/* Assume seg = 0 */
|
||||
status = AE_OK;
|
||||
seg = 0;
|
||||
}
|
||||
if (ACPI_FAILURE(status)) {
|
||||
status = AE_CTRL_DEPTH;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
tmp = get_root_bridge_busnr(handle);
|
||||
if (tmp < 0) {
|
||||
printk(KERN_ERR PREFIX
|
||||
"Find root bridge failed for %s\n",
|
||||
(char *)buffer.pointer);
|
||||
status = AE_CTRL_DEPTH;
|
||||
goto exit;
|
||||
}
|
||||
bus = tmp;
|
||||
|
||||
if (seg == find->seg && bus == find->bus)
|
||||
{
|
||||
find->handle = handle;
|
||||
status = AE_CTRL_TERMINATE;
|
||||
}
|
||||
else
|
||||
status = AE_OK;
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return status;
|
||||
}
|
||||
|
||||
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
|
||||
{
|
||||
struct acpi_find_pci_root find = { seg, bus, NULL };
|
||||
|
||||
acpi_get_devices(PCI_ROOT_HID_STRING, find_pci_rootbridge, &find, NULL);
|
||||
return find.handle;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
|
||||
|
||||
/* Get device's handler per its address under its parent */
|
||||
struct acpi_find_child {
|
||||
acpi_handle handle;
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -47,41 +47,6 @@
|
|||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwacpi")
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_hw_initialize
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Initialize and validate the various ACPI registers defined in
|
||||
* the FADT.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_hw_initialize(void)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(hw_initialize);
|
||||
|
||||
/* We must have the ACPI tables by the time we get here */
|
||||
|
||||
if (!acpi_gbl_FADT) {
|
||||
ACPI_ERROR((AE_INFO, "No FADT is present"));
|
||||
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
|
||||
}
|
||||
|
||||
/* Sanity check the FADT for valid values */
|
||||
|
||||
status = acpi_ut_validate_fadt();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_hw_set_mode
|
||||
|
@ -93,7 +58,6 @@ acpi_status acpi_hw_initialize(void)
|
|||
* DESCRIPTION: Transitions the system into the requested mode.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_hw_set_mode(u32 mode)
|
||||
{
|
||||
|
||||
|
@ -106,7 +70,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
|
|||
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
|
||||
* system does not support mode transition.
|
||||
*/
|
||||
if (!acpi_gbl_FADT->smi_cmd) {
|
||||
if (!acpi_gbl_FADT.smi_command) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"No SMI_CMD in FADT, mode transition failed"));
|
||||
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
|
||||
|
@ -119,7 +83,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
|
|||
* we make sure both the numbers are zero to determine these
|
||||
* transitions are not supported.
|
||||
*/
|
||||
if (!acpi_gbl_FADT->acpi_enable && !acpi_gbl_FADT->acpi_disable) {
|
||||
if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"No ACPI mode transition supported in this system (enable/disable both zero)"));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
|
@ -130,9 +94,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
|
|||
|
||||
/* BIOS should have disabled ALL fixed and GP events */
|
||||
|
||||
status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd,
|
||||
(u32) acpi_gbl_FADT->acpi_enable,
|
||||
8);
|
||||
status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
|
||||
(u32) acpi_gbl_FADT.acpi_enable, 8);
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Attempting to enable ACPI mode\n"));
|
||||
break;
|
||||
|
@ -143,8 +106,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
|
|||
* BIOS should clear all fixed status bits and restore fixed event
|
||||
* enable bits to default
|
||||
*/
|
||||
status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd,
|
||||
(u32) acpi_gbl_FADT->acpi_disable,
|
||||
status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
|
||||
(u32) acpi_gbl_FADT.acpi_disable,
|
||||
8);
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Attempting to enable Legacy (non-ACPI) mode\n"));
|
||||
|
@ -204,12 +167,11 @@ u32 acpi_hw_get_mode(void)
|
|||
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
|
||||
* system does not support mode transition.
|
||||
*/
|
||||
if (!acpi_gbl_FADT->smi_cmd) {
|
||||
if (!acpi_gbl_FADT.smi_command) {
|
||||
return_UINT32(ACPI_SYS_MODE_ACPI);
|
||||
}
|
||||
|
||||
status =
|
||||
acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value, ACPI_MTX_LOCK);
|
||||
status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_UINT32(ACPI_SYS_MODE_LEGACY);
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@@ -105,14 +105,20 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info)
acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
{
acpi_status status;
u8 register_bit;

ACPI_FUNCTION_ENTRY();

register_bit = (u8)
(1 <<
(gpe_event_info->gpe_number -
gpe_event_info->register_info->base_gpe_number));

/*
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
status = acpi_hw_low_level_write(8, gpe_event_info->register_bit,
status = acpi_hw_low_level_write(8, register_bit,
&gpe_event_info->register_info->
status_address);

@@ -155,7 +161,10 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,

/* Get the register bitmask for this GPE */

register_bit = gpe_event_info->register_bit;
register_bit = (u8)
(1 <<
(gpe_event_info->gpe_number -
gpe_event_info->register_info->base_gpe_number));

/* GPE currently enabled? (enabled for runtime?) */
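Both hunks above replace the cached register_bit field with a mask computed on the fly from the GPE number and the register's base GPE number. A tiny standalone illustration of that computation, not part of the patch; the example values are made up:

#include <stdio.h>

/* Each GPE register covers a block of GPEs; the bit for a GPE is its offset
 * from the register's base GPE number, as in the hunks above. */
static unsigned char gpe_register_bit(unsigned int gpe_number,
				      unsigned int base_gpe_number)
{
	return (unsigned char)(1 << (gpe_number - base_gpe_number));
}

int main(void)
{
	/* GPE 0x13 in the register that starts at GPE 0x10 -> bit 3 (0x08) */
	printf("mask = 0x%02X\n", gpe_register_bit(0x13, 0x10));
	return 0;
}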
@ -7,7 +7,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -54,17 +54,15 @@ ACPI_MODULE_NAME("hwregs")
|
|||
*
|
||||
* FUNCTION: acpi_hw_clear_acpi_status
|
||||
*
|
||||
* PARAMETERS: Flags - Lock the hardware or not
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: none
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Clears all fixed and general purpose status bits
|
||||
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
|
||||
*
|
||||
* NOTE: TBD: Flags parameter is obsolete, to be removed
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_hw_clear_acpi_status(u32 flags)
|
||||
acpi_status acpi_hw_clear_acpi_status(void)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_cpu_flags lock_flags = 0;
|
||||
|
@ -73,7 +71,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
|
|||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
|
||||
ACPI_BITMASK_ALL_FIXED_STATUS,
|
||||
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
|
||||
(u16) acpi_gbl_FADT.xpm1a_event_block.address));
|
||||
|
||||
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
|
||||
|
@ -86,10 +84,10 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
|
|||
|
||||
/* Clear the fixed events */
|
||||
|
||||
if (acpi_gbl_FADT->xpm1b_evt_blk.address) {
|
||||
if (acpi_gbl_FADT.xpm1b_event_block.address) {
|
||||
status =
|
||||
acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
|
||||
&acpi_gbl_FADT->xpm1b_evt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_event_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
@ -253,18 +251,15 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
|
|||
*
|
||||
* PARAMETERS: register_id - ID of ACPI bit_register to access
|
||||
* return_value - Value that was read from the register
|
||||
* Flags - Lock the hardware or not
|
||||
*
|
||||
* RETURN: Status and the value read from specified Register. Value
|
||||
* returned is normalized to bit0 (is shifted all the way right)
|
||||
*
|
||||
* DESCRIPTION: ACPI bit_register read function.
|
||||
*
|
||||
* NOTE: TBD: Flags parameter is obsolete, to be removed
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
|
||||
acpi_status acpi_get_register(u32 register_id, u32 * return_value)
|
||||
{
|
||||
u32 register_value = 0;
|
||||
struct acpi_bit_register_info *bit_reg_info;
|
||||
|
@ -312,16 +307,13 @@ ACPI_EXPORT_SYMBOL(acpi_get_register)
|
|||
* PARAMETERS: register_id - ID of ACPI bit_register to access
|
||||
* Value - (only used on write) value to write to the
|
||||
* Register, NOT pre-normalized to the bit pos
|
||||
* Flags - Lock the hardware or not
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: ACPI Bit Register write function.
|
||||
*
|
||||
* NOTE: TBD: Flags parameter is obsolete, to be removed
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
|
||||
acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
{
|
||||
u32 register_value = 0;
|
||||
struct acpi_bit_register_info *bit_reg_info;
|
||||
|
@ -422,8 +414,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
|
|||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"PM2 control: Read %X from %8.8X%8.8X\n",
|
||||
register_value,
|
||||
ACPI_FORMAT_UINT64(acpi_gbl_FADT->
|
||||
xpm2_cnt_blk.address)));
|
||||
ACPI_FORMAT_UINT64(acpi_gbl_FADT.
|
||||
xpm2_control_block.
|
||||
address)));
|
||||
|
||||
ACPI_REGISTER_INSERT_VALUE(register_value,
|
||||
bit_reg_info->bit_position,
|
||||
|
@ -433,8 +426,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
|
|||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"About to write %4.4X to %8.8X%8.8X\n",
|
||||
register_value,
|
||||
ACPI_FORMAT_UINT64(acpi_gbl_FADT->
|
||||
xpm2_cnt_blk.address)));
|
||||
ACPI_FORMAT_UINT64(acpi_gbl_FADT.
|
||||
xpm2_control_block.
|
||||
address)));
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM2_CONTROL,
|
||||
|
@ -495,7 +489,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_read(16, &value1,
|
||||
&acpi_gbl_FADT->xpm1a_evt_blk);
|
||||
&acpi_gbl_FADT.xpm1a_event_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
@ -504,7 +498,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_read(16, &value2,
|
||||
&acpi_gbl_FADT->xpm1b_evt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_event_block);
|
||||
value1 |= value2;
|
||||
break;
|
||||
|
||||
|
@ -527,14 +521,14 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_read(16, &value1,
|
||||
&acpi_gbl_FADT->xpm1a_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1a_control_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_read(16, &value2,
|
||||
&acpi_gbl_FADT->xpm1b_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_control_block);
|
||||
value1 |= value2;
|
||||
break;
|
||||
|
||||
|
@ -542,19 +536,20 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_read(8, &value1,
|
||||
&acpi_gbl_FADT->xpm2_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm2_control_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_read(32, &value1,
|
||||
&acpi_gbl_FADT->xpm_tmr_blk);
|
||||
&acpi_gbl_FADT.xpm_timer_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
|
||||
|
||||
status = acpi_os_read_port(acpi_gbl_FADT->smi_cmd, &value1, 8);
|
||||
status =
|
||||
acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -635,7 +630,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1a_evt_blk);
|
||||
&acpi_gbl_FADT.xpm1a_event_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
@ -644,7 +639,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1b_evt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_event_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
|
||||
|
@ -682,49 +677,50 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
|||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1a_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1a_control_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1b_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_control_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1a_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1a_control_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT->xpm1b_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm1b_control_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_write(8, value,
|
||||
&acpi_gbl_FADT->xpm2_cnt_blk);
|
||||
&acpi_gbl_FADT.xpm2_control_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
|
||||
|
||||
status =
|
||||
acpi_hw_low_level_write(32, value,
|
||||
&acpi_gbl_FADT->xpm_tmr_blk);
|
||||
&acpi_gbl_FADT.xpm_timer_block);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
|
||||
|
||||
/* SMI_CMD is currently always in IO space */
|
||||
|
||||
status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, value, 8);
|
||||
status =
|
||||
acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -783,7 +779,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
|
|||
* Two address spaces supported: Memory or IO.
|
||||
* PCI_Config is not supported here because the GAS struct is insufficient
|
||||
*/
|
||||
switch (reg->address_space_id) {
|
||||
switch (reg->space_id) {
|
||||
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
|
||||
|
||||
status = acpi_os_read_memory((acpi_physical_address) address,
|
||||
|
@ -792,22 +788,20 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
|
|||
|
||||
case ACPI_ADR_SPACE_SYSTEM_IO:
|
||||
|
||||
status = acpi_os_read_port((acpi_io_address) address,
|
||||
value, width);
|
||||
status =
|
||||
acpi_os_read_port((acpi_io_address) address, value, width);
|
||||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unsupported address space: %X",
|
||||
reg->address_space_id));
|
||||
"Unsupported address space: %X", reg->space_id));
|
||||
return (AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
|
||||
*value, width,
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
acpi_ut_get_region_name(reg->address_space_id)));
|
||||
*value, width, ACPI_FORMAT_UINT64(address),
|
||||
acpi_ut_get_region_name(reg->space_id)));
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
@ -854,7 +848,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
|
|||
* Two address spaces supported: Memory or IO.
|
||||
* PCI_Config is not supported here because the GAS struct is insufficient
|
||||
*/
|
||||
switch (reg->address_space_id) {
|
||||
switch (reg->space_id) {
|
||||
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
|
||||
|
||||
status = acpi_os_write_memory((acpi_physical_address) address,
|
||||
|
@ -863,22 +857,20 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
|
|||
|
||||
case ACPI_ADR_SPACE_SYSTEM_IO:
|
||||
|
||||
status = acpi_os_write_port((acpi_io_address) address,
|
||||
value, width);
|
||||
status = acpi_os_write_port((acpi_io_address) address, value,
|
||||
width);
|
||||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unsupported address space: %X",
|
||||
reg->address_space_id));
|
||||
"Unsupported address space: %X", reg->space_id));
|
||||
return (AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
|
||||
value, width,
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
acpi_ut_get_region_name(reg->address_space_id)));
|
||||
value, width, ACPI_FORMAT_UINT64(address),
|
||||
acpi_ut_get_region_name(reg->space_id)));
|
||||
|
||||
return (status);
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -43,6 +43,7 @@
|
|||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include <acpi/actables.h>
|
||||
|
||||
#define _COMPONENT ACPI_HARDWARE
|
||||
ACPI_MODULE_NAME("hwsleep")
|
||||
|
@ -62,17 +63,32 @@ ACPI_MODULE_NAME("hwsleep")
|
|||
acpi_status
|
||||
acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
|
||||
{
|
||||
struct acpi_table_facs *facs;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
|
||||
|
||||
/* Get the FACS */
|
||||
|
||||
status =
|
||||
acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
|
||||
(struct acpi_table_header **)&facs);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Set the vector */
|
||||
|
||||
if (acpi_gbl_common_fACS.vector_width == 32) {
|
||||
*(ACPI_CAST_PTR
|
||||
(u32, acpi_gbl_common_fACS.firmware_waking_vector))
|
||||
= (u32) physical_address;
|
||||
if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
|
||||
/*
|
||||
* ACPI 1.0 FACS or short table or optional X_ field is zero
|
||||
*/
|
||||
facs->firmware_waking_vector = (u32) physical_address;
|
||||
} else {
|
||||
*acpi_gbl_common_fACS.firmware_waking_vector = physical_address;
|
||||
/*
|
||||
* ACPI 2.0 FACS with valid X_ field
|
||||
*/
|
||||
facs->xfirmware_waking_vector = physical_address;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
|
@ -97,6 +113,8 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
|
|||
acpi_status
|
||||
acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
|
||||
{
|
||||
struct acpi_table_facs *facs;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
|
||||
|
||||
|
@ -104,16 +122,29 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
|
|||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Get the FACS */
|
||||
|
||||
status =
|
||||
acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
|
||||
(struct acpi_table_header **)&facs);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Get the vector */
|
||||
|
||||
if (acpi_gbl_common_fACS.vector_width == 32) {
|
||||
*physical_address = (acpi_physical_address)
|
||||
*
|
||||
(ACPI_CAST_PTR
|
||||
(u32, acpi_gbl_common_fACS.firmware_waking_vector));
|
||||
} else {
|
||||
if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
|
||||
/*
|
||||
* ACPI 1.0 FACS or short table or optional X_ field is zero
|
||||
*/
|
||||
*physical_address =
|
||||
*acpi_gbl_common_fACS.firmware_waking_vector;
|
||||
(acpi_physical_address) facs->firmware_waking_vector;
|
||||
} else {
|
||||
/*
|
||||
* ACPI 2.0 FACS with valid X_ field
|
||||
*/
|
||||
*physical_address =
|
||||
(acpi_physical_address) facs->xfirmware_waking_vector;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
|
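The rewritten set/get routines above choose between the 32-bit firmware_waking_vector and the 64-bit xfirmware_waking_vector based on the FACS length and whether the X_ field is populated. A condensed standalone sketch of that selection, not part of the patch, using a simplified FACS layout (field names mirror the hunks, the struct itself is illustrative):

#include <stdint.h>

struct facs_like {
	uint32_t length;                    /* table length in bytes */
	uint32_t firmware_waking_vector;    /* ACPI 1.0, 32-bit */
	uint64_t xfirmware_waking_vector;   /* ACPI 2.0+, 64-bit, may be 0 */
};

/* Store the waking vector in whichever field this FACS actually supports,
 * following the same test as the hunk above. */
static void set_waking_vector(struct facs_like *facs, uint64_t physical_address)
{
	if (facs->length < 32 || facs->xfirmware_waking_vector == 0) {
		/* ACPI 1.0 FACS, short table, or X_ field unused */
		facs->firmware_waking_vector = (uint32_t)physical_address;
	} else {
		/* ACPI 2.0 FACS with a valid X_ field */
		facs->xfirmware_waking_vector = physical_address;
	}
}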
@ -246,15 +277,14 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
|
||||
/* Clear wake status */
|
||||
|
||||
status =
|
||||
acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Clear all fixed and general purpose status bits */
|
||||
|
||||
status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_hw_clear_acpi_status();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -367,8 +397,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
/* Wait until we enter sleep state */
|
||||
|
||||
do {
|
||||
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value,
|
||||
ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -401,13 +430,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
|
|||
|
||||
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
|
||||
|
||||
status =
|
||||
acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_hw_clear_acpi_status();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -429,13 +457,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
|
|||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd,
|
||||
(u32) acpi_gbl_FADT->S4bios_req, 8);
|
||||
status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
|
||||
(u32) acpi_gbl_FADT.S4bios_request, 8);
|
||||
|
||||
do {
|
||||
acpi_os_stall(1000);
|
||||
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value,
|
||||
ACPI_MTX_DO_NOT_LOCK);
|
||||
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -568,13 +595,11 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
|||
|
||||
(void)
|
||||
acpi_set_register(acpi_gbl_fixed_event_info
|
||||
[ACPI_EVENT_POWER_BUTTON].enable_register_id, 1,
|
||||
ACPI_MTX_DO_NOT_LOCK);
|
||||
[ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
|
||||
|
||||
(void)
|
||||
acpi_set_register(acpi_gbl_fixed_event_info
|
||||
[ACPI_EVENT_POWER_BUTTON].status_register_id, 1,
|
||||
ACPI_MTX_DO_NOT_LOCK);
|
||||
[ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
|
||||
|
||||
arg.integer.value = ACPI_SST_WORKING;
|
||||
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@@ -66,7 +66,7 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}

if (acpi_gbl_FADT->tmr_val_ext == 0) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
*resolution = 24;
} else {
*resolution = 32;

@@ -98,7 +98,8 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}

status = acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT->xpm_tmr_blk);
status =
acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);

return_ACPI_STATUS(status);
}

@@ -153,7 +154,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
if (start_ticks < end_ticks) {
delta_ticks = end_ticks - start_ticks;
} else if (start_ticks > end_ticks) {
if (acpi_gbl_FADT->tmr_val_ext == 0) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {

/* 24-bit Timer */

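acpi_get_timer_duration() above computes the elapsed ticks between two PM timer reads, handling one rollover differently for 24-bit and 32-bit timers. A small standalone sketch of that wrap handling, not part of the patch; the 3.579545 MHz tick-to-microsecond conversion is omitted and at most one rollover is assumed:

#include <stdint.h>

/* Elapsed PM-timer ticks between two reads; 'timer_is_32bit' mirrors the
 * ACPI_FADT_32BIT_TIMER flag test in the hunk above. */
static uint32_t pm_timer_delta(uint32_t start_ticks, uint32_t end_ticks,
			       int timer_is_32bit)
{
	if (start_ticks < end_ticks)
		return end_ticks - start_ticks;

	if (start_ticks > end_ticks) {
		if (!timer_is_32bit)
			/* 24-bit timer: counter wraps after 0x00FFFFFF */
			return (0x00FFFFFF - start_ticks) + end_ticks + 1;

		/* 32-bit timer */
		return (0xFFFFFFFF - start_ticks) + end_ticks + 1;
	}

	return 0;	/* same reading twice */
}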
@ -1,191 +0,0 @@
|
|||
/*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or (at
|
||||
* your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
|
||||
*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*/
|
||||
|
||||
/* Purpose: Prevent PCMCIA cards from using motherboard resources. */
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
|
||||
#define _COMPONENT ACPI_SYSTEM_COMPONENT
|
||||
ACPI_MODULE_NAME("acpi_motherboard")
|
||||
|
||||
/* Dell use PNP0C01 instead of PNP0C02 */
|
||||
#define ACPI_MB_HID1 "PNP0C01"
|
||||
#define ACPI_MB_HID2 "PNP0C02"
|
||||
/**
|
||||
* Doesn't care about legacy IO ports, only IO ports beyond 0x1000 are reserved
|
||||
* Doesn't care about the failure of 'request_region', since other may reserve
|
||||
* the io ports as well
|
||||
*/
|
||||
#define IS_RESERVED_ADDR(base, len) \
|
||||
(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
|
||||
&& ((base) + (len) > PCIBIOS_MIN_IO))
|
||||
/*
|
||||
* Clearing the flag (IORESOURCE_BUSY) allows drivers to use
|
||||
* the io ports if they really know they can use it, while
|
||||
* still preventing hotplug PCI devices from using it.
|
||||
*/
|
||||
|
||||
/*
|
||||
* When CONFIG_PNP is enabled, pnp/system.c binds to PNP0C01
|
||||
* and PNP0C02, redundant with acpi_reserve_io_ranges().
|
||||
* But acpi_reserve_io_ranges() is necessary for !CONFIG_PNP.
|
||||
*/
|
||||
static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
|
||||
{
|
||||
struct resource *requested_res = NULL;
|
||||
|
||||
|
||||
if (res->type == ACPI_RESOURCE_TYPE_IO) {
|
||||
struct acpi_resource_io *io_res = &res->data.io;
|
||||
|
||||
if (io_res->minimum != io_res->maximum)
|
||||
return AE_OK;
|
||||
if (IS_RESERVED_ADDR
|
||||
(io_res->minimum, io_res->address_length)) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Motherboard resources 0x%08x - 0x%08x\n",
|
||||
io_res->minimum,
|
||||
io_res->minimum +
|
||||
io_res->address_length));
|
||||
requested_res =
|
||||
request_region(io_res->minimum,
|
||||
io_res->address_length, "motherboard");
|
||||
}
|
||||
} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_IO) {
|
||||
struct acpi_resource_fixed_io *fixed_io_res =
|
||||
&res->data.fixed_io;
|
||||
|
||||
if (IS_RESERVED_ADDR
|
||||
(fixed_io_res->address, fixed_io_res->address_length)) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Motherboard resources 0x%08x - 0x%08x\n",
|
||||
fixed_io_res->address,
|
||||
fixed_io_res->address +
|
||||
fixed_io_res->address_length));
|
||||
requested_res =
|
||||
request_region(fixed_io_res->address,
|
||||
fixed_io_res->address_length,
|
||||
"motherboard");
|
||||
}
|
||||
} else {
|
||||
/* Memory mapped IO? */
|
||||
}
|
||||
|
||||
if (requested_res)
|
||||
requested_res->flags &= ~IORESOURCE_BUSY;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static int acpi_motherboard_add(struct acpi_device *device)
|
||||
{
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
|
||||
acpi_reserve_io_ranges, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct acpi_driver acpi_motherboard_driver1 = {
|
||||
.name = "motherboard",
|
||||
.class = "",
|
||||
.ids = ACPI_MB_HID1,
|
||||
.ops = {
|
||||
.add = acpi_motherboard_add,
|
||||
},
|
||||
};
|
||||
|
||||
static struct acpi_driver acpi_motherboard_driver2 = {
|
||||
.name = "motherboard",
|
||||
.class = "",
|
||||
.ids = ACPI_MB_HID2,
|
||||
.ops = {
|
||||
.add = acpi_motherboard_add,
|
||||
},
|
||||
};
|
||||
|
||||
static void __init acpi_request_region (struct acpi_generic_address *addr,
|
||||
unsigned int length, char *desc)
|
||||
{
|
||||
if (!addr->address || !length)
|
||||
return;
|
||||
|
||||
if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
|
||||
request_region(addr->address, length, desc);
|
||||
else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
|
||||
request_mem_region(addr->address, length, desc);
|
||||
}
|
||||
|
||||
static void __init acpi_reserve_resources(void)
|
||||
{
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
|
||||
acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
|
||||
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
|
||||
acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
|
||||
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
|
||||
acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
|
||||
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
|
||||
acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
|
||||
|
||||
if (acpi_gbl_FADT->pm_tm_len == 4)
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
|
||||
|
||||
acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
|
||||
acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
|
||||
|
||||
/* Length of GPE blocks must be a non-negative multiple of 2 */
|
||||
|
||||
if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
|
||||
acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
|
||||
acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
|
||||
|
||||
if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
|
||||
acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
|
||||
acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
|
||||
}
|
||||
|
||||
static int __init acpi_motherboard_init(void)
|
||||
{
|
||||
acpi_bus_register_driver(&acpi_motherboard_driver1);
|
||||
acpi_bus_register_driver(&acpi_motherboard_driver2);
|
||||
/*
|
||||
* Guarantee motherboard IO reservation first
|
||||
* This module must run after scan.c
|
||||
*/
|
||||
if (!acpi_disabled)
|
||||
acpi_reserve_resources();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserve motherboard resources after PCI claim BARs,
|
||||
* but before PCI assign resources for uninitialized PCI devices
|
||||
*/
|
||||
fs_initcall(acpi_motherboard_init);
|
|
@ -5,7 +5,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -195,31 +195,27 @@ acpi_status acpi_ns_root_initialize(void)
|
|||
obj_desc->mutex.sync_level =
|
||||
(u8) (ACPI_TO_INTEGER(val) - 1);
|
||||
|
||||
/* Create a mutex */
|
||||
|
||||
status =
|
||||
acpi_os_create_mutex(&obj_desc->mutex.
|
||||
os_mutex);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ut_remove_reference(obj_desc);
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/* Special case for ACPI Global Lock */
|
||||
|
||||
if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
|
||||
acpi_gbl_global_lock_mutex =
|
||||
obj_desc->mutex.os_mutex;
|
||||
|
||||
/* Create a counting semaphore for the global lock */
|
||||
/* Create additional counting semaphore for global lock */
|
||||
|
||||
status =
|
||||
acpi_os_create_semaphore
|
||||
(ACPI_NO_UNIT_LIMIT, 1,
|
||||
&acpi_gbl_global_lock_semaphore);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ut_remove_reference
|
||||
(obj_desc);
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/* Mark this mutex as very special */
|
||||
|
||||
obj_desc->mutex.os_mutex =
|
||||
ACPI_GLOBAL_LOCK;
|
||||
} else {
|
||||
/* Create a mutex */
|
||||
|
||||
status =
|
||||
acpi_os_create_mutex(&obj_desc->
|
||||
mutex.
|
||||
os_mutex);
|
||||
acpi_os_create_semaphore(1, 0,
|
||||
&acpi_gbl_global_lock_semaphore);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
acpi_ut_remove_reference
|
||||
(obj_desc);
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2006, R. Byron Moore
|
||||
* Copyright (C) 2000 - 2007, R. Byron Moore
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -61,6 +61,9 @@ ACPI_MODULE_NAME("nsalloc")
|
|||
struct acpi_namespace_node *acpi_ns_create_node(u32 name)
|
||||
{
|
||||
struct acpi_namespace_node *node;
|
||||
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
|
||||
u32 temp;
|
||||
#endif
|
||||
|
||||
ACPI_FUNCTION_TRACE(ns_create_node);
|
||||
|
||||
|
@@ -71,6 +74,15 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)

    ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);

#ifdef ACPI_DBG_TRACK_ALLOCATIONS
    temp =
        acpi_gbl_ns_node_list->total_allocated -
        acpi_gbl_ns_node_list->total_freed;
    if (temp > acpi_gbl_ns_node_list->max_occupied) {
        acpi_gbl_ns_node_list->max_occupied = temp;
    }
#endif

    node->name.integer = name;
    ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
    return_PTR(node);

@@ -5,7 +5,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -205,7 +205,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,

    if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
        this_node->name.integer =
            acpi_ut_repair_name(this_node->name.integer);
        acpi_ut_repair_name(this_node->name.ascii);

        ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
                      this_node->name.integer));

@@ -226,6 +226,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
    obj_desc = acpi_ns_get_attached_object(this_node);
    acpi_dbg_level = dbg_level;

    /* Temp nodes are those nodes created by a control method */

    if (this_node->flags & ANOBJ_TEMPORARY) {
        acpi_os_printf("(T) ");
    }

    switch (info->display_type & ACPI_DISPLAY_MASK) {
    case ACPI_DISPLAY_SUMMARY:

@@ -623,7 +629,8 @@ acpi_ns_dump_objects(acpi_object_type type,
    info.display_type = display_type;

    (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
                                 ACPI_NS_WALK_NO_UNLOCK,
                                 ACPI_NS_WALK_NO_UNLOCK |
                                 ACPI_NS_WALK_TEMP_NODES,
                                 acpi_ns_dump_one_object, (void *)&info,
                                 NULL);
}

@@ -5,7 +5,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -5,7 +5,7 @@
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
     * Execute the method via the interpreter. The interpreter is locked
     * here before calling into the AML parser
     */
    status = acpi_ex_enter_interpreter();
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    acpi_ex_enter_interpreter();
    status = acpi_ps_execute_method(info);
    acpi_ex_exit_interpreter();
} else {

@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
     * resolution, we must lock it because we could access an opregion.
     * The opregion access code assumes that the interpreter is locked.
     */
    status = acpi_ex_enter_interpreter();
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }
    acpi_ex_enter_interpreter();

    /* Function has a strange interface */

@@ -5,7 +5,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -213,7 +213,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
                        u32 level, void *context, void **return_value)
{
    acpi_object_type type;
    acpi_status status;
    acpi_status status = AE_OK;
    struct acpi_init_walk_info *info =
        (struct acpi_init_walk_info *)context;
    struct acpi_namespace_node *node =

@@ -267,10 +267,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
    /*
     * Must lock the interpreter before executing AML code
     */
    status = acpi_ex_enter_interpreter();
    if (ACPI_FAILURE(status)) {
        return (status);
    }
    acpi_ex_enter_interpreter();

    /*
     * Each of these types can contain executable AML code within the

@@ -5,7 +5,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -44,13 +44,12 @@
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acdispat.h>
#include <acpi/actables.h>

#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsload")

/* Local prototypes */
static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type);

#ifdef ACPI_FUTURE_IMPLEMENTATION
acpi_status acpi_ns_unload_namespace(acpi_handle handle);

@@ -62,7 +61,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
 *
 * FUNCTION: acpi_ns_load_table
 *
 * PARAMETERS: table_desc - Descriptor for table to be loaded
 * PARAMETERS: table_index - Index for table to be loaded
 *             Node - Owning NS node
 *
 * RETURN: Status

@@ -72,42 +71,13 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
 ******************************************************************************/

acpi_status
acpi_ns_load_table(struct acpi_table_desc *table_desc,
acpi_ns_load_table(acpi_native_uint table_index,
                   struct acpi_namespace_node *node)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ns_load_table);

    /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */

    if (!
        (acpi_gbl_table_data[table_desc->type].
         flags & ACPI_TABLE_EXECUTABLE)) {

        /* Just ignore this table */

        return_ACPI_STATUS(AE_OK);
    }

    /* Check validity of the AML start and length */

    if (!table_desc->aml_start) {
        ACPI_ERROR((AE_INFO, "Null AML pointer"));
        return_ACPI_STATUS(AE_BAD_PARAMETER);
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AML block at %p\n",
                      table_desc->aml_start));

    /* Ignore table if there is no AML contained within */

    if (!table_desc->aml_length) {
        ACPI_WARNING((AE_INFO, "Zero-length AML block in table [%4.4s]",
                      table_desc->pointer->signature));
        return_ACPI_STATUS(AE_OK);
    }

    /*
     * Parse the table and load the namespace with all named
     * objects found within. Control methods are NOT parsed

@@ -117,15 +87,34 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
     * to another control method, we can't continue parsing
     * because we don't know how many arguments to parse next!
     */
    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Loading table into namespace ****\n"));

    status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    status = acpi_ns_parse_table(table_desc, node->child);
    /* If table already loaded into namespace, just return */

    if (acpi_tb_is_table_loaded(table_index)) {
        status = AE_ALREADY_EXISTS;
        goto unlock;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Loading table into namespace ****\n"));

    status = acpi_tb_allocate_owner_id(table_index);
    if (ACPI_FAILURE(status)) {
        goto unlock;
    }

    status = acpi_ns_parse_table(table_index, node->child);
    if (ACPI_SUCCESS(status)) {
        acpi_tb_set_table_loaded_flag(table_index, TRUE);
    } else {
        acpi_tb_release_owner_id(table_index);
    }

unlock:
    (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

    if (ACPI_FAILURE(status)) {

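The new path in the hunk above (check whether the table is already loaded, allocate an owner id, parse, then either mark the table loaded or give the owner id back) is a small acquire-then-rollback pattern. A stand-alone sketch of that ordering, using stand-in names rather than the real ACPICA interfaces:

#include <stdio.h>

struct table { int loaded; int owner_id; };

/* Stub parser: pretend the parse always succeeds. */
static int parse_table(struct table *t) { (void)t; return 0; }

static int load_table(struct table *t)
{
    if (t->loaded)           /* already in the namespace: nothing to do */
        return -1;

    t->owner_id = 1;         /* allocate an owner id before parsing */

    if (parse_table(t)) {
        t->owner_id = 0;     /* parse failed: give the owner id back */
        return -1;
    }

    t->loaded = 1;           /* flag success only after a clean parse */
    return 0;
}

int main(void)
{
    struct table t = { 0, 0 };

    printf("first load:  %d\n", load_table(&t));   /* 0 */
    printf("second load: %d\n", load_table(&t));   /* -1: already loaded */
    return 0;
}
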
@@ -141,7 +130,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Begin Table Method Parsing and Object Initialization ****\n"));

    status = acpi_ds_initialize_objects(table_desc, node);
    status = acpi_ds_initialize_objects(table_index, node);

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                      "**** Completed Table Method Parsing and Object Initialization ****\n"));

@@ -149,99 +138,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
    return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION: acpi_ns_load_table_by_type
 *
 * PARAMETERS: table_type - Id of the table type to load
 *
 * RETURN: Status
 *
 * DESCRIPTION: Load an ACPI table or tables into the namespace. All tables
 *              of the given type are loaded. The mechanism allows this
 *              routine to be called repeatedly.
 *
 ******************************************************************************/

static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
{
    u32 i;
    acpi_status status;
    struct acpi_table_desc *table_desc;

    ACPI_FUNCTION_TRACE(ns_load_table_by_type);

    status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /*
     * Table types supported are:
     * DSDT (one), SSDT/PSDT (multiple)
     */
    switch (table_type) {
    case ACPI_TABLE_ID_DSDT:

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));

        table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;

        /* If table already loaded into namespace, just return */

        if (table_desc->loaded_into_namespace) {
            goto unlock_and_exit;
        }

        /* Now load the single DSDT */

        status = acpi_ns_load_table(table_desc, acpi_gbl_root_node);
        if (ACPI_SUCCESS(status)) {
            table_desc->loaded_into_namespace = TRUE;
        }
        break;

    case ACPI_TABLE_ID_SSDT:
    case ACPI_TABLE_ID_PSDT:

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Namespace load: %d SSDT or PSDTs\n",
                          acpi_gbl_table_lists[table_type].count));

        /*
         * Traverse list of SSDT or PSDT tables
         */
        table_desc = acpi_gbl_table_lists[table_type].next;
        for (i = 0; i < acpi_gbl_table_lists[table_type].count; i++) {
            /*
             * Only attempt to load table into namespace if it is not
             * already loaded!
             */
            if (!table_desc->loaded_into_namespace) {
                status =
                    acpi_ns_load_table(table_desc,
                                       acpi_gbl_root_node);
                if (ACPI_FAILURE(status)) {
                    break;
                }

                table_desc->loaded_into_namespace = TRUE;
            }

            table_desc = table_desc->next;
        }
        break;

    default:
        status = AE_SUPPORT;
        break;
    }

unlock_and_exit:
    (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
    return_ACPI_STATUS(status);
}

#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
 *
 * FUNCTION: acpi_load_namespace

@@ -288,6 +185,7 @@ acpi_status acpi_ns_load_namespace(void)

    return_ACPI_STATUS(status);
}
#endif

#ifdef ACPI_FUTURE_IMPLEMENTATION
/*******************************************************************************

@@ -5,7 +5,7 @@
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -6,7 +6,7 @@
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -5,7 +5,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -45,6 +45,7 @@
#include <acpi/acnamesp.h>
#include <acpi/acparser.h>
#include <acpi/acdispat.h>
#include <acpi/actables.h>

#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsparse")

@@ -62,14 +63,24 @@ ACPI_MODULE_NAME("nsparse")
 *
 ******************************************************************************/
acpi_status
acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
acpi_ns_one_complete_parse(acpi_native_uint pass_number,
                           acpi_native_uint table_index)
{
    union acpi_parse_object *parse_root;
    acpi_status status;
    acpi_native_uint aml_length;
    u8 *aml_start;
    struct acpi_walk_state *walk_state;
    struct acpi_table_header *table;
    acpi_owner_id owner_id;

    ACPI_FUNCTION_TRACE(ns_one_complete_parse);

    status = acpi_tb_get_owner_id(table_index, &owner_id);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Create and init a Root Node */

    parse_root = acpi_ps_create_scope_op();

@@ -79,26 +90,41 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)

    /* Create and initialize a new walk state */

    walk_state = acpi_ds_create_walk_state(table_desc->owner_id,
                                           NULL, NULL, NULL);
    walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
    if (!walk_state) {
        acpi_ps_free_op(parse_root);
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
                                   table_desc->aml_start,
                                   table_desc->aml_length, NULL,
                                   pass_number);
    status = acpi_get_table_by_index(table_index, &table);
    if (ACPI_FAILURE(status)) {
        acpi_ds_delete_walk_state(walk_state);
        acpi_ps_free_op(parse_root);
        return_ACPI_STATUS(status);
    }

    /* Table must consist of at least a complete header */

    if (table->length < sizeof(struct acpi_table_header)) {
        status = AE_BAD_HEADER;
    } else {
        aml_start = (u8 *) table + sizeof(struct acpi_table_header);
        aml_length = table->length - sizeof(struct acpi_table_header);
        status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
                                       aml_start, aml_length, NULL,
                                       (u8) pass_number);
    }

    if (ACPI_FAILURE(status)) {
        acpi_ds_delete_walk_state(walk_state);
        acpi_ps_delete_parse_tree(parse_root);
        return_ACPI_STATUS(status);
    }

    /* Parse the AML */

    ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
                      pass_number));
                      (unsigned)pass_number));
    status = acpi_ps_parse_aml(walk_state);

    acpi_ps_delete_parse_tree(parse_root);

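The header check in the hunk above amounts to simple pointer arithmetic: the AML payload of a DSDT/SSDT begins immediately after the standard ACPI table header, and its length is the table length minus that header. A stand-alone sketch of the same arithmetic (the struct here is deliberately simplified; the real struct acpi_table_header has more fields):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the ACPI table header; only the fields needed here. */
struct table_header {
    char signature[4];
    uint32_t length;    /* length of the whole table, header included */
};

int main(void)
{
    struct table_header hdr = { {'S', 'S', 'D', 'T'}, 64 };

    if (hdr.length < sizeof(hdr)) {
        fprintf(stderr, "table too short to hold a header\n");
        return 1;
    }

    size_t aml_offset = sizeof(hdr);                 /* AML starts right after the header */
    size_t aml_length = hdr.length - sizeof(hdr);    /* and runs to the end of the table */

    printf("AML at offset %zu, length %zu\n", aml_offset, aml_length);
    return 0;
}
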
@@ -119,7 +145,7 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
 ******************************************************************************/

acpi_status
acpi_ns_parse_table(struct acpi_table_desc *table_desc,
acpi_ns_parse_table(acpi_native_uint table_index,
                    struct acpi_namespace_node *start_node)
{
    acpi_status status;

@@ -134,10 +160,10 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
     * each Parser Op subtree is deleted when it is finished. This saves
     * a great deal of memory, and allows a small cache of parse objects
     * to service the entire parse. The second pass of the parse then
     * performs another complete parse of the AML..
     * performs another complete parse of the AML.
     */
    ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
    status = acpi_ns_one_complete_parse(1, table_desc);
    status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

@@ -152,7 +178,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
     * parse objects are all cached.
     */
    ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
    status = acpi_ns_one_complete_parse(2, table_desc);
    status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

@@ -5,7 +5,7 @@
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -321,7 +321,8 @@ acpi_ns_search_and_enter(u32 target_name,
     * even though there are a few bad names.
     */
    if (!acpi_ut_valid_acpi_name(target_name)) {
        target_name = acpi_ut_repair_name(target_name);
        target_name =
            acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));

        /* Report warning only if in strict mode or debug mode */

@@ -401,6 +402,10 @@ acpi_ns_search_and_enter(u32 target_name,
    }
#endif

    if (flags & ACPI_NS_TEMPORARY) {
        new_node->flags |= ANOBJ_TEMPORARY;
    }

    /* Install the new object into the parent's list of children */

    acpi_ns_install_node(walk_state, node, new_node, type);

@@ -6,7 +6,7 @@
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -770,13 +770,6 @@ void acpi_ns_terminate(void)
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));

    /*
     * 2) Now we can delete the ACPI tables
     */
    acpi_tb_delete_all_tables();
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));

    return_VOID;
}

Some files were not shown because too many files have changed in this diff.