Merge branches 'acpi-pci', 'acpi-soc', 'acpi-ec' and 'acpi-osl'
* acpi-pci:
  ACPI, PCI: Penalize legacy IRQ used by ACPI SCI

* acpi-soc:
  ACPI / LPSS: Ignore 10ms delay for Braswell

* acpi-ec:
  ACPI / EC: Fix an issue caused by the serialized _Qxx evaluations

* acpi-osl:
  ACPI / osl: replace custom implementation of readq / writeq
commit 5d2a1a927d
6 changed files with 113 additions and 59 deletions
arch/x86/kernel/acpi/boot.c

@@ -445,6 +445,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
 	polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
 	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
 	/*
 	 * stash over-ride to indicate we've been here
drivers/acpi/acpi_lpss.c

@@ -60,6 +60,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER	BIT(2)
 #define LPSS_LTR		BIT(3)
 #define LPSS_SAVE_CTX		BIT(4)
+#define LPSS_NO_D3_DELAY	BIT(5)
 
 struct lpss_private_data;
 
@@ -156,6 +157,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
 	.flags = LPSS_SAVE_CTX,
 };
 
+static const struct lpss_device_desc bsw_pwm_dev_desc = {
+	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+};
+
 static const struct lpss_device_desc byt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.clk_con_id = "baudclk",
@@ -163,6 +168,14 @@ static const struct lpss_device_desc byt_uart_dev_desc = {
 	.setup = lpss_uart_setup,
 };
 
+static const struct lpss_device_desc bsw_uart_dev_desc = {
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+			| LPSS_NO_D3_DELAY,
+	.clk_con_id = "baudclk",
+	.prv_offset = 0x800,
+	.setup = lpss_uart_setup,
+};
+
 static const struct lpss_device_desc byt_spi_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x400,
@@ -178,8 +191,15 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
 	.setup = byt_i2c_setup,
 };
 
+static const struct lpss_device_desc bsw_i2c_dev_desc = {
+	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+	.prv_offset = 0x800,
+	.setup = byt_i2c_setup,
+};
+
 static struct lpss_device_desc bsw_spi_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+			| LPSS_NO_D3_DELAY,
 	.prv_offset = 0x400,
 	.setup = lpss_deassert_reset,
 };
@@ -214,11 +234,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "INT33FC", },
 
 	/* Braswell LPSS devices */
-	{ "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
-	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
+	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
-	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
 
 	/* Broadwell LPSS devices */
 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -558,9 +579,14 @@ static void acpi_lpss_restore_ctx(struct device *dev,
 	 * The following delay is needed or the subsequent write operations may
 	 * fail. The LPSS devices are actually PCI devices and the PCI spec
 	 * expects 10ms delay before the device can be accessed after D3 to D0
-	 * transition.
+	 * transition. However some platforms like BSW does not need this delay.
 	 */
-	msleep(10);
+	unsigned int delay = 10; /* default 10ms delay */
+
+	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
+		delay = 0;
+
+	msleep(delay);
 
 	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
 		unsigned long offset = i * sizeof(u32);
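The whole acpi_lpss.c change hangs off the new LPSS_NO_D3_DELAY descriptor flag: a device whose lpss_device_desc carries it skips the 10 ms settle time in acpi_lpss_restore_ctx() above. As a rough sketch of how another SoC would opt in, a hypothetical descriptor and its match entry might look like the following (the descriptor name, the "XXXXXXXX" ACPI _HID and the prv_offset are invented for the example; only the flag usage mirrors the patch):

/* Hypothetical example, not part of the patch: a UART that can be
 * accessed immediately after the D3hot -> D0 transition. */
static const struct lpss_device_desc example_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,	/* resume delay forced to 0 */
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,			/* made-up private-register offset */
	.setup = lpss_uart_setup,
};

/* ...and its entry inside acpi_lpss_device_ids[] (made-up ACPI _HID): */
	{ "XXXXXXXX", LPSS_ADDR(example_uart_dev_desc) },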
drivers/acpi/ec.c

@@ -161,8 +161,16 @@ struct transaction {
 	u8 flags;
 };
 
+struct acpi_ec_query {
+	struct transaction transaction;
+	struct work_struct work;
+	struct acpi_ec_query_handler *handler;
+};
+
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
 static void advance_transaction(struct acpi_ec *ec);
+static void acpi_ec_event_handler(struct work_struct *work);
+static void acpi_ec_event_processor(struct work_struct *work);
 
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
@@ -974,60 +982,90 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 }
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_run(void *cxt)
+static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
 {
-	struct acpi_ec_query_handler *handler = cxt;
+	struct acpi_ec_query *q;
+	struct transaction *t;
+
+	q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
+	if (!q)
+		return NULL;
+	INIT_WORK(&q->work, acpi_ec_event_processor);
+	t = &q->transaction;
+	t->command = ACPI_EC_COMMAND_QUERY;
+	t->rdata = pval;
+	t->rlen = 1;
+	return q;
+}
+
+static void acpi_ec_delete_query(struct acpi_ec_query *q)
+{
+	if (q) {
+		if (q->handler)
+			acpi_ec_put_query_handler(q->handler);
+		kfree(q);
+	}
+}
+
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+	struct acpi_ec_query_handler *handler = q->handler;
 
-	if (!handler)
-		return;
 	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
 	if (handler->func)
 		handler->func(handler->data);
 	else if (handler->handle)
 		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
 	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
-	acpi_ec_put_query_handler(handler);
+	acpi_ec_delete_query(q);
 }
 
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 {
 	u8 value = 0;
 	int result;
-	acpi_status status;
 	struct acpi_ec_query_handler *handler;
-	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
-				.wdata = NULL, .rdata = &value,
-				.wlen = 0, .rlen = 1};
+	struct acpi_ec_query *q;
+
+	q = acpi_ec_create_query(&value);
+	if (!q)
+		return -ENOMEM;
 
 	/*
 	 * Query the EC to find out which _Qxx method we need to evaluate.
 	 * Note that successful completion of the query causes the ACPI_EC_SCI
 	 * bit to be cleared (and thus clearing the interrupt source).
 	 */
-	result = acpi_ec_transaction(ec, &t);
-	if (result)
-		return result;
-	if (data)
-		*data = value;
+	result = acpi_ec_transaction(ec, &q->transaction);
 	if (!value)
-		return -ENODATA;
+		result = -ENODATA;
+	if (result)
+		goto err_exit;
 
 	mutex_lock(&ec->mutex);
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
 			/* have custom handler for this bit */
-			handler = acpi_ec_get_query_handler(handler);
+			q->handler = acpi_ec_get_query_handler(handler);
 			ec_dbg_evt("Query(0x%02x) scheduled",
-				   handler->query_bit);
-			status = acpi_os_execute((handler->func) ?
-				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
-				acpi_ec_run, handler);
-			if (ACPI_FAILURE(status))
+				   q->handler->query_bit);
+			/*
+			 * It is reported that _Qxx are evaluated in a
+			 * parallel way on Windows:
+			 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
+			 */
+			if (!schedule_work(&q->work))
 				result = -EBUSY;
 			break;
		}
 	}
 	mutex_unlock(&ec->mutex);
 
+err_exit:
+	if (result && q)
+		acpi_ec_delete_query(q);
+	if (data)
+		*data = value;
 	return result;
 }
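The essence of the EC change is visible in acpi_ec_query() above: each query now carries its own struct acpi_ec_query with an embedded work_struct, is handed to schedule_work(), and is freed by the worker (acpi_ec_event_processor() via acpi_ec_delete_query()), so several _Qxx evaluations can be in flight at once instead of queuing up behind one another. Stripped of the EC specifics, the allocate-one-work-item-per-event pattern looks roughly like the sketch below (my_event and the function names are invented for illustration, not taken from the patch):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* One work item per event, so one slow handler does not block the rest. */
struct my_event {
	struct work_struct work;
	u8 payload;
};

static void my_event_processor(struct work_struct *work)
{
	struct my_event *ev = container_of(work, struct my_event, work);

	/* ... run the handler for ev->payload here ... */

	kfree(ev);		/* the worker owns the event and frees it */
}

static int my_event_dispatch(u8 payload)
{
	struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->payload = payload;
	INIT_WORK(&ev->work, my_event_processor);
	if (!schedule_work(&ev->work)) {	/* false only if already queued */
		kfree(ev);
		return -EBUSY;
	}
	return 0;
}

The real code additionally takes a reference on the matching query handler (q->handler = acpi_ec_get_query_handler(handler)) before scheduling and drops it in acpi_ec_delete_query(), which is also what the err_exit path tears down when the transaction or schedule_work() fails.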
drivers/acpi/osl.c

@@ -43,6 +43,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
 
 #include "internal.h"
 
@@ -944,21 +945,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
 
 EXPORT_SYMBOL(acpi_os_write_port);
 
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
-	return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
-	u64 l, h;
-	l = readl(addr);
-	h = readl(addr+4);
-	return l | (h << 32);
-}
-#endif
-
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 {
@@ -991,7 +977,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 		*(u32 *) value = readl(virt_addr);
 		break;
 	case 64:
-		*(u64 *) value = read64(virt_addr);
+		*(u64 *) value = readq(virt_addr);
 		break;
 	default:
 		BUG();
@@ -1005,19 +991,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 	return AE_OK;
 }
 
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-	writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-	writel(val, addr);
-	writel(val>>32, addr+4);
-}
-#endif
-
 acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
 {
@@ -1046,7 +1019,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
 		writel(value, virt_addr);
 		break;
 	case 64:
-		write64(value, virt_addr);
+		writeq(value, virt_addr);
 		break;
 	default:
 		BUG();
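The osl.c deletions work because <asm-generic/io-64-nonatomic-lo-hi.h>, included in the first hunk of this file, already provides the same fallback that the local read64()/write64() helpers implemented: when the architecture defines native readq()/writeq() the header leaves them alone, otherwise it defines them in terms of two 32-bit accesses, low word first (and therefore not atomic). A simplified sketch of that fallback, paraphrased rather than copied from the header:

/* Rough paraphrase of asm-generic/io-64-nonatomic-lo-hi.h (not verbatim):
 * only relevant when the architecture has no native 64-bit MMIO accessors.
 * The two halves are accessed low word first, so the combined 64-bit
 * access is not atomic with respect to the device.
 */
#ifndef readq
static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
	u32 low = readl(addr);
	u32 high = readl(addr + 4);

	return low + ((u64)high << 32);
}
#define readq lo_hi_readq
#endif

#ifndef writeq
static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);		/* low 32 bits */
	writel(val >> 32, addr + 4);	/* high 32 bits */
}
#define writeq lo_hi_writeq
#endif

That is essentially what the deleted helpers did by hand, which is why acpi_os_read_memory() and acpi_os_write_memory() can now call readq()/writeq() directly.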
drivers/acpi/pci_link.c

@@ -821,6 +821,22 @@ void acpi_penalize_isa_irq(int irq, int active)
 	}
 }
 
+/*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
+ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
+ * PCI IRQs.
+ */
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+		if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+		    polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+		else
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+	}
+}
+
 /*
  * Over-ride default table to reserve additional IRQs for use by ISA
  * e.g. acpi_irq_isa=5
include/linux/acpi.h

@@ -217,7 +217,7 @@ struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
-
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);