[CELL] pmi: remove support for multiple devices.

The pmi driver has been simplified by removing support for multiple devices.
As there is never more than one pmi device per machine, there is no need to
specify the device when listening for or sending messages.

This way the caller (cbe_cpufreq) doesn't need to scan the device tree.
When registering the handler on a board without a pmi
interface, pmi.c will just return -ENODEV.
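
For reference, the caller-side effect of this change, distilled from the
cbe_cpufreq hunks below (a before/after view of code that is already in the
diff, not anything new):

Before, the caller had to locate the pmi device itself:

        np = of_find_node_by_type(NULL, "ibm,pmi");
        pmi_dev = of_find_device_by_node(np);
        if (pmi_dev)
                pmi_register_handler(pmi_dev, &cbe_pmi_handler);

After, pmi.c keeps track of its single device, and registration simply fails
with -ENODEV on a board without a pmi interface:

        cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;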

The patch that fixes the cell_defconfig breakage has been split out of the
earlier version of this patch, so this is the version that applies cleanly
on top of it.

Signed-off-by: Christian Krafft <krafft@de.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
commit 813f90728e
parent c1158e63df
Author: Christian Krafft, 2007-07-20 21:39:18 +02:00 (committed by Arnd Bergmann)

3 changed files with 40 additions and 55 deletions

cbe_cpufreq.c:

@@ -68,11 +68,12 @@ static u64 MIC_Slow_Next_Timer_table[] = {
 };
 static unsigned int pmi_frequency_limit = 0;
 /*
  * hardware specific functions
  */
-static struct of_device *pmi_dev;
+static bool cbe_cpufreq_has_pmi;
 #ifdef CONFIG_PPC_PMI
 static int set_pmode_pmi(int cpu, unsigned int pmode)
@@ -91,7 +92,7 @@ static int set_pmode_pmi(int cpu, unsigned int pmode)
         time = (u64) get_cycles();
 #endif
-        pmi_send_message(pmi_dev, pmi_msg);
+        pmi_send_message(pmi_msg);
         ret = pmi_msg.data2;
         pr_debug("PMI returned slow mode %d\n", ret);
@@ -157,16 +158,16 @@ static int set_pmode_reg(int cpu, unsigned int pmode)
         return 0;
 }
-static int set_pmode(int cpu, unsigned int slow_mode) {
+static int set_pmode(int cpu, unsigned int slow_mode)
+{
 #ifdef CONFIG_PPC_PMI
-        if (pmi_dev)
+        if (cbe_cpufreq_has_pmi)
                 return set_pmode_pmi(cpu, slow_mode);
-        else
 #endif
                 return set_pmode_reg(cpu, slow_mode);
 }
-static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
         u8 cpu;
         u8 cbe_pmode_new;
@@ -253,7 +254,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
         cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-        if (pmi_dev) {
+        if (cbe_cpufreq_has_pmi) {
                 /* frequency might get limited later, initialize limit with max_freq */
                 pmi_frequency_limit = max_freq;
                 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
@@ -265,7 +266,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-        if (pmi_dev)
+        if (cbe_cpufreq_has_pmi)
                 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
         cpufreq_frequency_table_put_attr(policy->cpu);
@@ -326,29 +327,20 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
 static int __init cbe_cpufreq_init(void)
 {
-#ifdef CONFIG_PPC_PMI
-        struct device_node *np;
-#endif
         if (!machine_is(cell))
                 return -ENODEV;
-#ifdef CONFIG_PPC_PMI
-        np = of_find_node_by_type(NULL, "ibm,pmi");
-        pmi_dev = of_find_device_by_node(np);
-        if (pmi_dev)
-                pmi_register_handler(pmi_dev, &cbe_pmi_handler);
-#endif
+        cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
         return cpufreq_register_driver(&cbe_cpufreq_driver);
 }
 static void __exit cbe_cpufreq_exit(void)
 {
-#ifdef CONFIG_PPC_PMI
-        if (pmi_dev)
-                pmi_unregister_handler(pmi_dev, &cbe_pmi_handler);
-#endif
         cpufreq_unregister_driver(&cbe_cpufreq_driver);
+        if (cbe_cpufreq_has_pmi)
+                pmi_unregister_handler(&cbe_pmi_handler);
 }
 module_init(cbe_cpufreq_init);

pmi.c:

@@ -48,15 +48,13 @@ struct pmi_data {
         struct work_struct work;
 };
+static struct pmi_data *data;
 static int pmi_irq_handler(int irq, void *dev_id)
 {
-        struct pmi_data *data;
         u8 type;
         int rc;
-        data = dev_id;
         spin_lock(&data->pmi_spinlock);
         type = ioread8(data->pmi_reg + PMI_READ_TYPE);
@@ -111,16 +109,13 @@ MODULE_DEVICE_TABLE(of, pmi_match);
 static void pmi_notify_handlers(struct work_struct *work)
 {
-        struct pmi_data *data;
         struct pmi_handler *handler;
-        data = container_of(work, struct pmi_data, work);
         spin_lock(&data->handler_spinlock);
         list_for_each_entry(handler, &data->handler, node) {
                 pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler);
                 if (handler->type == data->msg.type)
-                        handler->handle_pmi_message(data->dev, data->msg);
+                        handler->handle_pmi_message(data->msg);
         }
         spin_unlock(&data->handler_spinlock);
 }
@@ -129,9 +124,14 @@ static int pmi_of_probe(struct of_device *dev,
                         const struct of_device_id *match)
 {
         struct device_node *np = dev->node;
-        struct pmi_data *data;
         int rc;
+        if (data) {
+                printk(KERN_ERR "pmi: driver has already been initialized.\n");
+                rc = -EBUSY;
+                goto out;
+        }
         data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL);
         if (!data) {
                 printk(KERN_ERR "pmi: could not allocate memory.\n");
@@ -154,7 +154,6 @@ static int pmi_of_probe(struct of_device *dev,
         INIT_WORK(&data->work, pmi_notify_handlers);
-        dev->dev.driver_data = data;
         data->dev = dev;
         data->irq = irq_of_parse_and_map(np, 0);
@@ -164,7 +163,7 @@ static int pmi_of_probe(struct of_device *dev,
                 goto error_cleanup_iomap;
         }
-        rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", data);
+        rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL);
         if (rc) {
                 printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n",
                                 data->irq, rc);
@@ -187,12 +186,9 @@ static int pmi_of_probe(struct of_device *dev,
 static int pmi_of_remove(struct of_device *dev)
 {
-        struct pmi_data *data;
         struct pmi_handler *handler, *tmp;
-        data = dev->dev.driver_data;
-        free_irq(data->irq, data);
+        free_irq(data->irq, NULL);
         iounmap(data->pmi_reg);
         spin_lock(&data->handler_spinlock);
@@ -202,7 +198,8 @@ static int pmi_of_remove(struct of_device *dev)
         spin_unlock(&data->handler_spinlock);
-        kfree(dev->dev.driver_data);
+        kfree(data);
+        data = NULL;
         return 0;
 }
@@ -226,13 +223,13 @@ static void __exit pmi_module_exit(void)
 }
 module_exit(pmi_module_exit);
-void pmi_send_message(struct of_device *device, pmi_message_t msg)
+int pmi_send_message(pmi_message_t msg)
 {
-        struct pmi_data *data;
         unsigned long flags;
         DECLARE_COMPLETION_ONSTACK(completion);
-        data = device->dev.driver_data;
+        if (!data)
+                return -ENODEV;
         mutex_lock(&data->msg_mutex);
@@ -256,30 +253,26 @@ void pmi_send_message(struct of_device *device, pmi_message_t msg)
         data->completion = NULL;
         mutex_unlock(&data->msg_mutex);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pmi_send_message);
-void pmi_register_handler(struct of_device *device,
-                struct pmi_handler *handler)
+int pmi_register_handler(struct pmi_handler *handler)
 {
-        struct pmi_data *data;
-        data = device->dev.driver_data;
         if (!data)
-                return;
+                return -ENODEV;
         spin_lock(&data->handler_spinlock);
         list_add_tail(&handler->node, &data->handler);
         spin_unlock(&data->handler_spinlock);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pmi_register_handler);
-void pmi_unregister_handler(struct of_device *device,
-                struct pmi_handler *handler)
+void pmi_unregister_handler(struct pmi_handler *handler)
 {
-        struct pmi_data *data;
-        data = device->dev.driver_data;
         if (!data)
                 return;

pmi.h:

@@ -55,13 +55,13 @@ typedef struct {
 struct pmi_handler {
         struct list_head node;
         u8 type;
-        void (*handle_pmi_message) (struct of_device *, pmi_message_t);
+        void (*handle_pmi_message) (pmi_message_t);
 };
-void pmi_register_handler(struct of_device *, struct pmi_handler *);
-void pmi_unregister_handler(struct of_device *, struct pmi_handler *);
-void pmi_send_message(struct of_device *, pmi_message_t);
+int pmi_register_handler(struct pmi_handler *);
+void pmi_unregister_handler(struct pmi_handler *);
+int pmi_send_message(pmi_message_t);
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PMI_H */
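
Taken together, the new prototypes give callers a device-free interface. A
minimal client might look like the sketch below; the include paths, handler
name, message type value and module wiring are illustrative assumptions, and
only the pmi_handler layout and the three pmi_* calls come from the header
above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/pmi.h>    /* assumed include path for this header */

/* hypothetical handler; the type value is an example, not part of this patch */
static void example_handle_pmi_message(pmi_message_t msg)
{
        pr_debug("pmi: received message of type %d\n", msg.type);
}

static struct pmi_handler example_pmi_handler = {
        .type               = 3,
        .handle_pmi_message = example_handle_pmi_message,
};

static int __init example_init(void)
{
        /* returns -ENODEV on a board without a pmi interface */
        return pmi_register_handler(&example_pmi_handler);
}

static void __exit example_exit(void)
{
        pmi_unregister_handler(&example_pmi_handler);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");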