baaca49f41
This is an attempt to provide an alternate mechanism for postponing a hotplug event instead of using a global mechanism like lock_cpu_hotplug.

The proposal is to add two new events, namely CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE. The notifications for these two events are sent out before and after a cpu_hotplug event respectively.

During the CPU_LOCK_ACQUIRE event, a cpu-hotplug-aware subsystem is supposed to acquire any per-subsystem hotcpu mutex (e.g. workqueue_mutex in kernel/workqueue.c). During the CPU_LOCK_RELEASE event, the cpu-hotplug-aware subsystem is supposed to release that per-subsystem hotcpu mutex.

The reasons for defining new events, as opposed to reusing existing events like CPU_UP_PREPARE/CPU_UP_FAILED/CPU_ONLINE for locking/unlocking of per-subsystem hotcpu mutexes, are as follows:

- CPU_LOCK_ACQUIRE: All hotcpu mutexes are taken before subsystems start handling pre-hotplug events like CPU_UP_PREPARE/CPU_DOWN_PREPARE etc, thus ensuring a clean handling of these events.

- CPU_LOCK_RELEASE: The hotcpu mutexes are released only after all subsystems have handled post-hotplug events like CPU_DOWN_FAILED, CPU_DEAD, CPU_ONLINE etc, thereby ensuring that there are no subsequent clashes amongst the interdependent subsystems after a cpu hotplug.

This patch also uses __raw_notifier_call_chain in _cpu_up to take care of the dependency between the two consecutive calls to raw_notifier_call_chain.

[akpm@linux-foundation.org: fix a bug]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
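As a rough, hypothetical illustration of the intended usage (not code from this commit), a cpu-hotplug-aware subsystem's notifier callback would take its private mutex on CPU_LOCK_ACQUIRE and drop it on CPU_LOCK_RELEASE, much as kernel/workqueue.c does with workqueue_mutex. The names example_hotcpu_mutex and example_cpu_callback below are invented for illustration:

/* Hypothetical cpu-hotplug-aware subsystem (illustration only). */
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(example_hotcpu_mutex);	/* per-subsystem hotcpu mutex */

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_LOCK_ACQUIRE:
		/* Sent before CPU_UP_PREPARE/CPU_DOWN_PREPARE: pin our state. */
		mutex_lock(&example_hotcpu_mutex);
		break;
	case CPU_LOCK_RELEASE:
		/* Sent after CPU_ONLINE/CPU_DEAD/CPU_DOWN_FAILED: let go. */
		mutex_unlock(&example_hotcpu_mutex);
		break;
	default:
		/* Ordinary hotplug events run with the mutex already held. */
		break;
	}
	return NOTIFY_OK;
}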
201 lines · 7.5 KiB · C
/*
 * Routines to manage notifier chains for passing status changes to any
 * interested routines. We need this instead of hard coded call lists so
 * that modules can poke their nose into the innards. The network devices
 * needed them so here they are for the rest of you.
 *
 *				Alan Cox <Alan.Cox@linux.org>
 */

#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/srcu.h>

/*
 * Notifier chains are of four types:
 *
 *	Atomic notifier chains: Chain callbacks run in interrupt/atomic
 *		context. Callouts are not allowed to block.
 *	Blocking notifier chains: Chain callbacks run in process context.
 *		Callouts are allowed to block.
 *	Raw notifier chains: There are no restrictions on callbacks,
 *		registration, or unregistration.  All locking and protection
 *		must be provided by the caller.
 *	SRCU notifier chains: A variant of blocking notifier chains, with
 *		the same restrictions.
 *
 * atomic_notifier_chain_register() may be called from an atomic context,
 * but blocking_notifier_chain_register() and srcu_notifier_chain_register()
 * must be called from a process context.  Ditto for the corresponding
 * _unregister() routines.
 *
 * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
 * and srcu_notifier_chain_unregister() _must not_ be called from within
 * the call chain.
 *
 * SRCU notifier chains are an alternative form of blocking notifier chains.
 * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
 * protection of the chain links.  This means there is _very_ low overhead
 * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
 * As compensation, srcu_notifier_chain_unregister() is rather expensive.
 * SRCU notifier chains should be used when the chain will be called very
 * often but notifier_blocks will seldom be removed.  Also, SRCU notifier
 * chains are slightly more difficult to use because they require special
 * runtime initialization.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

struct atomic_notifier_head {
	spinlock_t lock;
	struct notifier_block *head;
};

struct blocking_notifier_head {
	struct rw_semaphore rwsem;
	struct notifier_block *head;
};

struct raw_notifier_head {
	struct notifier_block *head;
};

struct srcu_notifier_head {
	struct mutex mutex;
	struct srcu_struct srcu;
	struct notifier_block *head;
};

#define ATOMIC_INIT_NOTIFIER_HEAD(name) do {	\
		spin_lock_init(&(name)->lock);	\
		(name)->head = NULL;		\
	} while (0)
#define BLOCKING_INIT_NOTIFIER_HEAD(name) do {	\
		init_rwsem(&(name)->rwsem);	\
		(name)->head = NULL;		\
	} while (0)
#define RAW_INIT_NOTIFIER_HEAD(name) do {	\
		(name)->head = NULL;		\
	} while (0)

/* srcu_notifier_heads must be initialized and cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name)	\
		cleanup_srcu_struct(&(name)->srcu);

#define ATOMIC_NOTIFIER_INIT(name) {				\
		.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
		.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) {				\
		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\
		.head = NULL }
#define RAW_NOTIFIER_INIT(name) {				\
		.head = NULL }
/* srcu_notifier_heads cannot be initialized statically */

#define ATOMIC_NOTIFIER_HEAD(name)				\
	struct atomic_notifier_head name =			\
		ATOMIC_NOTIFIER_INIT(name)
#define BLOCKING_NOTIFIER_HEAD(name)				\
	struct blocking_notifier_head name =			\
		BLOCKING_NOTIFIER_INIT(name)
#define RAW_NOTIFIER_HEAD(name)					\
	struct raw_notifier_head name =				\
		RAW_NOTIFIER_INIT(name)

#ifdef __KERNEL__

extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *nb);

extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *nb);

extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v);
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v);
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v);
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v);
extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
						/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

/*
 * Declared notifiers so far. I can imagine quite a few more chains
 * over time (eg laptop power reset chains, reboot chain (to clean
 * device units up), device [un]mount chain, module load/unload chain,
 * low memory chain, screenblank chain (for plug in modular screenblankers)
 * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
 */

/* netdevice notifier chain */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B

#define SYS_DOWN	0x0001	/* Notify of system down */
#define SYS_RESTART	SYS_DOWN
#define SYS_HALT	0x0002	/* Notify of system halt */
#define SYS_POWER_OFF	0x0003	/* Notify of system power off */

#define NETLINK_URELEASE	0x0001	/* Unicast netlink socket released */

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
#define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */

#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
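For completeness, here is a minimal, hypothetical sketch (not part of the header above) of how the declarations in this file are typically used: a statically defined blocking notifier chain with one registered callback, plus a note on the dynamic initialization that SRCU chains require. All example_* names are invented for illustration.

/* Hypothetical consumer of the notifier API declared in this header. */
#include <linux/init.h>
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);	/* static init is fine here */

static int example_event_handler(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	/* Blocking chains run callbacks in process context, so sleeping is OK. */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event_handler,
	.priority	= 0,
};

static int __init example_setup(void)
{
	blocking_notifier_chain_register(&example_chain, &example_nb);

	/* A producer later runs every registered callback in priority order. */
	blocking_notifier_call_chain(&example_chain, 0x0001, NULL);

	/*
	 * SRCU chains cannot be set up statically; they need
	 * srcu_init_notifier_head() at runtime and
	 * srcu_cleanup_notifier_head() before being freed.
	 */
	return 0;
}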