[PATCH] spin/rwlock init cleanups
locking init cleanups:

 - convert "= SPIN_LOCK_UNLOCKED" to spin_lock_init() or DEFINE_SPINLOCK()
 - convert rwlocks in a similar manner

this patch was generated automatically.

Motivation:

 - cleanliness
 - lockdep needs control of lock initialization, which the open-coded
   variants do not give
 - it's also useful for -rt and for lock debugging in general

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
b6cd0b772d
commit
34af946a22
51 changed files with 59 additions and 59 deletions
|
@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
|
|||
int sn_force_interrupt_flag = 1;
|
||||
extern int sn_ioif_inited;
|
||||
struct list_head **sn_irq_lh;
|
||||
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
|
||||
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
|
||||
|
||||
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
|
||||
struct sn_irq_info *sn_irq_info,
|
||||
|
|
|
@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void)
|
|||
dvpe();
|
||||
dmt();
|
||||
|
||||
freeIPIq.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&freeIPIq.lock);
|
||||
|
||||
/*
|
||||
* We probably don't have as many VPEs as we do SMP "CPUs",
|
||||
|
@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void)
|
|||
*/
|
||||
for (i=0; i<NR_CPUS; i++) {
|
||||
IPIQ[i].head = IPIQ[i].tail = NULL;
|
||||
IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&IPIQ[i].lock);
|
||||
IPIQ[i].depth = 0;
|
||||
ipi_timer_latch[i] = 0;
|
||||
}
|
||||
|
|
|
@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa)
|
|||
|
||||
memset(lscsa, 0, sizeof(struct spu_lscsa));
|
||||
csa->lscsa = lscsa;
|
||||
csa->register_lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&csa->register_lock);
|
||||
|
||||
/* Set LS pages reserved to allow for user-space mapping. */
|
||||
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
|
||||
|
|
|
@ -546,7 +546,7 @@ struct pmf_device {
|
|||
};
|
||||
|
||||
static LIST_HEAD(pmf_devices);
|
||||
static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(pmf_lock);
|
||||
static DEFINE_MUTEX(pmf_irq_mutex);
|
||||
|
||||
static void pmf_release_device(struct kref *kref)
|
||||
|
|
|
@ -35,7 +35,7 @@
|
|||
*/
|
||||
|
||||
/* EEH event workqueue setup. */
|
||||
static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(eeh_eventlist_lock);
|
||||
LIST_HEAD(eeh_eventlist);
|
||||
static void eeh_thread_launcher(void *);
|
||||
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
|
||||
static void __iomem *mmio_nvram_start;
|
||||
static long mmio_nvram_len;
|
||||
static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(mmio_nvram_lock);
|
||||
|
||||
static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
|
||||
{
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
|
||||
extern volatile unsigned long wall_jiffies;
|
||||
|
||||
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(rtc_lock);
|
||||
EXPORT_SYMBOL(rtc_lock);
|
||||
|
||||
|
||||
|
|
|
@ -461,7 +461,7 @@ void show_code(unsigned int *pc)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(die_lock);
|
||||
|
||||
void die(const char * str, struct pt_regs * regs, long err)
|
||||
{
|
||||
|
|
|
@ -43,7 +43,7 @@ typedef struct drm_mem_stats {
|
|||
unsigned long bytes_freed;
|
||||
} drm_mem_stats_t;
|
||||
|
||||
static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(drm_mem_lock);
|
||||
static unsigned long drm_ram_available = 0; /* In pages */
|
||||
static unsigned long drm_ram_used = 0;
|
||||
static drm_mem_stats_t drm_mem_stats[] =
|
||||
|
|
|
@ -557,7 +557,7 @@ via_init_dmablit(drm_device_t *dev)
|
|||
blitq->num_outstanding = 0;
|
||||
blitq->is_active = 0;
|
||||
blitq->aborting = 0;
|
||||
blitq->blit_lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&blitq->blit_lock);
|
||||
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
|
||||
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ static int invalid_lilo_config;
|
|||
/* The ISA boards do window flipping into the same spaces so its only sane
|
||||
with a single lock. It's still pretty efficient */
|
||||
|
||||
static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(epca_lock);
|
||||
|
||||
/* -----------------------------------------------------------------------
|
||||
MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
|
||||
|
|
|
@ -301,7 +301,7 @@ static struct tty_operations moxa_ops = {
|
|||
.tiocmset = moxa_tiocmset,
|
||||
};
|
||||
|
||||
static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(moxa_lock);
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
|
||||
|
|
|
@ -2477,7 +2477,7 @@ static int __init specialix_init(void)
|
|||
#endif
|
||||
|
||||
for (i = 0; i < SX_NBOARD; i++)
|
||||
sx_board[i].lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&sx_board[i].lock);
|
||||
|
||||
if (sx_init_drivers()) {
|
||||
func_exit();
|
||||
|
|
|
@ -2320,7 +2320,7 @@ static int sx_init_portstructs (int nboards, int nports)
|
|||
#ifdef NEW_WRITE_LOCKING
|
||||
port->gs.port_write_mutex = MUTEX;
|
||||
#endif
|
||||
port->gs.driver_lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&port->gs.driver_lock);
|
||||
/*
|
||||
* Initializing wait queue
|
||||
*/
|
||||
|
|
|
@ -981,7 +981,7 @@ void gigaset_stop(struct cardstate *cs)
|
|||
EXPORT_SYMBOL_GPL(gigaset_stop);
|
||||
|
||||
static LIST_HEAD(drivers);
|
||||
static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(driver_lock);
|
||||
|
||||
struct cardstate *gigaset_get_cs_by_id(int id)
|
||||
{
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
#include <linux/leds.h>
|
||||
#include "leds.h"
|
||||
|
||||
rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
|
||||
DEFINE_RWLOCK(leds_list_lock);
|
||||
LIST_HEAD(leds_list);
|
||||
|
||||
EXPORT_SYMBOL_GPL(leds_list);
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
/*
|
||||
* Nests outside led_cdev->trigger_lock
|
||||
*/
|
||||
static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
|
||||
static DEFINE_RWLOCK(triggers_list_lock);
|
||||
static LIST_HEAD(trigger_list);
|
||||
|
||||
ssize_t led_trigger_store(struct class_device *dev, const char *buf,
|
||||
|
|
|
@ -85,7 +85,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
|
|||
}
|
||||
memset(sp, 0, sizeof(struct service_processor));
|
||||
|
||||
sp->lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&sp->lock);
|
||||
INIT_LIST_HEAD(&sp->command_queue);
|
||||
|
||||
pci_set_drvdata(pdev, (void *)sp);
|
||||
|
|
|
@ -157,7 +157,7 @@ MODULE_LICENSE("Dual MPL/GPL");
|
|||
|
||||
static int pcmcia_schlvl = PCMCIA_SCHLVL;
|
||||
|
||||
static spinlock_t events_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(events_lock);
|
||||
|
||||
|
||||
#define PCMCIA_SOCKET_KEY_5V 1
|
||||
|
@ -644,7 +644,7 @@ static struct platform_device m8xx_device = {
|
|||
};
|
||||
|
||||
static u32 pending_events[PCMCIA_SOCKETS_NO];
|
||||
static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(pending_event_lock);
|
||||
|
||||
static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs)
|
||||
{
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
* These interrupt-safe spinlocks protect all accesses to RIO
|
||||
* configuration space and doorbell access.
|
||||
*/
|
||||
static spinlock_t rio_config_lock = SPIN_LOCK_UNLOCKED;
|
||||
static spinlock_t rio_doorbell_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(rio_config_lock);
|
||||
static DEFINE_SPINLOCK(rio_doorbell_lock);
|
||||
|
||||
/*
|
||||
* Wrappers for all RIO configuration access functions. They just check
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
|
||||
static unsigned long rtc_freq = 1024;
|
||||
static struct rtc_time rtc_alarm;
|
||||
static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(sa1100_rtc_lock);
|
||||
|
||||
static int rtc_update_alarm(struct rtc_time *alrm)
|
||||
{
|
||||
|
|
|
@ -93,7 +93,7 @@ static void __iomem *rtc2_base;
|
|||
|
||||
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
|
||||
|
||||
static spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(rtc_lock);
|
||||
static char rtc_name[] = "RTC";
|
||||
static unsigned long periodic_frequency;
|
||||
static unsigned long periodic_count;
|
||||
|
|
|
@ -89,7 +89,7 @@ struct eerbuffer {
|
|||
};
|
||||
|
||||
static LIST_HEAD(bufferlist);
|
||||
static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(bufferlock);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
|
||||
|
||||
/*
|
||||
|
|
|
@ -5733,7 +5733,7 @@ module_init(ata_init);
|
|||
module_exit(ata_exit);
|
||||
|
||||
static unsigned long ratelimit_time;
|
||||
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(ata_ratelimit_lock);
|
||||
|
||||
int ata_ratelimit(void)
|
||||
{
|
||||
|
|
|
@ -26,7 +26,7 @@ static DECLARE_RWSEM(ioc3_devices_rwsem);
|
|||
|
||||
static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES];
|
||||
static struct ioc3_submodule *ioc3_ethernet;
|
||||
static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED;
|
||||
static DEFINE_RWLOCK(ioc3_submodules_lock);
|
||||
|
||||
/* NIC probing code */
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
|
||||
static int hp680bl_suspended;
|
||||
static int current_intensity = 0;
|
||||
static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(bl_lock);
|
||||
static struct backlight_device *hp680_backlight_device;
|
||||
|
||||
static void hp680bl_send_intensity(struct backlight_device *bd)
|
||||
|
|
|
@ -123,7 +123,7 @@ static void release_stateid(struct nfs4_stateid *stp, int flags);
|
|||
*/
|
||||
|
||||
/* recall_lock protects the del_recall_lru */
|
||||
static spinlock_t recall_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(recall_lock);
|
||||
static struct list_head del_recall_lru;
|
||||
|
||||
static void
|
||||
|
|
|
@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem);
|
|||
* multiple hb threads are watching multiple regions. A node is live
|
||||
* whenever any of the threads sees activity from the node in its region.
|
||||
*/
|
||||
static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(o2hb_live_lock);
|
||||
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
|
||||
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
|
||||
static LIST_HEAD(o2hb_node_events);
|
||||
|
|
|
@ -108,7 +108,7 @@
|
|||
##args); \
|
||||
} while (0)
|
||||
|
||||
static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED;
|
||||
static DEFINE_RWLOCK(o2net_handler_lock);
|
||||
static struct rb_root o2net_handler_tree = RB_ROOT;
|
||||
|
||||
static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
|
||||
|
|
|
@ -88,7 +88,7 @@ static void **dlm_alloc_pagevec(int pages)
|
|||
*
|
||||
*/
|
||||
|
||||
spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(dlm_domain_lock);
|
||||
LIST_HEAD(dlm_domains);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
|
||||
|
||||
|
|
|
@ -53,7 +53,7 @@
|
|||
#define MLOG_MASK_PREFIX ML_DLM
|
||||
#include "cluster/masklog.h"
|
||||
|
||||
static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(dlm_cookie_lock);
|
||||
static u64 dlm_next_cookie = 1;
|
||||
|
||||
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
|
||||
|
|
|
@ -98,8 +98,8 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
|
|||
|
||||
static u64 dlm_get_next_mig_cookie(void);
|
||||
|
||||
static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
|
||||
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(dlm_reco_state_lock);
|
||||
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
|
||||
static u64 dlm_mig_cookie = 1;
|
||||
|
||||
static u64 dlm_get_next_mig_cookie(void)
|
||||
|
|
|
@ -242,7 +242,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
|
|||
mlog_exit_void();
|
||||
}
|
||||
|
||||
static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
|
||||
|
||||
static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
|
||||
struct ocfs2_dlm_debug *dlm_debug)
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
|
||||
#include "buffer_head_io.h"
|
||||
|
||||
spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(trans_inc_lock);
|
||||
|
||||
static int ocfs2_force_read_journal(struct inode *inode);
|
||||
static int ocfs2_recover_node(struct ocfs2_super *osb,
|
||||
|
|
|
@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
|
|||
set_hae(msb); \
|
||||
}
|
||||
|
||||
static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(t2_hae_lock);
|
||||
|
||||
__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
|
||||
{
|
||||
|
|
|
@ -818,7 +818,7 @@ static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
|
|||
*/
|
||||
unsigned int audit_serial(void)
|
||||
{
|
||||
static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(serial_lock);
|
||||
static unsigned int serial = 0;
|
||||
|
||||
unsigned long flags;
|
||||
|
|
|
@ -45,7 +45,7 @@ static struct mem_section *sparse_index_alloc(int nid)
|
|||
|
||||
static int sparse_index_init(unsigned long section_nr, int nid)
|
||||
{
|
||||
static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(index_init_lock);
|
||||
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
|
||||
struct mem_section *section;
|
||||
int ret = 0;
|
||||
|
|
|
@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
|
|||
(strict & RT6_SELECT_F_REACHABLE) &&
|
||||
last && last != rt0) {
|
||||
/* no entries matched; do round-robin */
|
||||
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(lock);
|
||||
spin_lock(&lock);
|
||||
*head = rt0->u.next;
|
||||
rt0->u.next = last->u.next;
|
||||
|
|
|
@ -70,7 +70,7 @@
|
|||
# define RPCDBG_FACILITY RPCDBG_AUTH
|
||||
#endif
|
||||
|
||||
spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(krb5_seq_lock);
|
||||
|
||||
u32
|
||||
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
|
||||
|
|
|
@ -117,7 +117,7 @@ struct bclink {
|
|||
static struct bcbearer *bcbearer = NULL;
|
||||
static struct bclink *bclink = NULL;
|
||||
static struct link *bcl = NULL;
|
||||
static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(bc_lock);
|
||||
|
||||
char tipc_bclink_name[] = "multicast-link";
|
||||
|
||||
|
@ -796,7 +796,7 @@ int tipc_bclink_init(void)
|
|||
memset(bclink, 0, sizeof(struct bclink));
|
||||
INIT_LIST_HEAD(&bcl->waiting_ports);
|
||||
bcl->next_out_no = 1;
|
||||
bclink->node.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&bclink->node.lock);
|
||||
bcl->owner = &bclink->node;
|
||||
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
|
||||
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
|
||||
|
|
|
@ -566,7 +566,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
|
|||
b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
|
||||
bcast_scope, 2);
|
||||
}
|
||||
b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&b_ptr->publ.lock);
|
||||
write_unlock_bh(&tipc_net_lock);
|
||||
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
|
||||
name, addr_string_fill(addr_string, bcast_scope), priority);
|
||||
|
|
|
@ -63,7 +63,7 @@ struct manager {
|
|||
|
||||
static struct manager mng = { 0};
|
||||
|
||||
static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(config_lock);
|
||||
|
||||
static const void *req_tlv_area; /* request message TLV area */
|
||||
static int req_tlv_space; /* request message TLV area size */
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
#define MAX_STRING 512
|
||||
|
||||
static char print_string[MAX_STRING];
|
||||
static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(print_lock);
|
||||
|
||||
static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
|
||||
struct print_buf *TIPC_CONS = &cons_buf;
|
||||
|
|
|
@ -44,7 +44,7 @@ struct queue_item {
|
|||
|
||||
static kmem_cache_t *tipc_queue_item_cache;
|
||||
static struct list_head signal_queue_head;
|
||||
static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(qitem_lock);
|
||||
static int handler_enabled = 0;
|
||||
|
||||
static void process_signal_queue(unsigned long dummy);
|
||||
|
|
|
@ -101,7 +101,7 @@ struct name_table {
|
|||
|
||||
static struct name_table table = { NULL } ;
|
||||
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
|
||||
rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
|
||||
DEFINE_RWLOCK(tipc_nametbl_lock);
|
||||
|
||||
|
||||
static int hash(int x)
|
||||
|
@ -172,7 +172,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
|
|||
}
|
||||
|
||||
memset(nseq, 0, sizeof(*nseq));
|
||||
nseq->lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&nseq->lock);
|
||||
nseq->type = type;
|
||||
nseq->sseqs = sseq;
|
||||
dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
|
||||
|
|
|
@ -115,7 +115,7 @@
|
|||
* - A local spin_lock protecting the queue of subscriber events.
|
||||
*/
|
||||
|
||||
rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
|
||||
DEFINE_RWLOCK(tipc_net_lock);
|
||||
struct network tipc_net = { NULL };
|
||||
|
||||
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
|
||||
|
|
|
@ -77,7 +77,7 @@ struct node *tipc_node_create(u32 addr)
|
|||
|
||||
memset(n_ptr, 0, sizeof(*n_ptr));
|
||||
n_ptr->addr = addr;
|
||||
n_ptr->lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&n_ptr->lock);
|
||||
INIT_LIST_HEAD(&n_ptr->nsub);
|
||||
n_ptr->owner = c_ptr;
|
||||
tipc_cltr_attach_node(c_ptr, n_ptr);
|
||||
|
|
|
@ -57,8 +57,8 @@
|
|||
static struct sk_buff *msg_queue_head = NULL;
|
||||
static struct sk_buff *msg_queue_tail = NULL;
|
||||
|
||||
spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
|
||||
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
|
||||
DEFINE_SPINLOCK(tipc_port_list_lock);
|
||||
static DEFINE_SPINLOCK(queue_lock);
|
||||
|
||||
static LIST_HEAD(ports);
|
||||
static void port_handle_node_down(unsigned long ref);
|
||||
|
|
|
@ -63,7 +63,7 @@
|
|||
|
||||
struct ref_table tipc_ref_table = { NULL };
|
||||
|
||||
static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
|
||||
static DEFINE_RWLOCK(ref_table_lock);
|
||||
|
||||
/**
|
||||
* tipc_ref_table_init - create reference table for objects
|
||||
|
@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
|
|||
index_mask = sz - 1;
|
||||
for (i = sz - 1; i >= 0; i--) {
|
||||
table[i].object = NULL;
|
||||
table[i].lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&table[i].lock);
|
||||
table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
|
||||
}
|
||||
tipc_ref_table.entries = table;
|
||||
|
|
|
@ -457,7 +457,7 @@ int tipc_subscr_start(void)
|
|||
int res = -1;
|
||||
|
||||
memset(&topsrv, 0, sizeof (topsrv));
|
||||
topsrv.lock = SPIN_LOCK_UNLOCKED;
|
||||
spin_lock_init(&topsrv.lock);
|
||||
INIT_LIST_HEAD(&topsrv.subscriber_list);
|
||||
|
||||
spin_lock_bh(&topsrv.lock);
|
||||
|
|
|
@ -67,7 +67,7 @@ struct tipc_user {
|
|||
|
||||
static struct tipc_user *users = NULL;
|
||||
static u32 next_free_user = MAX_USERID + 1;
|
||||
static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
|
||||
static DEFINE_SPINLOCK(reg_lock);
|
||||
|
||||
/**
|
||||
* reg_init - create TIPC user registry (but don't activate it)
|
||||
|
|
Loading…
Reference in a new issue