[NetLabel]: make the CIPSOv4 cache spinlocks bottom half safe

The CIPSOv4 cache traversal routines are triggered both by userspace events
(cache invalidation due to DOI removal or an updated SELinux policy) and by
network packet processing events.  As a result there is a problem with the
existing CIPSOv4 cache spinlocks: they are not bottom-half/softirq safe, so a
softirq that fires on a CPU already holding one of the locks can deadlock
trying to acquire it again.  This patch converts the CIPSOv4 cache
spin_[un]lock() calls into spin_[un]lock_bh() calls to address this problem.
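
To illustrate the hazard: with a plain spin_lock(), a softirq arriving on the
CPU that holds the lock will spin on it forever, because the interrupted lock
holder cannot resume until the softirq returns.  spin_lock_bh() disables
bottom-half processing on the local CPU for the duration of the critical
section.  A minimal sketch of the pattern follows (hypothetical names, not
code from this patch):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Hypothetical cache shared between process context (userspace-
     * triggered invalidation) and softirq context (packet processing).
     */
    static DEFINE_SPINLOCK(example_cache_lock);
    static LIST_HEAD(example_cache_list);

    /* Process context: e.g. invalidation after a DOI removal. */
    static void example_cache_invalidate(void)
    {
            /* A plain spin_lock() here could deadlock against
             * example_cache_lookup() running in a softirq on this CPU;
             * spin_lock_bh() disables local bottom halves and closes
             * that window.
             */
            spin_lock_bh(&example_cache_lock);
            /* ... walk example_cache_list and free entries ... */
            spin_unlock_bh(&example_cache_lock);
    }

    /* Softirq context: e.g. a cache lookup during packet receive. */
    static void example_cache_lookup(void)
    {
            /* spin_lock_bh() is also safe to call from softirq context;
             * it simply nests the local BH-disable count.
             */
            spin_lock_bh(&example_cache_lock);
            /* ... search example_cache_list ... */
            spin_unlock_bh(&example_cache_lock);
    }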

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

commit 609c92feea
parent 14a72f53fb
Author: Paul Moore
Date:   2006-09-25 15:52:37 -07:00
Committer: David S. Miller

@@ -259,7 +259,7 @@ void cipso_v4_cache_invalidate(void)
 	u32 iter;
 
 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
-		spin_lock(&cipso_v4_cache[iter].lock);
+		spin_lock_bh(&cipso_v4_cache[iter].lock);
 		list_for_each_entry_safe(entry,
 					 tmp_entry,
 					 &cipso_v4_cache[iter].list, list) {
@@ -267,7 +267,7 @@ void cipso_v4_cache_invalidate(void)
 			cipso_v4_cache_entry_free(entry);
 		}
 		cipso_v4_cache[iter].size = 0;
-		spin_unlock(&cipso_v4_cache[iter].lock);
+		spin_unlock_bh(&cipso_v4_cache[iter].lock);
 	}
 
 	return;
@@ -309,7 +309,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 	hash = cipso_v4_map_cache_hash(key, key_len);
 	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
 		    entry->key_len == key_len &&
 		    memcmp(entry->key, key, key_len) == 0) {
@@ -318,7 +318,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 			secattr->cache.free = entry->lsm_data.free;
 			secattr->cache.data = entry->lsm_data.data;
 			if (prev_entry == NULL) {
-				spin_unlock(&cipso_v4_cache[bkt].lock);
+				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
 			}
 
@@ -333,12 +333,12 @@ static int cipso_v4_cache_check(const unsigned char *key,
 				__list_add(&entry->list,
 					   prev_entry->list.prev,
 					   &prev_entry->list);
 			}
-			spin_unlock(&cipso_v4_cache[bkt].lock);
+			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 			return 0;
 		}
 		prev_entry = entry;
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 	return -ENOENT;
 }
@@ -387,7 +387,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	entry->lsm_data.data = secattr->cache.data;
 
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache[bkt].size += 1;
@@ -398,7 +398,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 				       struct cipso_v4_map_cache_entry, list);
 		list_del(&old_entry->list);
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache_entry_free(old_entry);
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 	return 0;
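
A note on the choice of primitive: spin_lock_bh() is sufficient here because,
per the description above, the cache is only touched from process context and
softirq context.  If the locks were ever taken from hardirq context as well,
the spin_lock_irqsave()/spin_unlock_irqrestore() variants would be required
instead.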