/*
 * linux/ipc/util.h
 * Copyright (C) 1999 Christoph Rohland
 *
 * ipc helper functions (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * namespaces support.      2006 OpenVZ, SWsoft Inc.
 *                               Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _IPC_UTIL_H
#define _IPC_UTIL_H

#include <linux/unistd.h>
#include <linux/err.h>

#define SEQ_MULTIPLIER	(IPCMNI)

void sem_init(void);
void msg_init(void);
void shm_init(void);

struct ipc_namespace;
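
/*
 * Each ipc namespace owns an internal mqueuefs superblock. On namespace
 * teardown, mq_clear_sbinfo() detaches it (sb->s_fs_info goes NULL) and
 * mq_put_mnt() drops the internal mount.
 */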
#ifdef CONFIG_POSIX_MQUEUE
extern void mq_clear_sbinfo(struct ipc_namespace *ns);
extern void mq_put_mnt(struct ipc_namespace *ns);
#else
static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { }
static inline void mq_put_mnt(struct ipc_namespace *ns) { }
#endif

#ifdef CONFIG_SYSVIPC
void sem_init_ns(struct ipc_namespace *ns);
void msg_init_ns(struct ipc_namespace *ns);
void shm_init_ns(struct ipc_namespace *ns);

void sem_exit_ns(struct ipc_namespace *ns);
void msg_exit_ns(struct ipc_namespace *ns);
void shm_exit_ns(struct ipc_namespace *ns);
#else
static inline void sem_init_ns(struct ipc_namespace *ns) { }
static inline void msg_init_ns(struct ipc_namespace *ns) { }
static inline void shm_init_ns(struct ipc_namespace *ns) { }

static inline void sem_exit_ns(struct ipc_namespace *ns) { }
static inline void msg_exit_ns(struct ipc_namespace *ns) { }
static inline void shm_exit_ns(struct ipc_namespace *ns) { }
#endif

struct ipc_rcu {
	struct rcu_head rcu;
	atomic_t refcount;
} ____cacheline_aligned_in_smp;

#define ipc_rcu_to_struct(p)	((void *)(p+1))

/*
 * Structure that holds the parameters needed by the ipc operations
 * (see struct ipc_ops below)
 */
struct ipc_params {
	key_t key;
	int flg;
	union {
		size_t size;	/* for shared memories */
		int nsems;	/* for semaphores */
	} u;			/* holds the getnew() specific param */
};

/*
 * Structure that holds some ipc operations. This structure is used to unify
 * the calls to sys_msgget(), sys_semget() and sys_shmget():
 *	. routine to call to create a new ipc object. Can be one of newque,
 *	  newary or newseg.
 *	. routine to call to check permissions for a new ipc object.
 *	  Can be one of security_msg_associate, security_sem_associate or
 *	  security_shm_associate.
 *	. routine to call for an extra check if needed.
 */
struct ipc_ops {
	int (*getnew)(struct ipc_namespace *, struct ipc_params *);
	int (*associate)(struct kern_ipc_perm *, int);
	int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *);
};
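
/*
 * Illustrative sketch of how the ops table is used -- not code from this
 * header. The callback and accessor names (newary, sem_security,
 * sem_more_checks, sem_ids()) are the ones the semaphore code defines in
 * ipc/sem.c; a getter fills in both structures and hands off to ipcget():
 *
 *	struct ipc_ops sem_ops;
 *	struct ipc_params sem_params;
 *
 *	sem_ops.getnew = newary;
 *	sem_ops.associate = sem_security;
 *	sem_ops.more_checks = sem_more_checks;
 *
 *	sem_params.key = key;
 *	sem_params.flg = semflg;
 *	sem_params.u.nsems = nsems;
 *
 *	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 */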

struct seq_file;
struct ipc_ids;

void ipc_init_ids(struct ipc_ids *);
#ifdef CONFIG_PROC_FS
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *));
#else
#define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
#endif

#define IPC_SEM_IDS	0
#define IPC_MSG_IDS	1
#define IPC_SHM_IDS	2

#define ipcid_to_idx(id)	((id) % SEQ_MULTIPLIER)
#define ipcid_to_seqx(id)	((id) / SEQ_MULTIPLIER)

/* must be called with ids->rwsem acquired for writing */
int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
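
/*
 * Illustrative sketch of the locking discipline -- not code from this
 * header. In-tree callers reach ipc_addid() through ipcget(), which takes
 * the rwsem itself before invoking ops->getnew(); the shm names below
 * (shm_perm, shm_ctlmni) are just one possible consumer:
 *
 *	down_write(&ids->rwsem);
 *	id = ipc_addid(ids, &shp->shm_perm, ns->shm_ctlmni);
 *	up_write(&ids->rwsem);
 *	if (id < 0)
 *		return id;	a negative return means no slot was added
 */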

/* must be called with ids->rwsem acquired for reading */
int ipc_get_maxid(struct ipc_ids *);

/* must be called with both ids->rwsem (for writing) and ipcp->lock held */
void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);

/* must be called with ipcp locked */
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);

/*
 * For rare, potentially huge allocations; both functions may sleep.
 */
void *ipc_alloc(int size);
void ipc_free(void *ptr, int size);

/*
 * For allocations that need to be freed by RCU.
 * Objects are reference counted; they start with a reference count of 1.
 * getref increases the refcount; the putref call that reduces the refcount
 * to 0 schedules the RCU destruction. The caller must guarantee locking.
 */
void *ipc_rcu_alloc(int size);
int ipc_rcu_getref(void *ptr);
void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
void ipc_rcu_free(struct rcu_head *head);
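
/*
 * Illustrative sketch of the lifecycle -- not code from this header. The
 * allocation carries a struct ipc_rcu header in front of the payload, which
 * ipc_rcu_to_struct() skips over. The sem_array/security_sem_free names are
 * borrowed from ipc/sem.c as one example consumer:
 *
 *	static void sem_rcu_free(struct rcu_head *head)
 *	{
 *		struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 *		struct sem_array *sma = ipc_rcu_to_struct(p);
 *
 *		security_sem_free(sma);
 *		ipc_rcu_free(head);
 *	}
 *
 *	sma = ipc_rcu_alloc(size);		refcount starts at 1
 *	...
 *	ipc_rcu_getref(sma);			take an extra reference
 *	...
 *	ipc_rcu_putref(sma, sem_rcu_free);	last put frees after a grace
 *						period, security blob included
 */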

struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);

void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
					     struct ipc_ids *ids, int id, int cmd,
					     struct ipc64_perm *perm, int extra_perm);

#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd)	IPC_64
#else
int ipc_parse_version(int *cmd);
#endif

extern void free_msg(struct msg_msg *msg);
extern struct msg_msg *load_msg(const void __user *src, size_t len);
extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);

extern void recompute_msgmni(struct ipc_namespace *);

static inline int ipc_buildid(int id, int seq)
{
	return SEQ_MULTIPLIER * seq + id;
}
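
/*
 * Worked example (assuming the usual IPCMNI of 32768): slot index 12 with
 * sequence number 3 gives ipc_buildid(12, 3) == 3 * 32768 + 12 == 98316;
 * ipcid_to_idx(98316) recovers 12 and ipcid_to_seqx(98316) recovers 3.
 * ipc_checkid() below compares the sequence part the same way -- despite
 * its name, the uid argument is a full ipc id, not a user id.
 */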

static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid)
{
	return uid / SEQ_MULTIPLIER != ipcp->seq;
}

static inline void ipc_lock_object(struct kern_ipc_perm *perm)
{
	spin_lock(&perm->lock);
}

static inline void ipc_unlock_object(struct kern_ipc_perm *perm)
{
	spin_unlock(&perm->lock);
}

static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
{
	assert_spin_locked(&perm->lock);
}

static inline void ipc_unlock(struct kern_ipc_perm *perm)
{
	ipc_unlock_object(perm);
	rcu_read_unlock();
}
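
/*
 * Illustrative sketch of the intended fast path -- not code from this
 * header: look the object up under RCU without the spinlock, and take
 * perm->lock only around the actual update:
 *
 *	rcu_read_lock();
 *	ipcp = ipc_obtain_object_check(ids, id);
 *	if (IS_ERR(ipcp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(ipcp);
 *	}
 *	... lockless permission/security checks ...
 *	ipc_lock_object(ipcp);
 *	... update the object ...
 *	ipc_unlock(ipcp);	drops perm->lock and the RCU read lock
 */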

struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
			struct ipc_ops *ops, struct ipc_params *params);
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
		void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));

#endif /* _IPC_UTIL_H */