UPSTREAM: mm/memfd: Add an F_SEAL_FUTURE_WRITE seal to memfd
Android uses ashmem for sharing memory regions. We are looking forward to migrating all use cases of ashmem to memfd so that we can possibly remove the ashmem driver from staging in the future, while also benefiting from using memfd and contributing to it. Note that staging drivers are not ABI and can generally be removed at any time.

One of the main use cases Android has is the ability to create a region and mmap it as writeable, then add protection against making any "future" writes while keeping the already mmap'ed writeable region active. This allows us to implement a use case where receivers of the shared memory buffer get a read-only view, while the sender continues to write to the buffer. See the CursorWindow documentation in Android for more details: https://developer.android.com/reference/android/database/CursorWindow

This use case cannot be implemented with the existing F_SEAL_WRITE seal. To support it, this patch adds a new F_SEAL_FUTURE_WRITE seal, which prevents any future mmap and write syscalls from succeeding while keeping the existing mmap active.

A better way to do the F_SEAL_FUTURE_WRITE seal was discussed [1] last week, where we don't need to modify core VFS structures to get the same behavior of the seal. This avoids several side effects pointed out by Andy. Self-tests are provided in a later patch to verify the expected semantics.

[1] https://lore.kernel.org/lkml/20181111173650.GA256781@google.com/

[Thanks a lot to Andy for suggestions to improve code]

Cc: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Change-Id: I6710c045954378f87bfbff6311d372a3b8549064
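For orientation, here is a minimal userspace sketch of the flow this seal enables (not part of the patch; the buffer name, size, and abbreviated error handling are illustrative, and F_SEAL_FUTURE_WRITE is defined locally in case the installed UAPI headers predate this change):

/*
 * Sketch of the CursorWindow-style flow, assuming a libc with
 * memfd_create() (glibc >= 2.27).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* value added by this patch */
#endif

int main(void)
{
	const size_t size = 4096;
	int fd = memfd_create("sender-buf", MFD_ALLOW_SEALING);

	ftruncate(fd, size);

	/* The sender maps the region writable before sealing... */
	char *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* ...then seals the fd against any *future* writes. */
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* The pre-existing writable mapping keeps working. */
	strcpy(buf, "still writable here");

	/* But a receiver of fd can no longer get a writable view: */
	void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (rw == MAP_FAILED)
		perror("writable mmap after seal");	/* expected: EPERM */

	/* write(2) on the fd is refused as well. */
	if (write(fd, "x", 1) < 0)
		perror("write after seal");		/* expected: EPERM */

	return 0;
}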
commit dec031f640 (parent 198aac25b7)
4 changed files with 26 additions and 5 deletions
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -530,7 +530,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		inode_lock(inode);
 
 		/* protected by i_mutex */
-		if (info->seals & F_SEAL_WRITE) {
+		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
 			inode_unlock(inode);
 			return -EPERM;
 		}
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
 #define F_SEAL_SHRINK	0x0002	/* prevent file from shrinking */
 #define F_SEAL_GROW	0x0004	/* prevent file from growing */
 #define F_SEAL_WRITE	0x0008	/* prevent writes */
+#define F_SEAL_FUTURE_WRITE	0x0010  /* prevent future writes while mapped */
 /* (1U << 31) is reserved for signed error codes */
 
 /*
|
@ -150,7 +150,8 @@ static unsigned int *memfd_file_seals_ptr(struct file *file)
|
|||
#define F_ALL_SEALS (F_SEAL_SEAL | \
|
||||
F_SEAL_SHRINK | \
|
||||
F_SEAL_GROW | \
|
||||
F_SEAL_WRITE)
|
||||
F_SEAL_WRITE | \
|
||||
F_SEAL_FUTURE_WRITE)
|
||||
|
||||
static int memfd_add_seals(struct file *file, unsigned int seals)
|
||||
{
|
||||
|
|
mm/shmem.c (25 changes)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2169,6 +2169,24 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 
 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
+
+	if (info->seals & F_SEAL_FUTURE_WRITE) {
+		/*
+		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+		 * "future write" seal active.
+		 */
+		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+			return -EPERM;
+
+		/*
+		 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
+		 * read-only mapping, take care to not allow mprotect to revert
+		 * protections.
+		 */
+		vma->vm_flags &= ~(VM_MAYWRITE);
+	}
+
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
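Aside (not part of the diff): the VM_MAYWRITE clearing above is what keeps a sealed, read-only MAP_SHARED mapping from being upgraded later. Continuing the userspace sketch from the commit message (same hypothetical fd and size), the expected behavior is roughly:

	/* A read-only shared view of the sealed memfd is still permitted... */
	void *view = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);

	/*
	 * ...but because shmem_mmap() cleared VM_MAYWRITE on this VMA,
	 * mprotect() cannot flip it to writable afterwards.
	 */
	if (mprotect(view, size, PROT_READ | PROT_WRITE) < 0)
		perror("mprotect to PROT_WRITE");	/* expected: EACCES */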
@@ -2422,8 +2440,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 	pgoff_t index = pos >> PAGE_SHIFT;
 
 	/* i_mutex is held by caller */
-	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
-		if (info->seals & F_SEAL_WRITE)
+	if (unlikely(info->seals & (F_SEAL_GROW |
+				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
+		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
 			return -EPERM;
 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
 			return -EPERM;
@@ -2686,7 +2705,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
 		/* protected by i_mutex */
-		if (info->seals & F_SEAL_WRITE) {
+		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
 			error = -EPERM;
 			goto out;
 		}