kernel-fxtec-pro1x/kernel/locking/rwsem-xadd.c
Ivaylo Georgiev 0f3194a0fa Merge android-4.19.45 (50f9143) into msm-4.19
* refs/heads/tmp-50f9143:
  Linux 4.19.45
  ext4: don't update s_rev_level if not required
  ext4: fix compile error when using BUFFER_TRACE
  pstore: Refactor compression initialization
  pstore: Allocate compression during late_initcall()
  pstore: Centralize init/exit routines
  iov_iter: optimize page_copy_sane()
  libnvdimm/namespace: Fix label tracking error
  xen/pvh: set xen_domain_type to HVM in xen_pvh_init
  kbuild: turn auto.conf.cmd into a mandatory include file
  KVM: lapic: Busy wait for timer to expire when using hv_timer
  KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes
  jbd2: fix potential double free
  ALSA: hda/realtek - Fix for Lenovo B50-70 inverted internal microphone bug
  ALSA: hda/realtek - Fixup headphone noise via runtime suspend
  ALSA: hda/realtek - Corrected fixup for System76 Gazelle (gaze14)
  ext4: avoid panic during forced reboot due to aborted journal
  ext4: fix use-after-free in dx_release()
  ext4: fix data corruption caused by overlapping unaligned and aligned IO
  ext4: zero out the unused memory region in the extent tree block
  tty: Don't force RISCV SBI console as preferred console
  fs/writeback.c: use rcu_barrier() to wait for inflight wb switches going into workqueue when umount
  crypto: ccm - fix incompatibility between "ccm" and "ccm_base"
  ipmi:ssif: compare block number correctly for multi-part return messages
  bcache: never set KEY_PTRS of journal key to 0 in journal_reclaim()
  bcache: fix a race between cache register and cacheset unregister
  Btrfs: do not start a transaction at iterate_extent_inodes()
  Btrfs: do not start a transaction during fiemap
  Btrfs: send, flush dellaloc in order to avoid data loss
  btrfs: Honour FITRIM range constraints during free space trim
  btrfs: Correctly free extent buffer in case btree_read_extent_buffer_pages fails
  btrfs: Check the first key and level for cached extent buffer
  ext4: fix ext4_show_options for file systems w/o journal
  ext4: actually request zeroing of inode table after grow
  ext4: fix use-after-free race with debug_want_extra_isize
  ext4: avoid drop reference to iloc.bh twice
  ext4: ignore e_value_offs for xattrs with value-in-ea-inode
  ext4: make sanity check in mballoc more strict
  jbd2: check superblock mapped prior to committing
  tty/vt: fix write/write race in ioctl(KDSKBSENT) handler
  tty: vt.c: Fix TIOCL_BLANKSCREEN console blanking if blankinterval == 0
  mtd: spi-nor: intel-spi: Avoid crossing 4K address boundary on read/write
  mfd: max77620: Fix swapped FPS_PERIOD_MAX_US values
  mfd: da9063: Fix OTP control register names to match datasheets for DA9063/63L
  ACPI: PM: Set enable_for_wake for wakeup GPEs during suspend-to-idle
  userfaultfd: use RCU to free the task struct when fork fails
  ocfs2: fix ocfs2 read inode data panic in ocfs2_iget
  hugetlb: use same fault hash key for shared and private mappings
  mm/hugetlb.c: don't put_page in lock of hugetlb_lock
  mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses
  mm/mincore.c: make mincore() more conservative
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  crypto: ccree - HOST_POWER_DOWN_EN should be the last CC access during suspend
  crypto: ccree - pm resume first enable the source clk
  crypto: ccree - don't map AEAD key and IV on stack
  crypto: ccree - use correct internal state sizes for export
  crypto: ccree - don't map MAC key on stack
  crypto: ccree - fix mem leak on error path
  crypto: ccree - remove special handling of chained sg
  bpf, arm64: remove prefetch insn in xadd mapping
  ASoC: codec: hdac_hdmi add device_link to card device
  ASoC: fsl_esai: Fix missing break in switch statement
  ASoC: RT5677-SPI: Disable 16Bit SPI Transfers
  ASoC: max98090: Fix restore of DAPM Muxes
  ALSA: hdea/realtek - Headset fixup for System76 Gazelle (gaze14)
  ALSA: hda/realtek - EAPD turn on later
  ALSA: hda/hdmi - Consider eld_valid when reporting jack event
  ALSA: hda/hdmi - Read the pin sense from register when repolling
  ALSA: usb-audio: Fix a memory leak bug
  ALSA: line6: toneport: Fix broken usage of timer for delayed execution
  mmc: core: Fix tag set memory leak
  crypto: arm64/aes-neonbs - don't access already-freed walk.iv
  crypto: arm/aes-neonbs - don't access already-freed walk.iv
  crypto: rockchip - update IV buffer to contain the next IV
  crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
  crypto: arm64/gcm-aes-ce - fix no-NEON fallback code
  crypto: x86/crct10dif-pcl - fix use via crypto_shash_digest()
  crypto: crct10dif-generic - fix use via crypto_shash_digest()
  crypto: skcipher - don't WARN on unprocessed data after slow walk step
  crypto: vmx - fix copy-paste error in CTR mode
  crypto: ccp - Do not free psp_master when PLATFORM_INIT fails
  crypto: chacha20poly1305 - set cra_name correctly
  crypto: salsa20 - don't access already-freed walk.iv
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  sched/x86: Save [ER]FLAGS on context switch
  arm64: Save and restore OSDLR_EL1 across suspend/resume
  arm64: Clear OSDLR_EL1 on CPU boot
  arm64: compat: Reduce address limit
  arm64: arch_timer: Ensure counter register reads occur with seqlock held
  arm64: mmap: Ensure file offset is treated as unsigned
  power: supply: axp288_fuel_gauge: Add ACEPC T8 and T11 mini PCs to the blacklist
  power: supply: axp288_charger: Fix unchecked return value
  ARM: exynos: Fix a leaked reference by adding missing of_node_put
  mmc: sdhci-of-arasan: Add DTS property to disable DCMDs.
  ARM: dts: exynos: Fix audio (microphone) routing on Odroid XU3
  ARM: dts: exynos: Fix interrupt for shared EINTs on Exynos5260
  arm64: dts: rockchip: Disable DCMDs on RK3399's eMMC controller.
  objtool: Fix function fallthrough detection
  x86/speculation/mds: Improve CPU buffer clear documentation
  x86/speculation/mds: Revert CPU buffer clear on double fault exit
  locking/rwsem: Prevent decrement of reader count before increment
  fs: sdcardfs: Add missing option to show_options
  BACKPORT: drm/amd/display: add -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines
  ANDROID: x86: use the correct function type for sys_ni_syscall
  ANDROID: x86: use the correct function type for sys32_(rt_)sigreturn
  ANDROID: x86: use the correct function type for native_set_fixmap
  ANDROID: x86: use the correct function type in SYSCALL_DEFINE0
  ANDROID: x86: add support for CONFIG_LTO_CLANG
  ANDROID: x86: disable STACK_VALIDATION with LTO_CLANG
  ANDROID: x86: disable HAVE_ARCH_PREL32_RELOCATIONS with LTO_CLANG
  ANDROID: x86/vdso: disable LTO only for VDSO
  ANDROID: x86/cpu/vmware: use the full form of inl in VMWARE_PORT
  UPSTREAM: x86/build: Keep local relocations with ld.lld
  ANDROID: crypto: arm64/ghash: fix CFI for GHASH CE
  ANDROID: crypto: arm64/sha: fix CFI in SHA CE
  ANDROID: arm64: kvm: disable CFI
  ANDROID: arm64: mark kpti_install_ng_mappings as __nocfi
  ANDROID: arm64: disable CFI for cpu_replace_ttbr1
  FROMLIST: arm64: use the correct function type for __arm64_sys_ni_syscall
  FROMLIST: arm64: use the correct function type in SYSCALL_DEFINE0
  FROMLIST: arm64: fix syscall_fn_t type
  ANDROID: modpost: add an exception for CFI stubs
  ANDROID: ftrace: fix function type mismatches
  FROMLIST: 9p: pass the correct prototype to read_cache_page
  FROMLIST: jffs2: pass the correct prototype to read_cache_page
  UPSTREAM: nfs: pass the correct prototype to read_cache_page
  FROMLIST: mm: don't cast ->readpage to filler_t for do_read_cache_page
  UPSTREAM: netfilter: xt_IDLETIMER: fix sysfs callback function type
  ANDROID: kallsyms: strip the .cfi postfix from symbols with CONFIG_CFI_CLANG
  ANDROID: add support for clang Control Flow Integrity (CFI)
  FROMLIST: arm64: select ARCH_SUPPORTS_LTO_CLANG
  ANDROID: arm64: disable HAVE_ARCH_PREL32_RELOCATIONS with LTO_CLANG
  ANDROID: arm64: add atomic_ll_sc.o to obj-y if using lld
  ANDROID: arm64: lse: fix LSE atomics with LTO
  ANDROID: arm64: vdso: disable LTO
  FROMLIST: arm64: kvm: use -fno-jump-tables with clang
  BACKPORT: arm64: sysreg: Make mrs_s and msr_s macros work with Clang and LTO
  ANDROID: init: ensure initcall ordering with LTO
  ANDROID: drivers/misc: disable LTO for lkdtm_rodata.o
  FROMLIST: efi/libstub: disable LTO
  FROMLIST: scripts/mod: disable LTO for empty.c
  ANDROID: kbuild: disable LTO_CLANG with KASAN
  FROMLIST: kbuild: fix dynamic ftrace with clang LTO
  ANDROID: kbuild: add support for clang LTO
  ANDROID: kbuild: add CONFIG_LD_IS_LLD
  UPSTREAM: gcov: clang support
  UPSTREAM: gcov: docs: add a note on GCC vs Clang differences
  UPSTREAM: gcov: clang: move common GCC code into gcc_base.c
  UPSTREAM: module: add stubs for within_module functions
  UPSTREAM: bpf: relax inode permission check for retrieving bpf program

Conflicts:
	Makefile
	arch/Kconfig
	arch/arm64/kvm/hyp/Makefile
	arch/x86/include/asm/syscall_wrapper.h
	drivers/mmc/core/queue.c
	fs/nfs/dir.c
	fs/nfs/symlink.c
	include/asm-generic/vmlinux.lds.h
	include/linux/compiler-clang.h
	include/linux/pagemap.h
	kernel/cfi.c
	mm/filemap.c
	scripts/link-vmlinux.sh

Change-Id: I1e34675a86ecb60d7b8a87e16574ea8920f9cb12
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
2019-07-08 00:33:34 -07:00

// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 * checking that the count becomes more than 0 for successful lock
 * acquisition, i.e. the case where there are only readers or nobody has
 * the lock (1st and 2nd case above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 * acquisition (i.e. nobody else has the lock or attempts the lock). If
 * unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 * are only waiters but none active (5th case above), and attempt to
 * steal the lock.
 */
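
/*
 * Worked example (editor's illustration, not part of the original file;
 * 32-bit values taken from the guide above): ACTIVE_BIAS == 0x00000001,
 * WAITING_BIAS == 0xffff0000 and ACTIVE_WRITE_BIAS == 0xffff0001. Two
 * active readers with waiters queued therefore yield
 *
 *	2 * ACTIVE_BIAS + WAITING_BIAS == 0xffff0002
 *
 * which matches case (1) of the 0xffff000X pattern above.
 */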

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
#ifdef CONFIG_RWSEM_PRIO_AWARE
	sem->m_count = 0;
#endif
}
EXPORT_SYMBOL(__init_rwsem);
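
/*
 * Minimal usage sketch (editor's illustration, not part of this file):
 * callers normally initialize via init_rwsem() or the DECLARE_RWSEM()
 * static initializer, then use the generic entry points, which fall
 * back to the slow paths below only on contention:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	// may enter rwsem_down_read_failed()
 *	...read-side critical section...
 *	up_read(&example_sem);		// may enter rwsem_wake()
 */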

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken waiters are removed from the list after their ->task field is zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		rwsem_set_reader_owned(sem);
	}

	/*
	 * Grant read locks to all of the readers at the front of the queue.
	 * We know that woken will be at least 1, as we accounted for it
	 * above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. This
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
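
	/*
	 * Illustrative interleaving of the race described above (editor's
	 * sketch, not in the original file). With a single combined pass,
	 * the waiter could observe waiter->task == NULL before its reader
	 * bias was added:
	 *
	 *	waker				to-be-woken reader
	 *	-----				------------------
	 *	clear waiter->task
	 *					sees !waiter.task, proceeds
	 *					...critical section...
	 *					up_read(): count decrement
	 *	count increment (too late)
	 *
	 * Counting and adding all reader biases first (pass 1) before any
	 * waiter->task is cleared (pass 2) closes this window.
	 */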
	list_for_each_entry(waiter, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
	}
	list_cut_before(&wlist, &sem->wait_list, &waiter->list);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wake up.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add(wake_q, tsk);
		/* wake_q_add() already takes the task ref */
		put_task_struct(tsk);
	}
}

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool is_first_waiter = false;

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;

	/* is_first_waiter == true means we are first in the queue */
	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     (adjustment != -RWSEM_ACTIVE_READ_BIAS || is_first_waiter)))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}
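
/*
 * Worked count example for the path above (editor's illustration, 32-bit
 * values from the guide at the top of this file): a writer holds the
 * lock, so count == 0xffff0001. A reader's down_read() fast path adds
 * ACTIVE_BIAS (-> 0xffff0002) and fails; the slow path then undoes that
 * bias and, being the first waiter, adds WAITING_BIAS, leaving count ==
 * 0xfffe0001: one active writer with waiters on the queue.
 */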

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
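
/*
 * Editor's note on the cmpxchg above (illustrative, 32-bit values from
 * the guide at the top of this file): with exactly one queued waiter --
 * this writer -- count moves from 0xffff0000 to ACTIVE_WRITE_BIAS
 * (0xffff0001), i.e. a lone active writer, since the waiter dequeues
 * itself. With more waiters an extra WAITING_BIAS is kept, giving
 * 0xfffe0001, so the eventual unlock still wakes the remaining queue.
 */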

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Because of lock holder preemption, we skip spinning if the
	 * owner task is not running on a CPU or if its CPU has been
	 * preempted (e.g. a preempted vCPU on a virtualized host).
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If the
		 * check fails, owner might point to free()d memory; if
		 * it still matches, the rcu_read_lock() ensures the
		 * memory stays valid.
		 */
		barrier();

		/*
		 * abort spinning when need_resched or owner is not running or
		 * owner's cpu is preempted.
		 */
		if (need_resched() || !owner_on_cpu(owner)) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted the lock
		 * holder between its acquiring the lock and setting the
		 * owner field. If we're an RT task, that would live-lock
		 * because we would never let the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);
	bool is_first_waiter = false;

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	/*
	 * is_first_waiter == true means we are first in the queue,
	 * so there are no read locks that were queued ahead of us.
	 */
	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (!is_first_waiter && count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}
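
/*
 * Worked count example for the path above (editor's illustration,
 * 32-bit): one reader holds the lock (count == 0x00000001). A writer's
 * down_write() fast path adds ACTIVE_WRITE_BIAS -> 0xffff0002 and
 * fails; this slow path subtracts it again (-> 0x00000001), queues the
 * waiter, and adds WAITING_BIAS (-> 0xffff0001). Once the reader's
 * up_read() clears the active count (-> 0xffff0000),
 * rwsem_try_write_lock() can claim the lock as shown above.
 */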

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *      - VS -
	 *
	 *              __up_write()
	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
	 *                  rwsem_wake(sem)
	 *                    osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
	 *         MB                         RMB
	 * [RmW]   Y += 1               [L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */
	smp_rmb();

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer           up_write/up_read caller
	 *    ---------------           -----------------------
	 * [S]   osq_unlock()           [L]   osq
	 *       MB                           RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);