kernel-fxtec-pro1x/tools/memory-model/lock.cat
Alan Stern cee0321a40 tools/memory-model: Remove out-of-date comments and code from lock.cat
lock.cat contains old comments and code referring to the possibility
of LKR events that are not part of an RMW pair.  This is a holdover
from when I thought we might end up using LKR events to implement
spin_is_locked().  Reword the comments to remove this assumption and
replace domain(lk-rmw) in the code with LKR.

Tested-by: Andrea Parri <andrea.parri@amarulasolutions.com>
[ paulmck: Pulled as lock-nest into previous line as discussed. ]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luc Maranget <luc.maranget@inria.fr>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: parri.andrea@gmail.com
Link: http://lkml.kernel.org/r/1526340837-12222-15-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-05-15 08:11:18 +02:00

// SPDX-License-Identifier: GPL-2.0+
(*
* Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
* Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>
*)
(*
* Generate coherence orders and handle lock operations
*
* Warning: spin_is_locked() crashes herd7 versions strictly before 7.48.
* spin_is_locked() is functional from herd7 version 7.49.
*)
include "cross.cat"
(*
* The lock-related events generated by herd are as follows:
*
* LKR   Lock-Read: the read part of a spin_lock() or successful
*       spin_trylock() read-modify-write event pair
* LKW   Lock-Write: the write part of a spin_lock() or successful
*       spin_trylock() RMW event pair
* UL    Unlock: a spin_unlock() event
* LF    Lock-Fail: a failed spin_trylock() event
* RL    Read-Locked: a spin_is_locked() event which returns True
* RU    Read-Unlocked: a spin_is_locked() event which returns False
*
* LKR and LKW events always come paired, like all RMW event sequences.
*
* LKR, LF, RL, and RU are read events; LKR has Acquire ordering.
* LKW and UL are write events; UL has Release ordering.
* LKW, LF, RL, and RU have no ordering properties.
*)
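(*
* Illustration only, not part of the model: a minimal LKMM litmus-test
* sketch showing which of the above events herd7 generates.  The lock
* name "mylock" and the registers r0 and r1 are hypothetical, and the
* spin_trylock()/spin_is_locked() return-value syntax is assumed from
* the usual litmus-test dialect.
*
*       C lock-events-sketch
*
*       {}
*
*       P0(spinlock_t *mylock)
*       {
*               spin_lock(mylock);              // LKR then LKW, an RMW pair
*               spin_unlock(mylock);            // UL
*       }
*
*       P1(spinlock_t *mylock)
*       {
*               int r0;
*               int r1;
*
*               r0 = spin_trylock(mylock);      // LKR and LKW if r0 == 1, LF if r0 == 0
*               r1 = spin_is_locked(mylock);    // RL if r1 == 1, RU if r1 == 0
*       }
*
*       exists (1:r0=0 /\ 1:r1=1)
*)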
(* Backward compatibility *)
let RL = try RL with emptyset
let RU = try RU with emptyset
(* Treat RL as a kind of LF: a read with no ordering properties *)
let LF = LF | RL
(* There should be no ordinary R or W accesses to spinlocks *)
let ALL-LOCKS = LKR | LKW | UL | LF | RU
flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
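(*
* For illustration (an assumed usage error, not part of the model): a
* litmus test that passed the same spinlock_t variable both to
* spin_lock() and to an ordinary READ_ONCE() or WRITE_ONCE() access
* would trip the mixed-lock-accesses flag above.
*)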
(* Link Lock-Reads to their RMW-partner Lock-Writes *)
let lk-rmw = ([LKR] ; po-loc ; [LKW]) \ (po ; po)
let rmw = rmw | lk-rmw
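(*
* Note: the "\ (po ; po)" in the lk-rmw definition keeps only
* immediately adjacent pairs: any LKR-to-LKW link with at least one
* intervening event in program order is removed, so each Lock-Read is
* paired with exactly the Lock-Write that directly follows it.
*)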
(* The litmus test is invalid if an LKR/LKW event is not part of an RMW pair *)
flag ~empty LKW \ range(lk-rmw) as unpaired-LKW
flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
(*
* An LKR must always see an unlocked value; spin_lock() calls nested
* inside a critical section (for the same lock) always deadlock.
*)
empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
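(*
* Illustration only, with a hypothetical lock "mylock": a thread that
* executes
*
*       spin_lock(mylock);
*       spin_lock(mylock);      // self-deadlock on the same lock
*
* can never complete, because the second LKR follows the thread's own
* LKW with no intervening UL; the lock-nest check above rejects such
* candidate executions outright rather than merely flagging them.
*)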
(* The final value of a spinlock should not be tested *)
flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
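(*
* For illustration: a final-state condition that inspects a spinlock_t
* variable, e.g. "exists (mylock=0)" with a hypothetical lock "mylock",
* would trip the lock-final flag above; lock variables are meant to be
* manipulated only through the lock operations, never observed as
* ordinary data.
*)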
(*
* Put lock operations in their appropriate classes, but leave UL out of W
* until after the co relation has been generated.
*)
let R = R | LKR | LF | RU
let W = W | LKW
let Release = Release | UL
let Acquire = Acquire | LKR
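(*
* Consequence of the classification above: LKR acts as an acquire read
* and UL as a release write, so accesses made before a spin_unlock() in
* one thread are ordered before accesses made after a later spin_lock()
* of the same lock in another thread, much as smp_store_release() and
* smp_load_acquire() order message passing; see, for example,
* MP+polocks.litmus in the accompanying litmus-tests directory.
*)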
(* Match LKW events to their corresponding UL events *)
let critical = ([LKW] ; po-loc ; [UL]) \ (po-loc ; [LKW | UL] ; po-loc)
flag ~empty UL \ range(critical) as unmatched-unlock
(* Allow up to one unmatched LKW per location; more must deadlock *)
let UNMATCHED-LKW = LKW \ domain(critical)
empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
(* rfi for LF events: link each LKW to the LF events in its critical section *)
let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
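(*
* Illustration only, with a hypothetical lock "mylock" and register r0:
*
*       spin_lock(mylock);
*       r0 = spin_trylock(mylock);      // must fail, generating an LF
*       spin_unlock(mylock);
*
* The LF lies between the LKW and its UL, so rfi-lf links the LKW to
* it: the failed trylock reads the locked value written by the thread's
* own lock acquisition.
*)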
(* rfe for LF events *)
let all-possible-rfe-lf =
        (*
         * Given an LF event r, compute the possible rfe edges for that event
         * (all those starting from LKW events in other threads),
         * and then convert that relation to a set of single-edge relations.
         *)
        let possible-rfe-lf r =
                let pair-to-relation p = p ++ 0
                in map pair-to-relation ((LKW * {r}) & loc & ext)
        (* Do this for each LF event r that isn't in rfi-lf *)
        in map possible-rfe-lf (LF \ range(rfi-lf))
(* Generate all rf relations for LF events *)
with rfe-lf from cross(all-possible-rfe-lf)
let rf-lf = rfe-lf | rfi-lf
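(*
* Note on the construction above: "with rfe-lf from cross(...)" directs
* herd7 to enumerate the cross product of the per-event candidate sets,
* producing one candidate execution for each way of choosing a single
* external LKW (same lock, different thread) as the source for every LF
* event not already covered by rfi-lf.
*)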
(*
* RU, i.e., spin_is_locked() returning False, is slightly different.
* We rely on the memory model to rule out cases where spin_is_locked()
* within one of the lock's critical sections returns False.
*)
(* rfi for RU events: an RU may read from the last po-previous UL *)
let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
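(*
* Illustration only, with a hypothetical lock "mylock" and register r1:
*
*       spin_lock(mylock);
*       spin_unlock(mylock);
*       r1 = spin_is_locked(mylock);    // may return False here
*
* The RU event may read from the thread's own po-previous UL, provided
* no later LKW for the same lock intervenes; rfi-ru supplies that edge.
*)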
(* rfe for RU events: an RU may read from an external UL or the initial write *)
let all-possible-rfe-ru =
        let possible-rfe-ru r =
                let pair-to-relation p = p ++ 0
                in map pair-to-relation (((UL|IW) * {r}) & loc & ext)
        in map possible-rfe-ru RU
(* Generate all rf relations for RU events *)
with rfe-ru from cross(all-possible-rfe-ru)
let rf-ru = rfe-ru | rfi-ru
(* Final rf relation *)
let rf = rf | rf-lf | rf-ru
(* Generate all co relations, including LKW events but not UL *)
let co0 = co0 | ([IW] ; loc ; [LKW]) |
        (([LKW] ; loc ; [UNMATCHED-LKW]) \ [UNMATCHED-LKW])
include "cos-opt.cat"
let W = W | UL
let M = R | W
(* Merge UL events into co *)
let co = (co | critical | (critical^-1 ; co))+
let coe = co & ext
let coi = co & int
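(*
* Note on the merge above: critical links each LKW to its matching UL,
* so closing co over critical and critical^-1 ; co slots every UL into
* the coherence order immediately after its own LKW, even though no co
* edges were generated for UL events directly.
*)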
(* Merge LKR events into rf *)
let rf = rf | ([IW | UL] ; singlestep(co) ; lk-rmw^-1)
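(*
* Note on the merge above: singlestep(co) is the immediate-successor
* restriction of co, so every LKR reads from the IW or UL event that
* directly precedes its paired LKW in the coherence order; in other
* words, each lock acquisition reads the most recent unlocked value of
* the lock.
*)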
let rfe = rf & ext
let rfi = rf & int
let fr = rf^-1 ; co
let fre = fr & ext
let fri = fr & int
show co,rf,fr