arm64: cmpxchg: truncate sub-word signed types before comparison
When performing a cmpxchg operation on a signed sub-word type (e.g. s8), we need to ensure that the upper register bits of the "old" value used for comparison are zeroed, otherwise we may erroneously fail the cmpxchg, which may even be interpreted as success by the caller (if the compiler performs the truncation as part of its check). This has been observed in mod_state, where negative values were causing problems with this_cpu_cmpxchg.

This patch fixes the issue by explicitly casting 8-bit and 16-bit "old" values using unsigned types in our cmpxchg wrappers. 32-bit types can be left alone, since the underlying asm makes use of W registers in this case.

Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
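The failure mode can be reproduced outside the kernel. Below is a minimal user-space sketch (not kernel code; the variable names are purely illustrative) showing how a negative s8 "old" value sign-extends when widened to unsigned long, while the byte read back from memory is zero-extended, so a full-width comparison spuriously mismatches even though the low 8 bits agree:

/* Minimal user-space sketch of the sign-extension hazard; names are
 * illustrative only and do not appear in the kernel sources. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int8_t old = -1;                    /* signed sub-word "old" value  */
	uint8_t mem = (uint8_t)old;         /* byte as loaded from memory,
					       zero-extended                */

	unsigned long wide  = old;          /* sign-extends to 0xff...ff    */
	unsigned long trunc = (uint8_t)old; /* zero-extends to 0xff         */

	printf("without truncation: %s\n",
	       wide == mem ? "match" : "spurious mismatch");
	printf("with truncation:    %s\n",
	       trunc == mem ? "match" : "spurious mismatch");
	return 0;
}

The casts to u8/u16 in the wrappers below perform exactly this truncation before the value reaches the comparison.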
parent ef5e724b25
commit a14949e09a
1 changed file with 4 additions and 4 deletions
@@ -122,9 +122,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	switch (size) {
 	case 1:
-		return __cmpxchg_case_1(ptr, old, new);
+		return __cmpxchg_case_1(ptr, (u8)old, new);
 	case 2:
-		return __cmpxchg_case_2(ptr, old, new);
+		return __cmpxchg_case_2(ptr, (u16)old, new);
 	case 4:
 		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
@@ -141,9 +141,9 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 {
 	switch (size) {
 	case 1:
-		return __cmpxchg_case_mb_1(ptr, old, new);
+		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
 	case 2:
-		return __cmpxchg_case_mb_2(ptr, old, new);
+		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
 	case 4:
 		return __cmpxchg_case_mb_4(ptr, old, new);
 	case 8:
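For illustration only, the sketch below mirrors the switch-on-size shape of the patched __cmpxchg() using the GCC/Clang __atomic builtins; the function name cmpxchg_sketch is hypothetical and the real kernel code is hand-written LL/SC (or LSE) assembly. The casts on the 1- and 2-byte cases correspond to the ones added by this patch; the 4-byte case needs no cast because only the low 32 bits take part in the comparison:

/* Hypothetical user-space analogue of the patched __cmpxchg(); not the
 * kernel implementation. */
#include <stdint.h>
#include <stdbool.h>

static inline unsigned long
cmpxchg_sketch(volatile void *ptr, unsigned long old, unsigned long new,
	       int size)
{
	switch (size) {
	case 1: {
		uint8_t o = (uint8_t)old;    /* truncate, as in the patch */
		__atomic_compare_exchange_n((volatile uint8_t *)ptr, &o,
					    (uint8_t)new, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return o;                    /* value observed in memory  */
	}
	case 2: {
		uint16_t o = (uint16_t)old;  /* truncate, as in the patch */
		__atomic_compare_exchange_n((volatile uint16_t *)ptr, &o,
					    (uint16_t)new, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return o;
	}
	case 4: {
		uint32_t o = (uint32_t)old;  /* 32-bit case: no truncation
						needed                     */
		__atomic_compare_exchange_n((volatile uint32_t *)ptr, &o,
					    (uint32_t)new, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return o;
	}
	}
	return old;
}

With the truncation in place, a caller performing a cmpxchg on a negative s8 value (as in mod_state via this_cpu_cmpxchg) compares only the low byte, as intended.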