2824988a20
commit 444da3f52407d74c9aa12187ac6b01f76ee47d62 upstream.
When ur_load_imm_any() is inlined into jeq_imm(), it's possible for the
compiler to deduce a case where _val can only have the value of -1 at
compile time. Specifically,
/* struct bpf_insn: _s32 imm */
u64 imm = insn->imm; /* sign extend */
if (imm >> 32) { /* non-zero only if insn->imm is negative */
/* inlined from ur_load_imm_any */
u32 __imm = imm >> 32; /* therefore, always 0xffffffff */
if (__builtin_constant_p(__imm) && __imm > 255)
compiletime_assert_XXX()
This can result in tripping a BUILD_BUG_ON() in __BF_FIELD_CHECK() that
checks that a given value is representable in one byte (interpreted as
unsigned).
FIELD_FIT() should return true or false at runtime for whether a value
can fit or not. Don't break the build over a value that's too large for
the mask. We'd prefer to keep the inlining and compiler optimizations
though we know this case will always return false.
Cc: stable@vger.kernel.org
Fixes: 1697599ee3 ("bitfield.h: add FIELD_FIT() helper")
Link: https://lore.kernel.org/kernel-hardening/CAK7LNASvb0UDJ0U5wkYYRzTAdnEs64HjXpEUL7d=V0CXiAXcNw@mail.gmail.com/
Reported-by: Masahiro Yamada <masahiroy@kernel.org>
Debugged-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
153 lines
4.7 KiB
C
153 lines
4.7 KiB
C
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <asm/byteorder.h>

/*
 * Bitfield access macros
 *
 * FIELD_{GET,PREP} macros take as first parameter shifted mask
 * from which they extract the base mask and shift amount.
 * Mask must be a compilation time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *	  FIELD_PREP(REG_FIELD_B, 0) |
 *	  FIELD_PREP(REG_FIELD_C, c) |
 *	  FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */

/* Bit position of the lowest set bit in @x, i.e. the field's shift amount. */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)

/*
 * Compile-time sanity checks shared by the FIELD_* macros:
 *  - @_mask must be a non-zero compile-time constant,
 *  - a compile-time-constant @_val must fit in the mask once shifted down,
 *  - the type of @_reg must be wide enough to hold the mask,
 *  - the mask must be contiguous (mask + its lowest bit is a power of 2).
 */
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 *
 * Note: 0ULL (not @_val) is passed to __BF_FIELD_CHECK() on purpose, so a
 * compile-time-constant out-of-range value yields a runtime false instead
 * of tripping the "value too large" BUILD_BUG_ON() when inlining lets the
 * compiler prove the value is constant.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})

/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value. The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg: value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
	})

/*
 * Build-error hooks for the function-based helpers below: these are never
 * defined, so if a call to one survives optimization, the link/build fails
 * with the given message.
 */
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
/*
 * field_multiplier() - value of the lowest set bit of @field, used to
 * shift values into/out of position by multiplication/division.
 * Triggers __bad_mask() (a build error when reachable) if @field is not
 * a contiguous run of set bits.
 */
static __always_inline u64 field_multiplier(u64 field)
{
	/* (field | (field - 1)) fills in the low bits; adding 1 must carry
	 * out cleanly for a contiguous mask. */
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;	/* isolate the lowest set bit */
}
/* field_mask() - @field shifted down so its lowest set bit lands at bit 0. */
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
/*
 * ____MAKE_OP() - generate typed bitfield helpers for one storage type.
 * @type: name prefix of the generated helpers (e.g. le32, u16)
 * @base: CPU-order integer type used for values (e.g. u32)
 * @to:   conversion from CPU order to storage order (may be empty)
 * @from: conversion from storage order to CPU order (may be empty)
 *
 * Unlike FIELD_{PREP,GET}, these accept a non-constant mask; a
 * compile-time-provable overflow or bad mask still becomes a build error
 * via __field_overflow()/__bad_mask().
 */
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
#define __MAKE_OP(size) \
|
|
____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
|
|
____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
|
|
____MAKE_OP(u##size,u##size,,)
|
|
____MAKE_OP(u8,u8,,)
|
|
__MAKE_OP(16)
|
|
__MAKE_OP(32)
|
|
__MAKE_OP(64)
|
|
#undef __MAKE_OP
|
|
#undef ____MAKE_OP
|
|
|
|
#endif
|