Merge "Merge android-4.19.42 (66aebe2) into msm-4.19"

qctecmdr 2019-06-19 02:54:50 -07:00 committed by Gerrit - the friendly Code Review server
commit d0b211ae3d
118 changed files with 1609 additions and 1257 deletions

View file

@ -343,9 +343,9 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
support for this filesystem, or the filesystem superblock has not
support for filesystems, or the filesystem superblock has not
had encryption enabled on it. (For example, to use encryption on an
ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
ext4 filesystem, CONFIG_FS_ENCRYPTION must be enabled in the
kernel config, and the superblock must have had the "encrypt"
feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
encrypt``.)
@ -451,10 +451,18 @@ astute users may notice some differences in behavior:
- Unencrypted files, or files encrypted with a different encryption
policy (i.e. different key, modes, or flags), cannot be renamed or
linked into an encrypted directory; see `Encryption policy
enforcement`_. Attempts to do so will fail with EPERM. However,
enforcement`_. Attempts to do so will fail with EXDEV. However,
encrypted files can be renamed within an encrypted directory, or
into an unencrypted directory.
Note: "moving" an unencrypted file into an encrypted directory, e.g.
with the `mv` program, is implemented in userspace by a copy
followed by a delete. Be aware that the original unencrypted data
may remain recoverable from free space on the disk; prefer to keep
all files encrypted from the very beginning. The `shred` program
may be used to overwrite the source files but isn't guaranteed to be
effective on all filesystems and storage devices.
- Direct I/O is not supported on encrypted files. Attempts to use
direct I/O on such files will fall back to buffered I/O.
@ -541,7 +549,7 @@ not be encrypted.
Except for those special files, it is forbidden to have unencrypted
files, or files encrypted with a different encryption policy, in an
encrypted directory tree. Attempts to link or rename such a file into
an encrypted directory will fail with EPERM. This is also enforced
an encrypted directory will fail with EXDEV. This is also enforced
during ->lookup() to provide limited protection against offline
attacks that try to disable or downgrade encryption in known locations
where applications may later write sensitive data. It is recommended
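Note on the EPERM -> EXDEV change above: it is visible to userspace through rename(2) and link(2). A minimal sketch of how a caller can react to the new error code (paths are hypothetical; the copy-and-delete fallback is elided):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical paths: "plain.txt" is unencrypted, "/vault" is encrypted */
	if (rename("plain.txt", "/vault/plain.txt") == 0)
		return 0;

	if (errno == EXDEV) {
		/*
		 * Same errno as a cross-filesystem move, so tools like mv
		 * quietly fall back to copy + unlink in userspace -- which is
		 * exactly why the original plaintext may survive in free space.
		 */
		fprintf(stderr, "cross-policy rename; falling back to copy+delete\n");
		return 1;
	}
	perror("rename");
	return 1;
}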

View file

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 41
SUBLEVEL = 42
EXTRAVERSION =
NAME = "People's Front"

View file

@ -624,12 +624,9 @@ CONFIG_SENSORS_SSC=y
CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y

View file

@ -647,12 +647,9 @@ CONFIG_SENSORS_SSC=y
CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y

View file

@ -591,7 +591,7 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y

View file

@ -607,7 +607,7 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y

View file

@ -23,26 +23,34 @@
#include <asm/errno.h>
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \
unsigned int loops = FUTEX_MAX_LOOPS; \
\
uaccess_enable(); \
asm volatile( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w0, %w3, %2\n" \
" cbnz %w0, 1b\n" \
" dmb ish\n" \
" cbz %w0, 3f\n" \
" sub %w4, %w4, %w0\n" \
" cbnz %w4, 1b\n" \
" mov %w0, %w7\n" \
"3:\n" \
" dmb ish\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %w0, %w5\n" \
"4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
"+r" (loops) \
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable(); \
} while (0)
@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %w3, %w4",
__futex_atomic_op("mov %w3, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %w3, %w1, %w4",
__futex_atomic_op("add %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("orr %w3, %w1, %w4",
__futex_atomic_op("orr %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %w3, %w1, %w4",
__futex_atomic_op("and %w3, %w1, %w5",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("eor %w3, %w1, %w4",
__futex_atomic_op("eor %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
default:
@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
unsigned int loops = FUTEX_MAX_LOOPS;
u32 val, tmp;
u32 __user *uaddr;
@ -104,20 +113,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
" dmb ish\n"
" sub %w3, %w1, %w5\n"
" cbnz %w3, 4f\n"
"2: stlxr %w3, %w6, %2\n"
" cbz %w3, 3f\n"
" sub %w4, %w4, %w3\n"
" cbnz %w4, 1b\n"
" mov %w0, %w8\n"
"3:\n"
" dmb ish\n"
"4:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
" b 3b\n"
"5: mov %w0, %w7\n"
" b 4b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
_ASM_EXTABLE(1b, 5b)
_ASM_EXTABLE(2b, 5b)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "memory");
uaccess_disable();
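The rewritten assembly above bounds the previously unbounded LDXR/STLXR retry loop at FUTEX_MAX_LOOPS iterations and returns -EAGAIN once the budget is spent, so a pathological run of exclusive-store failures can no longer pin the CPU indefinitely. A user-space sketch of the same control flow, with a C11 compare-exchange standing in for the exclusive load/store pair (only the bounded-retry shape is the point, not the atomics):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define FUTEX_MAX_LOOPS 128	/* same bound the asm applies */

static int futex_add_sketch(_Atomic unsigned int *uaddr,
			    unsigned int oparg, unsigned int *oldval)
{
	unsigned int loops = FUTEX_MAX_LOOPS;
	unsigned int old = atomic_load(uaddr);

	/* bounded retry: bail out with -EAGAIN instead of spinning forever */
	while (!atomic_compare_exchange_weak(uaddr, &old, old + oparg))
		if (--loops == 0)
			return -EAGAIN;

	*oldval = old;
	return 0;
}

int main(void)
{
	_Atomic unsigned int u = 40;
	unsigned int old;

	if (futex_add_sketch(&u, 2, &old) == 0)
		printf("old=%u new=%u\n", old, (unsigned int)atomic_load(&u));
	return 0;
}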

View file

@ -63,7 +63,7 @@ CONFIG_HID_MONTEREY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y

View file

@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@ -214,7 +215,7 @@ static void kgdb_call_nmi_hook(void *ignored)
old_fs = get_fs();
set_fs(get_ds());
kgdb_nmicallback(raw_smp_processor_id(), NULL);
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
set_fs(old_fs);
}

View file

@ -74,7 +74,7 @@ CONFIG_GENERIC_PHY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y

View file

@ -492,7 +492,6 @@ CONFIG_VIRTIO_INPUT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@ -512,6 +511,7 @@ CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y

View file

@ -489,7 +489,6 @@ CONFIG_VIRTIO_INPUT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@ -507,6 +506,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y

View file

@ -23,6 +23,7 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZ4 is not set
# CONFIG_FHANDLE is not set

View file

@ -3068,7 +3068,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
if (!event->attr.freq) {
if (!(event->attr.freq || event->attr.wakeup_events)) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))
@ -3447,6 +3447,12 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
if (x86_pmu.flags & PMU_FL_TFA) {
WARN_ON_ONCE(cpuc->tfa_shadow);
cpuc->tfa_shadow = ~0ULL;
intel_set_tfa(cpuc, false);
}
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);

View file

@ -225,6 +225,7 @@ struct cfq_group_data {
unsigned int weight;
unsigned int leaf_weight;
u64 group_idle;
};
/* This is per cgroup per device grouping structure */
@ -310,6 +311,7 @@ struct cfq_group {
struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
struct cfq_queue *async_idle_cfqq;
u64 group_idle;
};
struct cfq_io_cq {
@ -805,6 +807,17 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static inline u64 get_group_idle(struct cfq_data *cfqd)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
struct cfq_queue *cfqq = cfqd->active_queue;
if (cfqq && cfqq->cfqg)
return cfqq->cfqg->group_idle;
#endif
return cfqd->cfq_group_idle;
}
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@ -825,7 +838,7 @@ static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
if (!sample_valid(ttime->ttime_samples))
return false;
if (group_idle)
slice = cfqd->cfq_group_idle;
slice = get_group_idle(cfqd);
else
slice = cfqd->cfq_slice_idle;
return ttime->ttime_mean > slice;
@ -1592,6 +1605,7 @@ static void cfq_cpd_init(struct blkcg_policy_data *cpd)
cgd->weight = weight;
cgd->leaf_weight = weight;
cgd->group_idle = cfq_group_idle;
}
static void cfq_cpd_free(struct blkcg_policy_data *cpd)
@ -1636,6 +1650,7 @@ static void cfq_pd_init(struct blkg_policy_data *pd)
cfqg->weight = cgd->weight;
cfqg->leaf_weight = cgd->leaf_weight;
cfqg->group_idle = cgd->group_idle;
}
static void cfq_pd_offline(struct blkg_policy_data *pd)
@ -1757,6 +1772,19 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
return 0;
}
static int cfq_print_group_idle(struct seq_file *sf, void *v)
{
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
u64 val = 0;
if (cgd)
val = cgd->group_idle;
seq_printf(sf, "%llu\n", div_u64(val, NSEC_PER_USEC));
return 0;
}
static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off,
bool on_dfl, bool is_leaf_weight)
@ -1878,6 +1906,37 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
return __cfq_set_weight(css, val, false, false, true);
}
static int cfq_set_group_idle(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct blkcg *blkcg = css_to_blkcg(css);
struct cfq_group_data *cfqgd;
struct blkcg_gq *blkg;
int ret = 0;
spin_lock_irq(&blkcg->lock);
cfqgd = blkcg_to_cfqgd(blkcg);
if (!cfqgd) {
ret = -EINVAL;
goto out;
}
cfqgd->group_idle = val * NSEC_PER_USEC;
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct cfq_group *cfqg = blkg_to_cfqg(blkg);
if (!cfqg)
continue;
cfqg->group_idle = cfqgd->group_idle;
}
out:
spin_unlock_irq(&blkcg->lock);
return ret;
}
static int cfqg_print_stat(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@ -2023,6 +2082,11 @@ static struct cftype cfq_blkcg_legacy_files[] = {
.seq_show = cfq_print_leaf_weight,
.write_u64 = cfq_set_leaf_weight,
},
{
.name = "group_idle",
.seq_show = cfq_print_group_idle,
.write_u64 = cfq_set_group_idle,
},
/* statistics, covers only the tasks in the cfqg */
{
@ -2917,7 +2981,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* with sync vs async workloads.
*/
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
!cfqd->cfq_group_idle)
!get_group_idle(cfqd))
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@ -2928,9 +2992,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
*/
if (!cfq_should_idle(cfqd, cfqq)) {
/* no queue idling. Check for group idling */
if (cfqd->cfq_group_idle)
group_idle = cfqd->cfq_group_idle;
else
group_idle = get_group_idle(cfqd);
if (!group_idle)
return;
}
@ -2971,7 +3034,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
cfq_mark_cfqq_wait_request(cfqq);
if (group_idle)
sl = cfqd->cfq_group_idle;
sl = group_idle;
else
sl = cfqd->cfq_slice_idle;
@ -3320,7 +3383,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
* this group, wait for requests to complete.
*/
check_group_idle:
if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
if (get_group_idle(cfqd) && cfqq->cfqg->nr_cfqq == 1 &&
cfqq->cfqg->dispatched &&
!cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
cfqq = NULL;
@ -3884,7 +3947,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->cfq_slice_idle);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
__cfq_update_io_thinktime(&cfqq->cfqg->ttime, get_group_idle(cfqd));
#endif
}
@ -4273,7 +4336,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_should_wait_busy(cfqd, cfqq)) {
u64 extend_sl = cfqd->cfq_slice_idle;
if (!cfqd->cfq_slice_idle)
extend_sl = cfqd->cfq_group_idle;
extend_sl = get_group_idle(cfqd);
cfqq->slice_end = now + extend_sl;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
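The new per-cgroup group_idle attribute is exposed in microseconds but stored in nanoseconds: cfq_set_group_idle() above multiplies by NSEC_PER_USEC and cfq_print_group_idle() divides by it. A trivial round-trip check of that conversion:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint64_t written_us = 8;	/* e.g. writing "8" to the group_idle file */
	uint64_t stored_ns = written_us * NSEC_PER_USEC;	/* kept internally in ns */

	/* the show path divides back down, so reading returns "8" */
	printf("%llu\n", (unsigned long long)(stored_ns / NSEC_PER_USEC));
	return 0;
}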

View file

@ -446,6 +446,8 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;

View file

@ -1571,6 +1571,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
.offset = HHI_VDEC_CLK_CNTL,
.shift = 0,
.width = 7,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_1_div",
@ -1616,6 +1617,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
.offset = HHI_VDEC2_CLK_CNTL,
.shift = 16,
.width = 7,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",

View file

@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
unsigned int cur_frequency, base_frequency;
struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk;
struct clk *clk, *parent;
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
return PTR_ERR(clk);
}
parent = clk_get_parent(clk);
if (IS_ERR(parent)) {
dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
clk_put(clk);
return PTR_ERR(parent);
}
/* Get parent CPU frequency */
base_frequency = clk_get_rate(parent);
if (!base_frequency) {
dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
clk_put(clk);
return -EINVAL;
}
/* Get nominal (current) CPU frequency */
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = cur_frequency / dvfs->divider[load_lvl];
freq = base_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;

View file

@ -4028,6 +4028,7 @@ static void handle_cursor_update(struct drm_plane *plane,
amdgpu_crtc->cursor_width = plane->state->crtc_w;
amdgpu_crtc->cursor_height = plane->state->crtc_h;
memset(&attributes, 0, sizeof(attributes));
attributes.address.high_part = upper_32_bits(address);
attributes.address.low_part = lower_32_bits(address);
attributes.width = plane->state->crtc_w;

View file

@ -1473,7 +1473,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
ret = PTR_ERR(regmap);
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
@ -1509,6 +1508,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;

View file

@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return 0;
}
@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
if (err)
return err;
/*
* Initialize CEC clock divider: CEC needs 2MHz clock hence
* set the divider to 24 to get 48/24=2MHz clock
*/
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
return -EIO;
err = -EIO;
goto err_disable_clk;
}
/* Clear RX FIFO */
if (!hdmi_cec_clear_rx_fifo(adap)) {
pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
return -EIO;
err = -EIO;
goto err_disable_clk;
}
/* Clear CEC interrupts */
@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
}
return 0;
err_disable_clk:
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return err;
}
static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
return ret;
core->wp = wp;
/*
* Initialize CEC clock divider: CEC needs 2MHz clock hence
* set the devider to 24 to get 48/24=2MHz clock
*/
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
/* Disable clock initially, hdmi_cec_adap_enable() manages it */
REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
ret = cec_register_adapter(core->adap, &pdev->dev);
if (ret < 0) {

View file

@ -217,7 +217,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
err_unregister_gates:
for (i = 0; i < CLK_NUM; i++)
if (clk_data->hws[i])
if (!IS_ERR_OR_NULL(clk_data->hws[i]))
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
@ -235,7 +235,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
of_clk_del_provider(dev->of_node);
for (i = 0; i < CLK_NUM; i++)
clk_hw_unregister_gate(clk_data->hws[i]);
if (clk_data->hws[i])
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
reset_control_assert(tcon_top->rst);

View file

@ -402,7 +402,6 @@ int hv_synic_cleanup(unsigned int cpu)
clockevents_unbind_device(hv_cpu->clk_evt, cpu);
hv_ce_shutdown(hv_cpu->clk_evt);
put_cpu_ptr(hv_cpu);
}
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

View file

@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Comet Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};

View file

@ -13388,7 +13388,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
int qos_rmt_count;
int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@ -13450,10 +13450,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
/* each user context requires an entry in the RMT */
qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
/*
* The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries);
* 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
* 3. VNIC (num_vnic_contexts).
* It should be noted that PSM FECN oversubscribes num_vnic_contexts
* entries of RMT because both VNIC and PSM could allocate any receive
* context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive
* context.
*/
rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@ -14441,9 +14451,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
u64 reg;
int i, idx, regoff, regidx;
u8 offset;
u32 total_cnt;
/* there needs to be enough room in the map table */
if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
return;
}
@ -14497,7 +14509,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
rmt->used += dd->num_user_contexts;
rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
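A worked instance of the new RMT accounting, with invented numbers (the real NUM_MAP_ENTRIES and context counts depend on hardware and module parameters):

#include <stdio.h>

int main(void)
{
	/* all values illustrative */
	int num_map_entries   = 256;	/* total RMT entries (assumed) */
	int qos_entries       = 128;	/* rule 0: QOS */
	int num_vnic_contexts = 8;
	int n_usr_ctxts       = 140;	/* requested user contexts */

	/* new accounting: QOS plus FECN-for-PSM, oversubscribing the VNIC slots */
	int rmt_count = qos_entries + 2 * num_vnic_contexts;	/* 144 */

	if (rmt_count + n_usr_ctxts > num_map_entries)
		n_usr_ctxts = num_map_entries - rmt_count;	/* reduced to 112 */

	printf("user receive contexts: %d\n", n_usr_ctxts);
	return 0;
}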

View file

@ -2303,7 +2303,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@ -2377,7 +2377,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}

View file

@ -745,6 +745,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
@ -756,8 +758,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
dma_offset = offset = (obj & (table->num_obj - 1)) *
table->obj_size % mhop.bt_chunk_size;
dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}

View file

@ -707,7 +707,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;
@ -730,7 +729,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
table = &hr_dev->mr_table.mtt_cqe_table;
mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
mtt->first_seg +
start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
&dma_handle);
if (!mtts)
return -ENOMEM;

View file

@ -1123,6 +1123,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
pvrdma_free_slots(dev);
dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
dev->dsrbase);
iounmap(dev->regs);
kfree(dev->sgid_tbl);

View file

@ -356,7 +356,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
u64 start = iommu->exclusion_start & PAGE_MASK;
u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
u64 entry;
if (!iommu->exclusion_start)
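The new "- 1" fixes an off-by-one: the exclusion limit names the last page inside the range, whereas start + length is the first byte after it. Worked with 4 KiB pages (addresses invented):

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffULL)	/* 4 KiB pages */

int main(void)
{
	uint64_t start = 0x100000, length = 0x4000;	/* a four-page range */

	uint64_t old = (start + length) & PAGE_MASK;	 /* 0x104000: one page past the range */
	uint64_t new = (start + length - 1) & PAGE_MASK; /* 0x103000: the range's last page */

	printf("old limit %#llx, fixed limit %#llx\n",
	       (unsigned long long)old, (unsigned long long)new);
	return 0;
}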

View file

@ -140,7 +140,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
p->des0 |= cpu_to_le32(RDES0_OWN);
bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);

View file

@ -1844,7 +1844,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
memset(queue, 0, sizeof(*queue));
queue->ctrl = ctrl;
queue->qnum = idx;
atomic_set(&queue->csn, 1);
atomic_set(&queue->csn, 0);
queue->dev = ctrl->dev;
if (idx > 0)
@ -1886,7 +1886,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
*/
queue->connection_id = 0;
atomic_set(&queue->csn, 1);
atomic_set(&queue->csn, 0);
}
static void
@ -2182,7 +2182,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
{
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
u32 csn;
int ret, opstate;
/*
@ -2197,8 +2196,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
/* format the FC-NVME CMD IU and fcp_req */
cmdiu->connection_id = cpu_to_be64(queue->connection_id);
csn = atomic_inc_return(&queue->csn);
cmdiu->csn = cpu_to_be32(csn);
cmdiu->data_len = cpu_to_be32(data_len);
switch (io_dir) {
case NVMEFC_FCP_WRITE:
@ -2256,11 +2253,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
blk_mq_start_request(op->rq);
cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
&ctrl->rport->remoteport,
queue->lldd_handle, &op->fcp_req);
if (ret) {
/*
* If the lld fails to send the command, is there an issue with
* the csn value? If the command that fails is the Connect,
* no - as the connection won't be live. If it is a command
* post-connect, it's possible a gap in csn may be created.
* Does this matter? As Linux initiators don't send fused
* commands, no. The gap would exist, but as there's nothing
* that depends on csn order to be delivered on the target
* side, it shouldn't hurt. It would be difficult for a
* target to even detect the csn gap as it has no idea when the
* cmd with the csn was supposed to arrive.
*/
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
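A compressed timeline of the ordering hazard the relocation avoids: assigning the CSN early leaves a window in which a later command can reach the LLDD first (CPU labels and values illustrative):

/*
 *   CPU0: csn = atomic_inc_return(&queue->csn)   -> 5
 *   CPU1: csn = atomic_inc_return(&queue->csn)   -> 6
 *   CPU1: ->fcp_io(... csn=6 ...)    <-- CSN 6 reaches the wire first
 *   CPU0: ->fcp_io(... csn=5 ...)    <-- CSN 5 arrives out of order
 *
 * Taking the CSN immediately before ->fcp_io() shrinks that window to
 * the minimum the driver controls; the comment above covers the residual
 * gap left when the LLDD call itself fails.
 */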

View file

@ -396,7 +396,7 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
* Some systems need one or more of their pmc_plt_clks to be
* marked as critical.
*/
static const struct dmi_system_id critclk_systems[] __initconst = {
static const struct dmi_system_id critclk_systems[] = {
{
.ident = "MPL CEC1x",
.matches = {

View file

@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
}
out:
if (req->nsge > 0)
if (req->nsge > 0) {
scsi_dma_unmap(cmnd);
if (req->dcopy && (host_status == DID_OK))
host_status = csio_scsi_copy_to_sgl(hw, req);
}
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);

View file

@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
if (!del_timer(&task->slow_task->timer))
return;
del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}

View file

@ -117,7 +117,7 @@ static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
/**
@ -137,9 +137,9 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (phba->hba_flag & HBA_FIP_SUPPORT)
return snprintf(buf, PAGE_SIZE, "1\n");
return scnprintf(buf, PAGE_SIZE, "1\n");
else
return snprintf(buf, PAGE_SIZE, "0\n");
return scnprintf(buf, PAGE_SIZE, "0\n");
}
static ssize_t
@ -517,14 +517,15 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->cfg_enable_bg)
if (phba->cfg_enable_bg) {
if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
return scnprintf(buf, PAGE_SIZE,
"BlockGuard Enabled\n");
else
return snprintf(buf, PAGE_SIZE,
return scnprintf(buf, PAGE_SIZE,
"BlockGuard Not Supported\n");
else
return snprintf(buf, PAGE_SIZE,
} else
return scnprintf(buf, PAGE_SIZE,
"BlockGuard Disabled\n");
}
@ -536,7 +537,7 @@ lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_guard_err_cnt);
}
@ -548,7 +549,7 @@ lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_apptag_err_cnt);
}
@ -560,7 +561,7 @@ lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
return scnprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_reftag_err_cnt);
}
@ -578,7 +579,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *host = class_to_shost(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
}
/**
@ -597,7 +598,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
}
/**
@ -619,7 +620,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
}
/**
@ -638,7 +639,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
}
/**
@ -657,7 +658,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
}
/**
@ -676,7 +677,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
}
/**
@ -694,7 +695,7 @@ lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",
return scnprintf(buf, PAGE_SIZE, "%d\n",
(phba->sli.sli_flag & LPFC_MENLO_MAINT));
}
@ -714,7 +715,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
}
/**
@ -742,10 +743,10 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
if (phba->sli_rev < LPFC_SLI_REV4)
len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
fwrev, phba->sli_rev);
else
len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
fwrev, phba->sli_rev, if_type, sli_family);
return len;
@ -769,7 +770,7 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
/**
@ -790,10 +791,11 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
char fwrev[FW_REV_STR_SIZE];
if (phba->sli_rev < LPFC_SLI_REV4)
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
return scnprintf(buf, PAGE_SIZE, "%s\n",
phba->OptionROMVersion);
lpfc_decode_firmware_rev(phba, fwrev, 1);
return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
}
/**
@ -824,20 +826,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
if (phba->hba_flag & LINK_DISABLED)
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"Link Down - User disabled\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"Configuring Link\n");
break;
case LPFC_FDISC:
@ -847,38 +849,40 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_NS_QRY:
case LPFC_BUILD_DISC_LIST:
case LPFC_DISC_AUTH:
len += snprintf(buf + len, PAGE_SIZE - len,
len += scnprintf(buf + len, PAGE_SIZE - len,
"Discovery\n");
break;
case LPFC_VPORT_READY:
len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
len += scnprintf(buf + len, PAGE_SIZE - len,
"Ready\n");
break;
case LPFC_VPORT_FAILED:
len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
len += scnprintf(buf + len, PAGE_SIZE - len,
"Failed\n");
break;
case LPFC_VPORT_UNKNOWN:
len += snprintf(buf + len, PAGE_SIZE - len,
len += scnprintf(buf + len, PAGE_SIZE - len,
"Unknown\n");
break;
}
if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Menlo Maint Mode\n");
else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
if (vport->fc_flag & FC_FABRIC)
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
}
}
@ -903,15 +907,15 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev < LPFC_SLI_REV4)
return snprintf(buf, PAGE_SIZE, "fc\n");
return scnprintf(buf, PAGE_SIZE, "fc\n");
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
return snprintf(buf, PAGE_SIZE, "fcoe\n");
return scnprintf(buf, PAGE_SIZE, "fcoe\n");
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
return snprintf(buf, PAGE_SIZE, "fc\n");
return scnprintf(buf, PAGE_SIZE, "fc\n");
}
return snprintf(buf, PAGE_SIZE, "unknown\n");
return scnprintf(buf, PAGE_SIZE, "unknown\n");
}
/**
@ -931,7 +935,7 @@ lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n",
return scnprintf(buf, PAGE_SIZE, "%d\n",
phba->sli4_hba.pc_sli4_params.oas_supported);
}
@ -989,7 +993,7 @@ lpfc_num_discovered_ports_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n",
return scnprintf(buf, PAGE_SIZE, "%d\n",
vport->fc_map_cnt + vport->fc_unmap_cnt);
}
@ -1427,7 +1431,7 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
/**
@ -1456,7 +1460,7 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
else
state = "online";
return snprintf(buf, PAGE_SIZE, "%s\n", state);
return scnprintf(buf, PAGE_SIZE, "%s\n", state);
}
/**
@ -1669,8 +1673,8 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1697,8 +1701,8 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1725,8 +1729,8 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1753,8 +1757,8 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1781,8 +1785,8 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1809,8 +1813,8 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
/**
@ -1835,10 +1839,10 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
if (!(phba->max_vpi))
return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
if (vport->port_type == LPFC_PHYSICAL_PORT)
return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
/**
@ -1860,7 +1864,7 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
/**
@ -1964,7 +1968,7 @@ lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
}
/**
@ -1983,7 +1987,7 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
}
/**
@ -2002,7 +2006,7 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
"" : "Not ");
@ -2031,7 +2035,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
uint16_t max_nr_virtfn;
max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
static inline bool lpfc_rangecheck(uint val, uint min, uint max)
@ -2091,7 +2095,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
return scnprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
}
@ -2119,7 +2123,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct lpfc_hba *phba = vport->phba;\
uint val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
return scnprintf(buf, PAGE_SIZE, "%#x\n",\
phba->cfg_##attr);\
}
@ -2255,7 +2259,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
/**
@ -2280,7 +2284,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
/**
@ -2551,7 +2555,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwpn);
}
@ -2648,7 +2652,7 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
@ -2714,7 +2718,7 @@ lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
wwn_to_u64(phba->cfg_oas_tgt_wwpn));
}
@ -2782,7 +2786,7 @@ lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
}
/**
@ -2845,7 +2849,7 @@ lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
wwn_to_u64(phba->cfg_oas_vpt_wwpn));
}
@ -2916,7 +2920,7 @@ lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
}
/**
@ -2980,7 +2984,7 @@ lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
return -EFAULT;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
}
static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
lpfc_oas_lun_status_show, NULL);
@ -3132,7 +3136,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
if (oas_lun != NOT_OAS_ENABLED_LUN)
phba->cfg_oas_flags |= OAS_LUN_VALID;
len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
return len;
}
@ -3266,7 +3270,7 @@ lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
}
static DEVICE_ATTR(iocb_hw, S_IRUGO,
@ -3278,7 +3282,7 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
return snprintf(buf, PAGE_SIZE, "%d\n",
return scnprintf(buf, PAGE_SIZE, "%d\n",
pring ? pring->txq_max : 0);
}
@ -3292,7 +3296,7 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
return snprintf(buf, PAGE_SIZE, "%d\n",
return scnprintf(buf, PAGE_SIZE, "%d\n",
pring ? pring->txcmplq_max : 0);
}
@ -3328,7 +3332,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
/**
@ -4830,19 +4834,19 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
switch (phba->cfg_fcp_cpu_map) {
case 0:
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: No mapping (%d)\n",
phba->cfg_fcp_cpu_map);
return len;
case 1:
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: HBA centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
case 2:
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: Driver centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
@ -4855,14 +4859,14 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
/* margin should fit in this and the truncated message */
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
len += scnprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
@ -4875,7 +4879,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
if (phba->sli4_hba.curr_disp_cpu <
phba->sli4_hba.num_present_cpu &&
(len >= (PAGE_SIZE - 64))) {
len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
len += scnprintf(buf + len, PAGE_SIZE-len, "more...\n");
break;
}
}
@ -6296,7 +6300,7 @@ lpfc_show_rport_##field (struct device *dev, \
{ \
struct fc_rport *rport = transport_class_to_rport(dev); \
struct lpfc_rport_data *rdata = rport->hostdata; \
return snprintf(buf, sz, format_string, \
return scnprintf(buf, sz, format_string, \
(rdata->target) ? cast rdata->target->field : 0); \
}
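Behind the wholesale snprintf() -> scnprintf() conversion in this file: snprintf() returns the length the output would have had, so accumulating len += snprintf(buf + len, PAGE_SIZE - len, ...) can push len past the buffer and make the next size argument wrap around. A self-contained demonstration, with a user-space stand-in for the kernel's scnprintf():

#include <stdarg.h>
#include <stdio.h>

/* user-space stand-in for the kernel helper: returns bytes actually stored */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i < 0)
		return 0;
	if ((size_t)i >= size)
		return size ? (int)(size - 1) : 0;
	return i;
}

int main(void)
{
	char buf[16];
	size_t len;

	len = snprintf(buf, sizeof(buf), "%s", "0123456789abcdef--");
	printf("snprintf : len=%zu, but the buffer is only %zu\n", len, sizeof(buf));

	len = scnprintf(buf, sizeof(buf), "%s", "0123456789abcdef--");
	printf("scnprintf: len=%zu, always < sizeof(buf)\n", len);
	return 0;
}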

View file

@ -1220,7 +1220,7 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
* Name object. NPIV is not in play so this integer
* value is sufficient and unique per FC-ID.
*/
n = snprintf(symbol, size, "%d", vport->phba->brd_no);
n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
return n;
}
@ -1234,26 +1234,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " FV%s", fwrev);
n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " DV%s.",
n += scnprintf(symbol + n, size - n, " DV%s.",
lpfc_release_version);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " HN:%s.",
n += scnprintf(symbol + n, size - n, " HN:%s.",
init_utsname()->nodename);
if (size < n)
return n;
/* Note :- OS name is "Linux" */
n += snprintf(symbol + n, size - n, " OS:%s\n",
n += scnprintf(symbol + n, size - n, " OS:%s\n",
init_utsname()->sysname);
return n;
}

File diff suppressed because it is too large

View file

@ -342,7 +342,7 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
pword = q->qe[idx].address;
len = 0;
len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
if (qe_word_cnt > 8)
printk(KERN_ERR "%s\n", line_buf);
@ -353,11 +353,11 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
if (qe_word_cnt > 8) {
len = 0;
memset(line_buf, 0, LPFC_LBUF_SZ);
len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len,
"%03d: ", i);
}
}
len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
((uint32_t)*pword) & 0xffffffff);
pword++;
}

View file

@ -345,7 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
ha->optrom_region_size = start + size;
ha->optrom_region_size = size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@ -418,7 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
ha->optrom_region_size = start + size;
ha->optrom_region_size = size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
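Both hunks above fix the same size/end mix-up: optrom_region_size holds a byte count but was assigned start + size, an end offset. With invented numbers:

/*
 * start = 0x40000, size = 0x20000 (illustrative flash region)
 *
 *   before: optrom_region_size = start + size = 0x60000
 *           -> the vmalloc()ed buffer and every later bounds check
 *              treat the region as 0x40000 bytes larger than it is
 *   after:  optrom_region_size = size = 0x20000
 */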

View file

@ -981,6 +981,8 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
qla2x00_mark_device_lost(vha, sess, 0, 0);
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@ -1161,8 +1163,6 @@ void qlt_unreg_sess(struct fc_port *sess)
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
qla2x00_mark_device_lost(vha, sess, 0, 0);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
sess->disc_state = DSC_DELETE_PEND;
sess->last_rscn_gen = sess->rscn_gen;

View file

@ -4,6 +4,7 @@
config SUNXI_SRAM
bool
default ARCH_SUNXI
select REGMAP_MMIO
help
Say y here to enable the SRAM controller support. This
device is responsible for mapping the SRAM in the sunXi SoCs

View file

@ -520,7 +520,7 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
op = gb_operation_create(connection,
GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
sizeof(req), sizeof(*resp) + props_count *
sizeof(*req), sizeof(*resp) + props_count *
sizeof(struct gb_power_supply_props_desc),
GFP_KERNEL);
if (!op)
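The one-character fix above swaps sizeof(req), the size of a pointer, for sizeof(*req), the size of the request payload, so the operation is created with the real request length. The pitfall in isolation (struct layout invented for the demo):

#include <stdio.h>

struct demo_request {	/* invented layout, not the real greybus message */
	unsigned char psy_id;
};

int main(void)
{
	struct demo_request *req = NULL;

	printf("sizeof(req)  = %zu (the pointer)\n", sizeof(req));	/* 8 on LP64 */
	printf("sizeof(*req) = %zu (the struct)\n", sizeof(*req));	/* 1 here */
	return 0;
}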

View file

@ -546,7 +546,7 @@ static void __exit mod_exit(void)
destroy_cdev(c);
destroy_channel(c);
}
unregister_chrdev_region(comp.devno, 1);
unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
}

View file

@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb)
struct acm *acm = rb->instance;
unsigned long flags;
int status = urb->status;
bool stopped = false;
bool stalled = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
set_bit(rb->index, &acm->read_urbs_free);
if (!acm->dev) {
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb)
break;
case -EPIPE:
set_bit(EVENT_RX_STALL, &acm->flags);
schedule_work(&acm->work);
return;
stalled = true;
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&acm->data->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
return;
stopped = true;
break;
default:
dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n",
@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb)
}
/*
* Unthrottle may run on another CPU which needs to see events
* in the same order. Submission has an implicit barrier
* Make sure URB processing is done before marking as free to avoid
* racing with unthrottle() on another CPU. Matches the barriers
* implied by the test_and_clear_bit() in acm_submit_read_urb().
*/
smp_mb__before_atomic();
set_bit(rb->index, &acm->read_urbs_free);
/*
* Make sure URB is marked as free before checking the throttled flag
* to avoid racing with unthrottle() on another CPU. Matches the
* smp_mb() in unthrottle().
*/
smp_mb__after_atomic();
if (stopped || stalled) {
if (stalled)
schedule_work(&acm->work);
return;
}
/* throttle device if requested by tty */
spin_lock_irqsave(&acm->read_lock, flags);
@ -842,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
acm->throttle_req = 0;
spin_unlock_irq(&acm->read_lock);
/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
smp_mb();
if (was_throttled)
acm_submit_read_urbs(acm, GFP_KERNEL);
}
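The barriers added above pair with each other in the classic publish-then-check pattern: each side stores its flag, issues a full barrier, then reads the other side's flag, so whichever CPU runs second is guaranteed to see the other's store and an URB can never be marked free with nobody left to resubmit it. Skeleton of the pairing (names abbreviated):

/*
 * completion path                      unthrottle path
 * (acm_read_bulk_callback)             (acm_tty_unthrottle)
 *
 *   process URB data;                    acm->throttled = 0;
 *   smp_mb__before_atomic();             smp_mb();
 *   set_bit(idx, &read_urbs_free);       if (test_bit(idx, &read_urbs_free))
 *   smp_mb__after_atomic();                      resubmit;
 *   if (acm->throttled)
 *           return;   <- unthrottle will see the bit and resubmit
 *   resubmit;
 */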

View file

@ -1210,7 +1210,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 tx_max_burst_prd;
/* default to highest possible threshold */
lpm_nyet_threshold = 0xff;
lpm_nyet_threshold = 0xf;
/* default to -3.5dB de-emphasis */
tx_de_emphasis = 1;
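The old default of 0xff does not fit the hardware field: the NYET threshold occupies only four bits of the control register (hence the new 0xf ceiling), so larger values spill into neighbouring bits. Illustration under that assumption (macro name and bit position invented):

/* assuming a 4-bit threshold field at, say, bits 27:24 of DCTL */
#define LPM_NYET_THRES(n)	((n) << 24)

/* 0xff << 24 = 0xff000000: bits 31:28 clobber unrelated register bits */
/* 0x0f << 24 = 0x0f000000: stays inside the four-bit field            */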

View file

@ -66,7 +66,7 @@ config USB_MUSB_SUNXI
depends on NOP_USB_XCEIV
depends on PHY_SUN4I_USB
depends on EXTCON
depends on GENERIC_PHY
select GENERIC_PHY
select SUNXI_SRAM
config USB_MUSB_DAVINCI

View file

@ -556,9 +556,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
static void f81232_close(struct usb_serial_port *port)
{
struct f81232_private *port_priv = usb_get_serial_port_data(port);
f81232_port_disable(port);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
flush_work(&port_priv->interrupt_work);
}
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@ -652,6 +655,40 @@ static int f81232_port_remove(struct usb_serial_port *port)
return 0;
}
static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
{
struct usb_serial_port *port = serial->port[0];
struct f81232_private *port_priv = usb_get_serial_port_data(port);
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
usb_kill_urb(port->read_urbs[i]);
usb_kill_urb(port->interrupt_in_urb);
if (port_priv)
flush_work(&port_priv->interrupt_work);
return 0;
}
static int f81232_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
int result;
if (tty_port_initialized(&port->port)) {
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result) {
dev_err(&port->dev, "submit interrupt urb failed: %d\n",
result);
return result;
}
}
return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver f81232_device = {
.driver = {
.owner = THIS_MODULE,
@ -675,6 +712,8 @@ static struct usb_serial_driver f81232_device = {
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
.port_remove = f81232_port_remove,
.suspend = f81232_suspend,
.resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {

View file

@ -65,6 +65,7 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
int maxp;
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@ -74,20 +75,17 @@ static int slave_alloc (struct scsi_device *sdev)
sdev->inquiry_len = 36;
/*
* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
* express this by block-layer constraints, so we'll cop out
* and simply require addresses to be aligned at 512-byte
* boundaries. This is okay since most block I/O involves
* hardware sectors that are multiples of 512 bytes in length,
* and since host controllers up through USB 2.0 have maxpacket
* values no larger than 512.
*
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
* values can be as large as 2048. To make that work properly
* will require changes to the block layer.
* USB has unusual scatter-gather requirements: the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value. Fortunately this value is always a
* power of 2. Inform the block layer about this requirement.
*/
maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* Some host controllers may have alignment requirements.
* We'll play it safe by requiring 512-byte alignment always.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
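
blk_queue_virt_boundary() takes a mask, and because the Bulk maxpacket value is always a power of two, maxp - 1 is exactly that mask: a scatterlist element whose length has any bits set below maxp would violate the "divisible by maxpacket" rule spelled out in the comment above. A small userspace check of the arithmetic (the lengths are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int maxp = 512;	/* typical USB 2.0 bulk maxpacket */
	unsigned int mask = maxp - 1;	/* what blk_queue_virt_boundary() gets */
	unsigned int lens[] = { 512, 1024, 520, 36 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("element of %4u bytes: %s\n", lens[i],
		       (lens[i] & mask) ? "only valid as the last element"
					: "valid anywhere in the list");
	return 0;
}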

View file

@ -796,24 +796,33 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
int maxp;
sdev->hostdata = devinfo;
/*
* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
* express this by block-layer constraints, so we'll cop out
* and simply require addresses to be aligned at 512-byte
* boundaries. This is okay since most block I/O involves
* hardware sectors that are multiples of 512 bytes in length,
* and since host controllers up through USB 2.0 have maxpacket
* values no larger than 512.
* We have two requirements here. We must satisfy the requirements
* of the physical HC and the demands of the protocol, as we
* definitely want no additional memory allocation in this path,
* ruling out using bounce buffers.
*
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
* values can be as large as 2048. To make that work properly
* will require changes to the block layer.
* For a transmission on USB to continue we must never send
* a packet that is smaller than maxpacket. Hence the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value.
* If the HC does not ensure that through SG,
* the upper layer must do that. We must assume nothing
* about the capabilities of the HC, so we use the most
* pessimistic requirement.
*/
maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.
* As this is not exported, we use an extremely conservative guess.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));

View file

@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */

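The guard added above matters when MSI-X setup failed early: msix_affinity_masks may never have been allocated, and the old loop dereferenced it unconditionally. The shape of the fix, reduced to a self-contained userspace sketch:

#include <stdlib.h>

static void free_affinity_masks(void **masks, unsigned int n)
{
	unsigned int i;

	if (!masks)	/* allocation may have failed before this point */
		return;
	for (i = 0; i < n; i++)
		free(masks[i]);	/* free(NULL) is a harmless no-op */
}

int main(void)
{
	free_affinity_masks(NULL, 8);	/* previously: NULL dereference */
	return 0;
}
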
View file

@ -1,16 +1,16 @@
config FS_ENCRYPTION
tristate "FS Encryption (Per-file encryption)"
bool "FS Encryption (Per-file encryption)"
select CRYPTO
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_CTR
select CRYPTO_SHA256
select KEYS
help
Enable encryption of files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
decrypted pages in the page cache. Currently Ext4,
F2FS and UBIFS make use of this feature.

View file

@ -12,9 +12,6 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
#ifndef __FS_HAS_ENCRYPTION
#define __FS_HAS_ENCRYPTION 1
#endif
#include <linux/fscrypt.h>
#include <crypto/hash.h>
#include <linux/pfk.h>

View file

@ -58,7 +58,7 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
return err;
if (!fscrypt_has_permitted_context(dir, inode))
return -EPERM;
return -EXDEV;
return 0;
}
@ -82,13 +82,13 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_ENCRYPTED(new_dir) &&
!fscrypt_has_permitted_context(new_dir,
d_inode(old_dentry)))
return -EPERM;
return -EXDEV;
if ((flags & RENAME_EXCHANGE) &&
IS_ENCRYPTED(old_dir) &&
!fscrypt_has_permitted_context(old_dir,
d_inode(new_dentry)))
return -EPERM;
return -EXDEV;
}
return 0;
}
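
Returning -EXDEV instead of -EPERM is what makes the userspace fallback described in the updated documentation work: mv(1) already treats EXDEV as "cannot rename across this boundary" and switches to copy-and-delete, exactly as it does for cross-filesystem moves. A sketch of that calling pattern (the paths and the copy helper are placeholders):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	if (rename("/tmp/plain.txt", "/mnt/encrypted-dir/plain.txt") == 0)
		return 0;

	if (errno == EXDEV) {
		/* Same fallback mv uses for cross-device renames: copy the
		 * file into the encrypted directory, then unlink the source. */
		fprintf(stderr, "rename: %s; falling back to copy+unlink\n",
			strerror(errno));
		/* copy_then_unlink(...) would go here. */
	}
	return 1;
}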

View file

@ -151,8 +151,7 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
* malicious offline violations of this constraint, while the link and rename
* checks are needed to prevent online violations of this constraint.
*
* Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
* the filesystem operation with EPERM.
* Return: 1 if permitted, 0 if forbidden.
*/
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{

View file

@ -37,7 +37,6 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
/*

View file

@ -101,10 +101,9 @@ config EXT4_ENCRYPTION
depends on EXT4_FS
select FS_ENCRYPTION
help
Enable encryption of ext4 files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
This kconfig symbol is deprecated; now it just selects
FS_ENCRYPTION. Use CONFIG_FS_ENCRYPTION=y in new config
files.
config EXT4_FS_ENCRYPTION
bool "Ext4 FS Encryption"

View file

@ -111,7 +111,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
int dir_has_error = 0;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
if (ext4_encrypted_inode(inode)) {
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
return err;
@ -138,7 +138,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
}
if (ext4_encrypted_inode(inode)) {
if (IS_ENCRYPTED(inode)) {
err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr);
if (err < 0)
return err;
@ -245,7 +245,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
if (!ext4_encrypted_inode(inode)) {
if (!IS_ENCRYPTED(inode)) {
if (!dir_emit(ctx, de->name,
de->name_len,
le32_to_cpu(de->inode),
@ -283,9 +283,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
done:
err = 0;
errout:
#ifdef CONFIG_EXT4_FS_ENCRYPTION
fscrypt_fname_free_buffer(&fstr);
#endif
brelse(bh);
return err;
}
@ -613,7 +611,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
static int ext4_dir_open(struct inode * inode, struct file * filp)
{
if (ext4_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
return 0;
}
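
These conversions are behavior-preserving because ext4_set_inode_flags() mirrors the on-disk EXT4_INODE_ENCRYPT flag into the VFS-level S_ENCRYPTED bit that IS_ENCRYPTED() tests; only code that runs while that mirroring is being established (such as f2fs_set_inode_flags() later in this series, which keeps file_is_encrypt()) must still read the filesystem-private flag. A toy model of the relationship, with all names and bit values mocked rather than taken from the kernel headers:

#include <stdbool.h>
#include <stdio.h>

#define MOCK_ONDISK_ENCRYPT	(1u << 11)	/* ~ EXT4_INODE_ENCRYPT */
#define MOCK_S_ENCRYPTED	(1u << 13)	/* ~ S_ENCRYPTED in i_flags */

struct mock_inode {
	unsigned int ondisk_flags;	/* filesystem-private flags */
	unsigned int i_flags;		/* generic VFS flags */
};

/* ~ ext4_set_inode_flags(): copy the on-disk bit into the VFS bit. */
static void mock_set_inode_flags(struct mock_inode *inode)
{
	if (inode->ondisk_flags & MOCK_ONDISK_ENCRYPT)
		inode->i_flags |= MOCK_S_ENCRYPTED;
}

/* ~ IS_ENCRYPTED(): test only the VFS bit. */
static bool mock_is_encrypted(const struct mock_inode *inode)
{
	return inode->i_flags & MOCK_S_ENCRYPTED;
}

int main(void)
{
	struct mock_inode ino = { .ondisk_flags = MOCK_ONDISK_ENCRYPT };

	mock_set_inode_flags(&ino);
	printf("IS_ENCRYPTED -> %d\n", mock_is_encrypted(&ino));
	return 0;
}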

View file

@ -40,7 +40,6 @@
#include <linux/compat.h>
#endif
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#include <linux/compiler.h>
@ -1337,7 +1336,7 @@ struct ext4_super_block {
#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
EXT4_MF_TEST_DUMMY_ENCRYPTION))
#else
@ -2062,7 +2061,7 @@ struct ext4_filename {
const struct qstr *usr_fname;
struct fscrypt_str disk_name;
struct dx_hash_info hinfo;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_str crypto_buf;
#endif
};
@ -2290,12 +2289,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
static inline bool ext4_encrypted_inode(struct inode *inode)
{
return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
static inline int ext4_fname_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup, struct ext4_filename *fname)

View file

@ -411,7 +411,7 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
(ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
!test_opt(inode->i_sb, DELALLOC))) {
/* We do not support data journalling for encrypted data */
if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
}

View file

@ -3569,7 +3569,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
max_zeroout = sbi->s_extent_max_zeroout_kb >>
(inode->i_sb->s_blocksize_bits - 10);
if (ext4_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
max_zeroout = 0;
/*
@ -4919,7 +4919,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
* leave it disabled for encrypted inodes for now. This is a
* bug we should fix....
*/
if (ext4_encrypted_inode(inode) &&
if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
FALLOC_FL_ZERO_RANGE)))
return -EOPNOTSUPP;

View file

@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (unlikely(ext4_forced_shutdown(sbi)))
return ERR_PTR(-EIO);
if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
!(i_flags & EXT4_EA_INODE_FL)) {
err = fscrypt_get_encryption_info(dir);

View file

@ -416,7 +416,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
{
int ret;
if (ext4_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
return fscrypt_zeroout_range(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@ -1151,7 +1151,7 @@ int do_journal_get_write_access(handle_t *handle,
return ret;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
@ -1216,7 +1216,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
decrypt = ext4_encrypted_inode(inode) &&
decrypt = IS_ENCRYPTED(inode) &&
S_ISREG(inode->i_mode) &&
!fscrypt_using_hardware_encryption(inode);
ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
@ -1316,7 +1316,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block_unwritten);
@ -3100,7 +3100,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
@ -3767,8 +3767,8 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#if defined(CONFIG_EXT4_FS_ENCRYPTION)
WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
#if defined(CONFIG_FS_ENCRYPTION)
WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode));
#endif
ret = __blockdev_direct_IO(iocb, inode,
@ -3881,8 +3881,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
#if defined(CONFIG_EXT4_FS_ENCRYPTION)
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
#ifdef CONFIG_FS_ENCRYPTION
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode))
return 0;
#endif
@ -4093,8 +4093,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh)) {
err = -EIO;
decrypt = S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode) &&
decrypt = S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
!fscrypt_using_hardware_encryption(inode);
ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
wait_on_buffer(bh);
@ -4177,7 +4176,7 @@ static int ext4_block_truncate_page(handle_t *handle,
struct inode *inode = mapping->host;
/* If we are processing an encrypted inode during orphan list handling */
if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
return 0;
blocksize = inode->i_sb->s_blocksize;
@ -4764,7 +4763,7 @@ static bool ext4_should_use_dax(struct inode *inode)
return false;
if (ext4_has_inline_data(inode))
return false;
if (ext4_encrypted_inode(inode))
if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
return false;
return true;
}
@ -5114,7 +5113,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
if (ext4_encrypted_inode(inode)) {
if (IS_ENCRYPTED(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
ext4_set_aops(inode);
} else if (ext4_inode_is_fast_symlink(inode)) {

View file

@ -256,7 +256,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
return err;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
static int uuid_is_zero(__u8 u[16])
{
int i;
@ -1031,7 +1031,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
int err, err2;
struct ext4_sb_info *sbi = EXT4_SB(sb);
handle_t *handle;

View file

@ -592,8 +592,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
if (ext4_encrypted_inode(orig_inode) ||
ext4_encrypted_inode(donor_inode)) {
if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;

View file

@ -612,7 +612,7 @@ static struct stats dx_show_leaf(struct inode *dir,
{
if (show_names)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
int len;
char *name;
struct fscrypt_str fname_crypto_str =
@ -621,7 +621,7 @@ static struct stats dx_show_leaf(struct inode *dir,
name = de->name;
len = de->name_len;
if (ext4_encrypted_inode(dir))
if (IS_ENCRYPTED(dir))
res = fscrypt_get_encryption_info(dir);
if (res) {
printk(KERN_WARNING "Error setting up"
@ -984,9 +984,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
/* Check if the directory is encrypted */
if (ext4_encrypted_inode(dir)) {
if (IS_ENCRYPTED(dir)) {
err = fscrypt_get_encryption_info(dir);
if (err < 0) {
brelse(bh);
@ -1015,7 +1015,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
if (!ext4_encrypted_inode(dir)) {
if (!IS_ENCRYPTED(dir)) {
tmp_str.name = de->name;
tmp_str.len = de->name_len;
err = ext4_htree_store_dirent(dir_file,
@ -1047,7 +1047,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
}
errout:
brelse(bh);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
fscrypt_fname_free_buffer(&fname_crypto_str);
#endif
return count;
@ -1267,7 +1267,7 @@ static inline bool ext4_match(const struct ext4_filename *fname,
f.usr_fname = fname->usr_fname;
f.disk_name = fname->disk_name;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
f.crypto_buf = fname->crypto_buf;
#endif
return fscrypt_match_name(&f, de->name, de->name_len);
@ -1498,7 +1498,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
ext4_lblk_t block;
int retval;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
*res_dir = NULL;
#endif
frame = dx_probe(fname, dir, NULL, frames);
@ -1578,7 +1578,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
ino);
return ERR_PTR(-EFSCORRUPTED);
}
if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
if (!IS_ERR(inode) && IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
ext4_warning(inode->i_sb,

View file

@ -66,7 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
struct page *data_page = NULL;
#endif
struct buffer_head *bh, *head;
@ -78,7 +78,7 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
if (!page->mapping) {
/* The bounce data pages are unmapped. */
data_page = page;
@ -111,7 +111,7 @@ static void ext4_finish_bio(struct bio *bio)
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
if (data_page)
fscrypt_restore_control_page(data_page);
#endif
@ -480,8 +480,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
bh = head = page_buffers(page);
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
nr_to_submit) {
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:

View file

@ -50,7 +50,7 @@
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
return unlikely(bio->bi_private != NULL);
#else
return false;
@ -282,8 +282,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
struct fscrypt_ctx *ctx = NULL;
unsigned int flags = 0;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;

View file

@ -1243,7 +1243,7 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
return try_to_free_buffers(page);
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
@ -1341,7 +1341,7 @@ static bool ext4_dummy_context(struct inode *inode)
static inline bool ext4_is_encrypted(struct inode *inode)
{
return ext4_encrypted_inode(inode);
return IS_ENCRYPTED(inode);
}
static const struct fscrypt_operations ext4_cryptops = {
@ -1940,7 +1940,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
*journal_ioprio =
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
ext4_msg(sb, KERN_WARNING,
"Test dummy encryption mode enabled");
@ -4185,7 +4185,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA

View file

@ -224,7 +224,7 @@ static struct attribute *ext4_attrs[] = {
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
@ -233,7 +233,7 @@ static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
#endif
ATTR_LIST(metadata_csum_seed),

View file

@ -3,6 +3,7 @@ config F2FS_FS
depends on BLOCK
select CRYPTO
select CRYPTO_CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
@ -73,13 +74,11 @@ config F2FS_CHECK_FS
config F2FS_FS_ENCRYPTION
bool "F2FS Encryption"
depends on F2FS_FS
depends on F2FS_FS_XATTR
select FS_ENCRYPTION
help
Enable encryption of f2fs files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
This kconfig symbol is deprecated; now it just selects
FS_ENCRYPTION. Use CONFIG_FS_ENCRYPTION=y in new config
files.
config F2FS_IO_TRACE
bool "F2FS IO tracer"

View file

@ -621,7 +621,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ,
(f2fs_encrypted_inode(inode) ?
(IS_ENCRYPTED(inode) ?
REQ_NOENCRYPT :
0));
@ -1528,7 +1528,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
if (size) {
if (f2fs_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
ret = fiemap_fill_next_extent(fieinfo, logical,
@ -1813,7 +1813,7 @@ static inline bool check_inplace_update_policy(struct inode *inode,
if (policy & (0x1 << F2FS_IPU_ASYNC) &&
fio && fio->op == REQ_OP_WRITE &&
!(fio->op_flags & REQ_SYNC) &&
!f2fs_encrypted_inode(inode))
!IS_ENCRYPTED(inode))
return true;
/* this is only set during fdatasync */

View file

@ -385,7 +385,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
if ((IS_ENCRYPTED(dir) || dummy_encrypt) &&
f2fs_may_encrypt(inode)) {
err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
@ -399,7 +399,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (new_name) {
init_dent_inode(new_name, page);
if (f2fs_encrypted_inode(dir))
if (IS_ENCRYPTED(dir))
file_set_enc_name(inode);
}
@ -824,7 +824,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
goto out;
}
if (f2fs_encrypted_inode(d->inode)) {
if (IS_ENCRYPTED(d->inode)) {
int save_len = fstr->len;
err = fscrypt_fname_disk_to_usr(d->inode,
@ -867,7 +867,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
int err = 0;
if (f2fs_encrypted_inode(inode)) {
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
goto out;
@ -929,7 +929,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
static int f2fs_dir_open(struct inode *inode, struct file *filp)
{
if (f2fs_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
return 0;
}

View file

@ -24,7 +24,6 @@
#include <linux/quotaops.h>
#include <crypto/hash.h>
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#ifdef CONFIG_F2FS_CHECK_FS
@ -1141,7 +1140,7 @@ enum fsync_mode {
FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
#else
@ -3492,19 +3491,14 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
/*
* crypto support
*/
static inline bool f2fs_encrypted_inode(struct inode *inode)
{
return file_is_encrypt(inode);
}
static inline bool f2fs_encrypted_file(struct inode *inode)
{
return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
file_set_encrypt(inode);
f2fs_set_inode_flags(inode);
#endif
@ -3584,7 +3578,7 @@ static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
static inline bool f2fs_may_encrypt(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
umode_t mode = inode->i_mode;
return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));

View file

@ -582,7 +582,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
zero_user(page, offset, PAGE_SIZE - offset);
/* An encrypted inode should have a key and truncate the last page. */
f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
if (!cache_only)
set_page_dirty(page);
f2fs_put_page(page, 1);
@ -709,7 +709,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_APPEND;
if (flags & F2FS_COMPR_FL)
stat->attributes |= STATX_ATTR_COMPRESSED;
if (f2fs_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
stat->attributes |= STATX_ATTR_ENCRYPTED;
if (flags & F2FS_IMMUTABLE_FL)
stat->attributes |= STATX_ATTR_IMMUTABLE;
@ -1558,7 +1558,7 @@ static long f2fs_fallocate(struct file *file, int mode,
if (!S_ISREG(inode->i_mode))
return -EINVAL;
if (f2fs_encrypted_inode(inode) &&
if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
@ -1642,7 +1642,7 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags;
if (f2fs_encrypted_inode(inode))
if (IS_ENCRYPTED(inode))
flags |= F2FS_ENCRYPT_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
flags |= F2FS_INLINE_DATA_FL;
@ -2416,7 +2416,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
return -EINVAL;
if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
return -EOPNOTSUPP;
if (src == dst) {

View file

@ -44,7 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & F2FS_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
if (f2fs_encrypted_inode(inode))
if (file_is_encrypt(inode))
new_fl |= S_ENCRYPTED;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
@ -468,7 +468,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
inode->i_mapping->a_ops = &f2fs_dblock_aops;
inode_nohighmem(inode);
} else if (S_ISLNK(inode->i_mode)) {
if (f2fs_encrypted_inode(inode))
if (file_is_encrypt(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;

View file

@ -76,7 +76,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
set_inode_flag(inode, FI_NEW_INODE);
/* If the directory is encrypted, then we should encrypt the inode. */
if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
@ -477,7 +477,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (err)
goto out_iput;
}
if (f2fs_encrypted_inode(dir) &&
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
f2fs_msg(inode->i_sb, KERN_WARNING,
@ -804,7 +804,7 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
int err = fscrypt_get_encryption_info(dir);
if (err)
return err;

View file

@ -757,7 +757,7 @@ static int parse_options(struct super_block *sb, char *options)
kvfree(name);
break;
case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
if (!f2fs_sb_has_encrypt(sbi)) {
f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
return -EINVAL;
@ -1418,7 +1418,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",whint_mode=%s", "user-based");
else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
seq_printf(seq, ",whint_mode=%s", "fs-based");
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
if (F2FS_OPTION(sbi).test_dummy_encryption)
seq_puts(seq, ",test_dummy_encryption");
#endif
@ -2204,7 +2204,7 @@ static const struct super_operations f2fs_sops = {
.remount_fs = f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
@ -3174,7 +3174,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
#endif
sb->s_op = &f2fs_sops;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
#endif
sb->s_xattr = f2fs_xattr_handlers;

View file

@ -441,7 +441,7 @@ F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
#endif
#ifdef CONFIG_BLK_DEV_ZONED
@ -503,7 +503,7 @@ static struct attribute *f2fs_attrs[] = {
};
static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
#endif
#ifdef CONFIG_BLK_DEV_ZONED

View file

@ -7,6 +7,7 @@ config UBIFS_FS
select CRYPTO if UBIFS_FS_ZLIB
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
select UBIFS_FS_XATTR if FS_ENCRYPTION
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
@ -62,17 +63,6 @@ config UBIFS_FS_XATTR
If unsure, say Y.
config UBIFS_FS_ENCRYPTION
bool "UBIFS Encryption"
depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
select FS_ENCRYPTION
default n
help
Enable encryption of UBIFS files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
config UBIFS_FS_SECURITY
bool "UBIFS Security Labels"
depends on UBIFS_FS && UBIFS_FS_XATTR

View file

@ -6,5 +6,5 @@ ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o
ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o
ubifs-y += misc.o
ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
ubifs-$(CONFIG_FS_ENCRYPTION) += crypto.o
ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o

View file

@ -185,7 +185,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
case FS_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_UBIFS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
struct ubifs_info *c = inode->i_sb->s_fs_info;
err = ubifs_enable_encryption(c);
@ -198,7 +198,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#endif
}
case FS_IOC_GET_ENCRYPTION_POLICY: {
#ifdef CONFIG_UBIFS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
return fscrypt_ioctl_get_policy(file, (void __user *)arg);
#else
return -EOPNOTSUPP;

View file

@ -647,7 +647,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
goto out;
}
#ifndef CONFIG_UBIFS_FS_ENCRYPTION
#ifndef CONFIG_FS_ENCRYPTION
if (c->encrypted) {
ubifs_err(c, "file system contains encrypted files but UBIFS"
" was built without crypto support.");

View file

@ -2087,7 +2087,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_UBIFS_FS_XATTR
sb->s_xattr = ubifs_xattr_handlers;
#endif
#ifdef CONFIG_UBIFS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &ubifs_crypt_operations;
#endif

View file

@ -40,7 +40,6 @@
#include <linux/xattr.h>
#include <linux/random.h>
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#include "ubifs-media.h"
@ -139,7 +138,7 @@
*/
#define WORST_COMPR_FACTOR 2
#ifdef CONFIG_UBIFS_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
#define UBIFS_CIPHER_BLOCK_SIZE FS_CRYPTO_BLOCK_SIZE
#else
#define UBIFS_CIPHER_BLOCK_SIZE 0
@ -1829,7 +1828,7 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
#include "misc.h"
#include "key.h"
#ifndef CONFIG_UBIFS_FS_ENCRYPTION
#ifndef CONFIG_FS_ENCRYPTION
static inline int ubifs_encrypt(const struct inode *inode,
struct ubifs_data_node *dn,
unsigned int in_len, unsigned int *out_len,

View file

@ -688,7 +688,7 @@ struct inode {
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_info *i_crypt_info;
#endif
@ -1388,7 +1388,7 @@ struct super_block {
void *s_security;
#endif
const struct xattr_handler **s_xattr;
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */

View file

@ -2,9 +2,8 @@
/*
* fscrypt.h: declarations for per-file encryption
*
* Filesystems that implement per-file encryption include this header
* file with the __FS_HAS_ENCRYPTION set according to whether that filesystem
* is being built with encryption support or not.
* Filesystems that implement per-file encryption must include this header
* file.
*
* Copyright (C) 2015, Google, Inc.
*
@ -15,6 +14,8 @@
#define _LINUX_FSCRYPT_H
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#define FS_CRYPTO_BLOCK_SIZE 16
@ -47,11 +48,411 @@ struct fscrypt_name {
/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
#if __FS_HAS_ENCRYPTION
#include <linux/fscrypt_supp.h>
#else
#include <linux/fscrypt_notsupp.h>
#endif
#ifdef CONFIG_FS_ENCRYPTION
/*
* fscrypt superblock flags
*/
#define FS_CFLG_OWN_PAGES (1U << 1)
/*
* crypto operations for filesystems
*/
struct fscrypt_operations {
unsigned int flags;
const char *key_prefix;
int (*get_context)(struct inode *, void *, size_t);
int (*set_context)(struct inode *, const void *, size_t, void *);
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
bool (*is_encrypted)(struct inode *inode);
};
struct fscrypt_ctx {
union {
struct {
struct page *bounce_page; /* Ciphertext page */
struct page *control_page; /* Original page */
} w;
struct {
struct bio *bio;
struct work_struct work;
} r;
struct list_head free_list; /* Free list */
};
u8 flags; /* Flags */
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
return (inode->i_crypt_info != NULL);
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return inode->i_sb->s_cop->dummy_context &&
inode->i_sb->s_cop->dummy_context(inode);
}
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
unsigned int, unsigned int,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
static inline struct page *fscrypt_control_page(struct page *page)
{
return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
}
extern void fscrypt_restore_control_page(struct page *);
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
int lookup, struct fscrypt_name *);
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
kfree(fname->crypto_buf.name);
}
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
/* Extracts the second-to-last ciphertext block; see explanation below */
#define FSCRYPT_FNAME_DIGEST(name, len) \
((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
FS_CRYPTO_BLOCK_SIZE))
#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
/**
* fscrypt_digested_name - alternate identifier for an on-disk filename
*
* When userspace lists an encrypted directory without access to the key,
* filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
* bytes are shown in this abbreviated form (base64-encoded) rather than as the
* full ciphertext (base64-encoded). This is necessary to allow supporting
* filenames up to NAME_MAX bytes, since base64 encoding expands the length.
*
* To make it possible for filesystems to still find the correct directory entry
* despite not knowing the full on-disk name, we encode any filesystem-specific
* 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
* followed by the second-to-last ciphertext block of the filename. Due to the
* use of the CBC-CTS encryption mode, the second-to-last ciphertext block
* depends on the full plaintext. (Note that ciphertext stealing causes the
* last two blocks to appear "flipped".) This makes accidental collisions very
* unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
* share the same filesystem-specific hashes.
*
* However, this scheme isn't immune to intentional collisions, which can be
* created by anyone able to create arbitrary plaintext filenames and view them
* without the key. Making the "digest" be a real cryptographic hash like
* SHA-256 over the full ciphertext would prevent this, although it would be
* less efficient and harder to implement, especially since the filesystem would
* need to calculate it for each directory entry examined during a search.
*/
struct fscrypt_digested_name {
u32 hash;
u32 minor_hash;
u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
};
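
The FSCRYPT_FNAME_DIGEST macro above picks out the 16-byte block starting at round_down(len - 16 - 1, 16), i.e. the second-to-last ciphertext block. A standalone check of that arithmetic, with round_down reimplemented in plain C for illustration:

#include <stdio.h>

#define BLK 16				/* FS_CRYPTO_BLOCK_SIZE */
#define ROUND_DOWN(x, y) ((x) - ((x) % (y)))

int main(void)
{
	unsigned int len;

	/* The digested form only applies past 32 ciphertext bytes
	 * (FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE). */
	for (len = 33; len <= 80; len += 16)
		printf("ciphertext of %2u bytes -> digest = bytes [%2u..%2u)\n",
		       len, ROUND_DOWN(len - BLK - 1, BLK),
		       ROUND_DOWN(len - BLK - 1, BLK) + BLK);
	return 0;
}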
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and a filename in it is
* very long, then we won't have the full disk_name and we'll instead need to
* match against the fscrypt_digested_name.
*
* Return: %true if the name matches, otherwise %false.
*/
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
if (unlikely(!fname->disk_name.name)) {
const struct fscrypt_digested_name *n =
(const void *)fname->crypto_buf.name;
if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
return false;
if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
return false;
return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
}
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
unsigned int len,
struct fscrypt_str *disk_link);
extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
#else /* !CONFIG_FS_ENCRYPTION */
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
return false;
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return false;
}
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
return;
}
static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int fscrypt_decrypt_page(const struct inode *inode,
struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
return -EOPNOTSUPP;
}
static inline struct page *fscrypt_control_page(struct page *page)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
static inline void fscrypt_restore_control_page(struct page *page)
{
return;
}
/* policy.c */
static inline int fscrypt_ioctl_set_policy(struct file *filp,
const void __user *arg)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_has_permitted_context(struct inode *parent,
struct inode *child)
{
return 0;
}
static inline int fscrypt_inherit_context(struct inode *parent,
struct inode *child,
void *fs_data, bool preload)
{
return -EOPNOTSUPP;
}
/* keyinfo.c */
static inline int fscrypt_get_encryption_info(struct inode *inode)
{
return -EOPNOTSUPP;
}
static inline void fscrypt_put_encryption_info(struct inode *inode)
{
return;
}
/* fname.c */
static inline int fscrypt_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
if (IS_ENCRYPTED(dir))
return -EOPNOTSUPP;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
return 0;
}
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
return;
}
static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
}
static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
return;
}
static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
return -EOPNOTSUPP;
}
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
/* Encryption support disabled; use standard comparison */
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
/* bio.c */
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio)
{
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
return;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
return -EOPNOTSUPP;
}
/* hooks.c */
static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
{
if (IS_ENCRYPTED(inode))
return -EOPNOTSUPP;
return 0;
}
static inline int __fscrypt_prepare_link(struct inode *inode,
struct inode *dir)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_symlink(struct inode *dir,
unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_encrypt_symlink(struct inode *inode,
const char *target,
unsigned int len,
struct fscrypt_str *disk_link)
{
return -EOPNOTSUPP;
}
static inline const char *fscrypt_get_symlink(struct inode *inode,
const void *caddr,
unsigned int max_size,
struct delayed_call *done)
{
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* !CONFIG_FS_ENCRYPTION */
/**
* fscrypt_require_key - require an inode's encryption key
@ -94,7 +495,7 @@ static inline int fscrypt_require_key(struct inode *inode)
* in an encrypted directory tree use the same encryption policy.
*
* Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
* -EPERM if the link would result in an inconsistent encryption policy, or
* -EXDEV if the link would result in an inconsistent encryption policy, or
* another -errno code.
*/
static inline int fscrypt_prepare_link(struct dentry *old_dentry,
@ -124,7 +525,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
* We also verify that the rename will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
*
* Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
* Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
* rename would cause inconsistent encryption policies, or another -errno code.
*/
static inline int fscrypt_prepare_rename(struct inode *old_dir,

View file

@ -1,231 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* fscrypt_notsupp.h
*
* This stubs out the fscrypt functions for filesystems configured without
* encryption support.
*
* Do not include this file directly. Use fscrypt.h instead!
*/
#ifndef _LINUX_FSCRYPT_H
#error "Incorrect include of linux/fscrypt_notsupp.h!"
#endif
#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
return false;
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return false;
}
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
return;
}
static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int fscrypt_decrypt_page(const struct inode *inode,
struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
return -EOPNOTSUPP;
}
static inline struct page *fscrypt_control_page(struct page *page)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
static inline void fscrypt_restore_control_page(struct page *page)
{
return;
}
/* policy.c */
static inline int fscrypt_ioctl_set_policy(struct file *filp,
const void __user *arg)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_has_permitted_context(struct inode *parent,
struct inode *child)
{
return 0;
}
static inline int fscrypt_inherit_context(struct inode *parent,
struct inode *child,
void *fs_data, bool preload)
{
return -EOPNOTSUPP;
}
/* keyinfo.c */
static inline int fscrypt_get_encryption_info(struct inode *inode)
{
return -EOPNOTSUPP;
}
static inline void fscrypt_put_encryption_info(struct inode *inode)
{
return;
}
/* fname.c */
static inline int fscrypt_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
if (IS_ENCRYPTED(dir))
return -EOPNOTSUPP;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
return 0;
}
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
return;
}
static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
}
static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
return;
}
static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
return -EOPNOTSUPP;
}
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
/* Encryption support disabled; use standard comparison */
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
/* bio.c */
static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio)
{
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
return;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
return -EOPNOTSUPP;
}
/* hooks.c */
static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
{
if (IS_ENCRYPTED(inode))
return -EOPNOTSUPP;
return 0;
}
static inline int __fscrypt_prepare_link(struct inode *inode,
struct inode *dir)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_prepare_symlink(struct inode *dir,
unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link)
{
return -EOPNOTSUPP;
}
static inline int __fscrypt_encrypt_symlink(struct inode *inode,
const char *target,
unsigned int len,
struct fscrypt_str *disk_link)
{
return -EOPNOTSUPP;
}
static inline const char *fscrypt_get_symlink(struct inode *inode,
const void *caddr,
unsigned int max_size,
struct delayed_call *done)
{
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* _LINUX_FSCRYPT_NOTSUPP_H */

View file

@ -1,205 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* fscrypt_supp.h
*
* Do not include this file directly. Use fscrypt.h instead!
*/
#ifndef _LINUX_FSCRYPT_H
#error "Incorrect include of linux/fscrypt_supp.h!"
#endif
#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H
#include <linux/mm.h>
#include <linux/slab.h>
/*
* fscrypt superblock flags
*/
#define FS_CFLG_OWN_PAGES (1U << 1)
/*
* crypto operations for filesystems
*/
struct fscrypt_operations {
unsigned int flags;
const char *key_prefix;
int (*get_context)(struct inode *, void *, size_t);
int (*set_context)(struct inode *, const void *, size_t, void *);
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
bool (*is_encrypted)(struct inode *inode);
};
struct fscrypt_ctx {
union {
struct {
struct page *bounce_page; /* Ciphertext page */
struct page *control_page; /* Original page */
} w;
struct {
struct bio *bio;
struct work_struct work;
} r;
struct list_head free_list; /* Free list */
};
u8 flags; /* Flags */
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
return (inode->i_crypt_info != NULL);
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return inode->i_sb->s_cop->dummy_context &&
inode->i_sb->s_cop->dummy_context(inode);
}
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
unsigned int, unsigned int,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
static inline struct page *fscrypt_control_page(struct page *page)
{
return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
}
extern void fscrypt_restore_control_page(struct page *);
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
int lookup, struct fscrypt_name *);
static inline void fscrypt_free_filename(struct fscrypt_name *fname)
{
kfree(fname->crypto_buf.name);
}
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
/* Extracts the second-to-last ciphertext block; see explanation below */
#define FSCRYPT_FNAME_DIGEST(name, len) \
((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
FS_CRYPTO_BLOCK_SIZE))
#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
/**
* fscrypt_digested_name - alternate identifier for an on-disk filename
*
* When userspace lists an encrypted directory without access to the key,
* filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
* bytes are shown in this abbreviated form (base64-encoded) rather than as the
* full ciphertext (base64-encoded). This is necessary to allow supporting
* filenames up to NAME_MAX bytes, since base64 encoding expands the length.
*
* To make it possible for filesystems to still find the correct directory entry
* despite not knowing the full on-disk name, we encode any filesystem-specific
* 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
* followed by the second-to-last ciphertext block of the filename. Due to the
* use of the CBC-CTS encryption mode, the second-to-last ciphertext block
* depends on the full plaintext. (Note that ciphertext stealing causes the
* last two blocks to appear "flipped".) This makes accidental collisions very
* unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
* share the same filesystem-specific hashes.
*
* However, this scheme isn't immune to intentional collisions, which can be
* created by anyone able to create arbitrary plaintext filenames and view them
* without the key. Making the "digest" be a real cryptographic hash like
* SHA-256 over the full ciphertext would prevent this, although it would be
* less efficient and harder to implement, especially since the filesystem would
* need to calculate it for each directory entry examined during a search.
*/
struct fscrypt_digested_name {
u32 hash;
u32 minor_hash;
u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
};
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and a filename in it is
* very long, then we won't have the full disk_name and we'll instead need to
* match against the fscrypt_digested_name.
*
* Return: %true if the name matches, otherwise %false.
*/
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
if (unlikely(!fname->disk_name.name)) {
const struct fscrypt_digested_name *n =
(const void *)fname->crypto_buf.name;
if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
return false;
if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
return false;
return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
}
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
unsigned int len,
struct fscrypt_str *disk_link);
extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
#endif /* _LINUX_FSCRYPT_SUPP_H */
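Editor's note: the digested-name scheme described in the removed header above is subtle, so here is a minimal standalone C sketch of how the second-to-last ciphertext block is located, mirroring the FSCRYPT_FNAME_DIGEST macro. Everything here is hypothetical userspace illustration: digest_offset() is an invented helper, and BLOCK_SIZE is assumed to be 16 bytes (the AES block size that FS_CRYPTO_BLOCK_SIZE denotes).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16	/* assumed FS_CRYPTO_BLOCK_SIZE (AES block) */

/* Offset of the second-to-last ciphertext block, as computed by
 * FSCRYPT_FNAME_DIGEST: round_down(len - BLOCK_SIZE - 1, BLOCK_SIZE). */
static size_t digest_offset(size_t len)
{
	return ((len - BLOCK_SIZE - 1) / BLOCK_SIZE) * BLOCK_SIZE;
}

int main(void)
{
	uint8_t name[48];	/* dummy 3-block "ciphertext" filename */
	uint8_t digest[BLOCK_SIZE];
	size_t i;

	for (i = 0; i < sizeof(name); i++)
		name[i] = (uint8_t)i;

	memcpy(digest, name + digest_offset(sizeof(name)), BLOCK_SIZE);

	/* A keyless lookup would compare this block against
	 * fscrypt_digested_name::digest, exactly as fscrypt_match_name()
	 * above does with memcmp(). */
	printf("digest taken from byte %zu, first byte 0x%02x\n",
	       digest_offset(sizeof(name)), digest[0]);
	return 0;
}

For the 48-byte name the digest starts at byte 16, i.e. the middle block of three, which is indeed the second-to-last one.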

View file

@ -73,8 +73,8 @@
#define u64_to_user_ptr(x) ( \
{ \
typecheck(u64, x); \
(void __user *)(uintptr_t)x; \
typecheck(u64, (x)); \
(void __user *)(uintptr_t)(x); \
} \
)
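Editor's note: the hunk above is a classic macro-hygiene fix; without parentheses around the argument, a cast binds only to the first token of the expansion. A minimal userspace sketch, with the hypothetical TO_PTR_OLD/TO_PTR_NEW macros standing in for the pre- and post-fix forms of u64_to_user_ptr() (the typecheck() statement-expression is omitted):

#include <stdint.h>
#include <stdio.h>

#define TO_PTR_OLD(x) ((void *)(uintptr_t)x)	/* pre-fix form  */
#define TO_PTR_NEW(x) ((void *)(uintptr_t)(x))	/* post-fix form */

int main(void)
{
	uint64_t a = 0x1000, b = 0x2000;
	int flag = 0;

	void *p = TO_PTR_NEW(flag ? a : b);	/* pointer, as intended */

	/* TO_PTR_OLD(flag ? a : b) would expand to
	 *   ((void *)(uintptr_t)flag ? a : b)
	 * i.e. the cast applies to 'flag' alone and the whole expression
	 * has integer type, so this would not compile cleanly:
	 *
	 *   void *q = TO_PTR_OLD(flag ? a : b);
	 */
	printf("%p\n", p);
	return 0;
}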

View file

@ -182,6 +182,9 @@ struct adv_info {
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE 7
/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)

View file

@ -2044,8 +2044,8 @@ event_sched_out(struct perf_event *event,
event->pmu->del(event, 0);
event->oncpu = -1;
if (event->pending_disable) {
event->pending_disable = 0;
if (READ_ONCE(event->pending_disable) >= 0) {
WRITE_ONCE(event->pending_disable, -1);
state = PERF_EVENT_STATE_OFF;
}
perf_event_set_state(event, state);
@ -2233,7 +2233,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
WRITE_ONCE(event->pending_disable, smp_processor_id());
/* can fail, see perf_pending_event_disable() */
irq_work_queue(&event->pending);
}
@ -5976,10 +5977,45 @@ void perf_event_wakeup(struct perf_event *event)
}
}
static void perf_pending_event_disable(struct perf_event *event)
{
int cpu = READ_ONCE(event->pending_disable);
if (cpu < 0)
return;
if (cpu == smp_processor_id()) {
WRITE_ONCE(event->pending_disable, -1);
perf_event_disable_local(event);
return;
}
/*
* CPU-A CPU-B
*
* perf_event_disable_inatomic()
* @pending_disable = CPU-A;
* irq_work_queue();
*
* sched-out
* @pending_disable = -1;
*
* sched-in
* perf_event_disable_inatomic()
* @pending_disable = CPU-B;
* irq_work_queue(); // FAILS
*
* irq_work_run()
* perf_pending_event()
*
* But the event runs on CPU-B and wants disabling there.
*/
irq_work_queue_on(&event->pending, cpu);
}
static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
struct perf_event *event = container_of(entry, struct perf_event, pending);
int rctx;
rctx = perf_swevent_get_recursion_context();
@ -5988,10 +6024,7 @@ static void perf_pending_event(struct irq_work *entry)
* and we won't recurse 'further'.
*/
if (event->pending_disable) {
event->pending_disable = 0;
perf_event_disable_local(event);
}
perf_pending_event_disable(event);
if (event->pending_wakeup) {
event->pending_wakeup = 0;
@ -10264,6 +10297,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_waitqueue_head(&event->waitq);
event->pending_disable = -1;
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
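Editor's note: the CPU-A/CPU-B race in the comment above boils down to encoding "which CPU must perform the disable" in pending_disable, with -1 meaning none. A toy single-threaded C model of that dispatch; disable_inatomic() and pending_event_disable() are invented stand-ins, and printf takes the place of the real irq_work machinery:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_disable = -1;	/* -1: nothing pending */

static void disable_inatomic(int this_cpu)
{
	atomic_store(&pending_disable, this_cpu);
	/* real code: irq_work_queue(&event->pending); */
}

static void pending_event_disable(int this_cpu)
{
	int cpu = atomic_load(&pending_disable);

	if (cpu < 0)
		return;
	if (cpu == this_cpu) {
		atomic_store(&pending_disable, -1);
		printf("CPU %d disables the event locally\n", this_cpu);
		return;
	}
	/* real code: irq_work_queue_on(&event->pending, cpu); */
	printf("CPU %d re-routes the disable to CPU %d\n", this_cpu, cpu);
}

int main(void)
{
	disable_inatomic(1);		/* event now runs on CPU 1       */
	pending_event_disable(0);	/* stale irq_work fires on CPU 0 */
	pending_event_disable(1);	/* CPU 1 finally does the work   */
	return 0;
}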

View file

@ -393,7 +393,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
* store that will be enabled on successful return
*/
if (!handle->size) { /* A, matches D */
event->pending_disable = 1;
event->pending_disable = smp_processor_id();
perf_output_wakeup(handle);
local_set(&rb->aux_nest, 0);
goto err_put;
@ -471,7 +471,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
if (wakeup) {
if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
handle->event->pending_disable = 1;
handle->event->pending_disable = smp_processor_id();
perf_output_wakeup(handle);
}

View file

@ -1306,13 +1306,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
int err;
u32 uninitialized_var(curval);
if (unlikely(should_fail_futex(true)))
return -EFAULT;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (unlikely(err))
return err;
/* If user space value changed, let the caller retry */
return curval != uval ? -EAGAIN : 0;
@ -1498,10 +1500,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
} else if (curval != uval) {
ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (!ret && (curval != uval)) {
/*
* If an unconditional UNLOCK_PI operation (user space did not

* try the TID->0 transition) raced with a waiter setting the
@ -1696,32 +1696,32 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
* we don't get EFAULT from MMU faults if we don't have an MMU,
* but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
if (!IS_ENABLED(CONFIG_MMU) ||
unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
/*
* we don't get EFAULT from MMU faults if we don't have
* an MMU, but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
}
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
if (op_ret == -EFAULT) {
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
}
if (!(flags & FLAGS_SHARED))
if (!(flags & FLAGS_SHARED)) {
cond_resched();
goto retry_private;
}
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
}
@ -2346,7 +2346,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
u32 uval, uninitialized_var(curval), newval;
struct task_struct *oldowner, *newowner;
u32 newtid;
int ret;
int ret, err = 0;
lockdep_assert_held(q->lock_ptr);
@ -2417,14 +2417,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
err = get_futex_value_locked(&uval, uaddr);
if (err)
goto handle_err;
for (;;) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
goto handle_fault;
err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (err)
goto handle_err;
if (curval == uval)
break;
uval = curval;
@ -2452,23 +2455,37 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
return 0;
/*
* To handle the page fault we need to drop the locks here. That gives
* the other task (either the highest priority waiter itself or the
* task which stole the rtmutex) the chance to try the fixup of the
* pi_state. So once we are back from handling the fault we need to
* check the pi_state after reacquiring the locks and before trying to
* do another fixup. When the fixup has been done already we simply
* return.
* In order to reschedule or handle a page fault, we need to drop the
* locks here. In the case of a fault, this gives the other task
* (either the highest priority waiter itself or the task which stole
* the rtmutex) the chance to try the fixup of the pi_state. So once we
* are back from handling the fault we need to check the pi_state after
* reacquiring the locks and before trying to do another fixup. When
* the fixup has been done already we simply return.
*
* Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
* drop hb->lock since the caller owns the hb -> futex_q relation.
* Dropping the pi_mutex->wait_lock requires the state revalidate.
*/
handle_fault:
handle_err:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(q->lock_ptr);
ret = fault_in_user_writeable(uaddr);
switch (err) {
case -EFAULT:
ret = fault_in_user_writeable(uaddr);
break;
case -EAGAIN:
cond_resched();
ret = 0;
break;
default:
WARN_ON_ONCE(1);
ret = err;
break;
}
spin_lock(q->lock_ptr);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
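Editor's note: the new handle_err dispatch above distinguishes a page fault (fault the page in, then retry) from transient -EAGAIN (drop locks, reschedule, then retry). A self-contained C sketch of that retry pattern; fake_cmpxchg() is an invented stand-in for cmpxchg_futex_value_locked() that fails once per error class before succeeding:

#include <errno.h>
#include <stdio.h>

static int fake_cmpxchg(int attempt)
{
	if (attempt == 0)
		return -EFAULT;	/* page not resident         */
	if (attempt == 1)
		return -EAGAIN;	/* transient, just try again */
	return 0;
}

int main(void)
{
	int attempt = 0, err;

	for (;;) {
		err = fake_cmpxchg(attempt++);
		if (!err)
			break;
		switch (err) {
		case -EFAULT:
			printf("fault: would fault_in_user_writeable()\n");
			break;	/* then retry */
		case -EAGAIN:
			printf("again: would drop locks, cond_resched()\n");
			break;	/* then retry */
		default:
			return 1;	/* unexpected, give up */
		}
	}
	printf("succeeded after %d attempts\n", attempt);
	return 0;
}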
@ -3037,10 +3054,8 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* An unconditional UNLOCK_PI op raced against a waiter
* setting the FUTEX_WAITERS bit. Try again.
*/
if (ret == -EAGAIN) {
put_futex_key(&key);
goto retry;
}
if (ret == -EAGAIN)
goto pi_retry;
/*
* wake_futex_pi has detected invalid state. Tell user
* space.
@ -3055,9 +3070,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* preserve the WAITERS bit not the OWNER_DIED one. We are the
* owner.
*/
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
spin_unlock(&hb->lock);
goto pi_faulted;
switch (ret) {
case -EFAULT:
goto pi_faulted;
case -EAGAIN:
goto pi_retry;
default:
WARN_ON_ONCE(1);
goto out_putkey;
}
}
/*
@ -3071,6 +3096,11 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
put_futex_key(&key);
return ret;
pi_retry:
put_futex_key(&key);
cond_resched();
goto retry;
pi_faulted:
put_futex_key(&key);
@ -3431,6 +3461,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
int err;
/* Futex address must be 32bit aligned */
if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
@ -3440,42 +3471,57 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
if (get_user(uval, uaddr))
return -1;
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
return 0;
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
switch (err) {
case -EFAULT:
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval)
case -EAGAIN:
cond_resched();
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
default:
WARN_ON_ONCE(1);
return err;
}
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
return 0;
}
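Editor's note: the mval computation in handle_futex_death() above is plain bit surgery on the futex word, and is easy to check in isolation. A small self-contained demo; the constants are copied from the futex UAPI (<linux/futex.h>):

#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS		0x80000000
#define FUTEX_OWNER_DIED	0x40000000
#define FUTEX_TID_MASK		0x3fffffff

int main(void)
{
	uint32_t uval = FUTEX_WAITERS | 0x42;	/* waiters queued, TID 0x42 */
	uint32_t mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/* TID is cleared, WAITERS preserved, OWNER_DIED announced: */
	printf("0x%08x -> 0x%08x\n", uval, mval);
	return 0;
}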

View file

@ -356,11 +356,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (!notify && old_notify)
if (old_notify) {
cancel_work_sync(&old_notify->work);
if (old_notify)
kref_put(&old_notify->kref, old_notify->release);
}
return 0;
}

View file

@ -86,11 +86,13 @@ static bool is_inline_int(struct type_descriptor *type)
return bits <= inline_bits;
}
static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
static s_max get_signed_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type)) {
unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
return ((s_max)val) << extra_bits >> extra_bits;
unsigned long ulong_val = (unsigned long)val;
return ((s_max)ulong_val) << extra_bits >> extra_bits;
}
if (type_bit_width(type) == 64)
@ -99,15 +101,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
return *(s_max *)val;
}
static bool val_is_negative(struct type_descriptor *type, unsigned long val)
static bool val_is_negative(struct type_descriptor *type, void *val)
{
return type_is_signed(type) && get_signed_val(type, val) < 0;
}
static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
static u_max get_unsigned_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type))
return val;
return (unsigned long)val;
if (type_bit_width(type) == 64)
return *(u64 *)val;
@ -116,7 +118,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
}
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
unsigned long value)
void *value)
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
@ -163,8 +165,8 @@ static void ubsan_epilogue(unsigned long *flags)
current->in_ubsan--;
}
static void handle_overflow(struct overflow_data *data, unsigned long lhs,
unsigned long rhs, char op)
static void handle_overflow(struct overflow_data *data, void *lhs,
void *rhs, char op)
{
struct type_descriptor *type = data->type;
@ -191,8 +193,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
unsigned long lhs,
unsigned long rhs)
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '+');
@ -200,23 +201,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
void __ubsan_handle_sub_overflow(struct overflow_data *data,
unsigned long lhs,
unsigned long rhs)
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
void __ubsan_handle_mul_overflow(struct overflow_data *data,
unsigned long lhs,
unsigned long rhs)
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
unsigned long old_val)
void *old_val)
{
unsigned long flags;
char old_val_str[VALUE_LENGTH];
@ -237,8 +236,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
unsigned long lhs,
unsigned long rhs)
void *lhs, void *rhs)
{
unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
@ -323,7 +321,7 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
}
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
unsigned long ptr)
void *ptr)
{
struct type_mismatch_data_common common_data = {
.location = &data->location,
@ -332,12 +330,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
.type_check_kind = data->type_check_kind
};
ubsan_type_mismatch_common(&common_data, ptr);
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
unsigned long ptr)
void *ptr)
{
struct type_mismatch_data_common common_data = {
@ -347,12 +345,12 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
.type_check_kind = data->type_check_kind
};
ubsan_type_mismatch_common(&common_data, ptr);
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
unsigned long bound)
void *bound)
{
unsigned long flags;
char bound_str[VALUE_LENGTH];
@ -369,8 +367,7 @@ void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
unsigned long index)
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
unsigned long flags;
char index_str[VALUE_LENGTH];
@ -388,7 +385,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
unsigned long lhs, unsigned long rhs)
void *lhs, void *rhs)
{
unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
@ -439,7 +436,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
unsigned long val)
void *val)
{
unsigned long flags;
char val_str[VALUE_LENGTH];

View file

@ -4305,7 +4305,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
root_caches_node);
struct page *page;
struct kmem_cache_node *n;
const char *name;

View file

@ -1276,6 +1276,14 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
/* The minimum encryption key size needs to be enforced by the
* host stack before establishing any L2CAP connections. The
* specification in theory allows a minimum of 1, but to align
* BR/EDR and LE transports, a minimum of 7 is chosen.
*/
if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
return 0;
return 1;
}
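Editor's note: the effect of the new key-size check is simply that a link whose negotiated encryption key is shorter than 7 bytes is treated as not acceptably encrypted. A toy C model; enc_key_size_ok() is an invented helper mirroring the condition added to hci_conn_check_link_mode():

#include <stdbool.h>
#include <stdio.h>

#define HCI_MIN_ENC_KEY_SIZE 7	/* from the hci.h hunk above */

static bool enc_key_size_ok(unsigned int enc_key_size)
{
	return enc_key_size >= HCI_MIN_ENC_KEY_SIZE;
}

int main(void)
{
	printf("1-byte key accepted: %d\n", enc_key_size_ok(1));	/* 0 */
	printf("7-byte key accepted: %d\n", enc_key_size_ok(7));	/* 1 */
	return 0;
}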

View file

@ -76,6 +76,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(csock);
return err;
}
ca.name[sizeof(ca.name)-1] = 0;
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
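Editor's note: the one-line hidp fix above is the standard pattern for strings copied in from userspace, which are not guaranteed to be NUL-terminated. A minimal userspace demonstration; struct req is an invented stand-in for hidp_connadd_req:

#include <stdio.h>
#include <string.h>

struct req { char name[128]; };

int main(void)
{
	struct req ca;

	memset(ca.name, 'A', sizeof(ca.name));	/* userspace copy: no NUL    */
	ca.name[sizeof(ca.name) - 1] = '\0';	/* the fix: force terminator */

	printf("strlen is now bounded: %zu\n", strlen(ca.name));	/* 127 */
	return 0;
}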

View file

@ -1634,6 +1634,16 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
return ret;
}
static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
{
struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
return 0;
}
static const struct of_device_id cs35l35_of_match[] = {
{.compatible = "cirrus,cs35l35"},
{},
@ -1654,6 +1664,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
},
.id_table = cs35l35_id,
.probe = cs35l35_i2c_probe,
.remove = cs35l35_i2c_remove,
};
module_i2c_driver(cs35l35_i2c_driver);

Some files were not shown because too many files have changed in this diff