Merge android-4.19.27 (36d178b) into msm-4.19

* refs/heads/tmp-36d178b:
  Linux 4.19.27
  x86/uaccess: Don't leak the AC flag into __put_user() value evaluation
  MIPS: eBPF: Fix icache flush end address
  MIPS: BCM63XX: provide DMA masks for ethernet devices
  MIPS: fix truncation in __cmpxchg_small for short values
  hugetlbfs: fix races and page leaks during migration
  drm: Block fb changes for async plane updates
  mm: enforce min addr even if capable() in expand_downwards()
  mmc: sdhci-esdhc-imx: correct the fix of ERR004536
  mmc: cqhci: Fix a tiny potential memory leak on error condition
  mmc: cqhci: fix space allocated for transfer descriptor
  mmc: core: Fix NULL ptr crash from mmc_should_fail_request
  mmc: tmio: fix access width of Block Count Register
  mmc: tmio_mmc_core: don't claim spurious interrupts
  mmc: spi: Fix card detection during probe
  kvm: selftests: Fix region overlap check in kvm_util
  KVM: nSVM: clear events pending from svm_complete_interrupts() when exiting to L1
  svm: Fix AVIC incomplete IPI emulation
  cfg80211: extend range deviation for DMG
  mac80211: Add attribute aligned(2) to struct 'action'
  mac80211: don't initiate TDLS connection if station is not associated to AP
  ibmveth: Do not process frames after calling napi_reschedule
  net: dev_is_mac_header_xmit() true for ARPHRD_RAWIP
  net: usb: asix: ax88772_bind return error when hw_reset fail
  drm/msm: Fix A6XX support for opp-level
  nvme-multipath: drop optimization for static ANA group IDs
  nvme-rdma: fix timeout handler
  hv_netvsc: Fix hash key value reset after other ops
  hv_netvsc: Refactor assignments of struct netvsc_device_info
  hv_netvsc: Fix ethtool change hash key error
  net: altera_tse: fix connect_local_phy error path
  scsi: csiostor: fix NULL pointer dereference in csio_vport_set_state()
  scsi: lpfc: nvmet: avoid hang / use-after-free when destroying targetport
  scsi: lpfc: nvme: avoid hang / use-after-free when destroying localport
  writeback: synchronize sync(2) against cgroup writeback membership switches
  direct-io: allow direct writes to empty inodes
  staging: android: ion: Support cpu access during dma_buf_detach
  drm/sun4i: hdmi: Fix usage of TMDS clock
  serial: fsl_lpuart: fix maximum acceptable baud rate with over-sampling
  tty: serial: qcom_geni_serial: Allow mctrl when flow control is disabled
  drm/amd/powerplay: OD setting fix on Vega10
  locking/rwsem: Fix (possible) missed wakeup
  futex: Fix (possible) missed wakeup
  sched/wake_q: Fix wakeup ordering for wake_q
  sched/wait: Fix rcuwait_wake_up() ordering
  mac80211: fix miscounting of ttl-dropped frames
  staging: rtl8723bs: Fix build error with Clang when inlining is disabled
  drivers: thermal: int340x_thermal: Fix sysfs race condition
  ARC: show_regs: lockdep: avoid page allocator...
  ARC: fix __ffs return value to avoid build warnings
  irqchip/gic-v3-mbi: Fix uninitialized mbi_lock
  selftests: gpio-mockup-chardev: Check asprintf() for error
  selftests: seccomp: use LDLIBS instead of LDFLAGS
  phy: ath79-usb: Fix the main reset name to match the DT binding
  phy: ath79-usb: Fix the power on error path
  selftests/vm/gup_benchmark.c: match gup struct to kernel
  ASoC: imx-audmux: change snprintf to scnprintf for possible overflow
  ASoC: dapm: change snprintf to scnprintf for possible overflow
  ASoC: rt5682: Fix PLL source register definitions
  x86/mm/mem_encrypt: Fix erroneous sizeof()
  genirq: Make sure the initial affinity is not empty
  selftests: rtc: rtctest: add alarm test on minute boundary
  selftests: rtc: rtctest: fix alarm tests
  usb: gadget: Potential NULL dereference on allocation error
  usb: dwc3: gadget: Fix the uninitialized link_state when udc starts
  usb: dwc3: gadget: synchronize_irq dwc irq in suspend
  thermal: int340x_thermal: Fix a NULL vs IS_ERR() check
  clk: vc5: Abort clock configuration without upstream clock
  clk: sysfs: fix invalid JSON in clk_dump
  clk: tegra: dfll: Fix a potential Oops in remove()
  ASoC: Variable "val" in function rt274_i2c_probe() could be uninitialized
  ALSA: compress: prevent potential divide by zero bugs
  ASoC: Intel: Haswell/Broadwell: fix setting for .dynamic field
  drm/msm: Unblock writer if reader closes file
  scsi: libsas: Fix rphy phy_identifier for PHYs with end devices attached
  mac80211: Change default tx_sk_pacing_shift to 7
  genirq/matrix: Improve target CPU selection for managed interrupts.
  irq/matrix: Spread managed interrupts on allocation
  irq/matrix: Split out the CPU selection code into a helper
  FROMGIT: binder: create node flag to request sender's security context

  Modify include/uapi/linux/android/binder.h, as commit:

  FROMGIT: binder: create node flag to request sender's security context

  introduces enums and structures that are already defined in other
  userspace files which include the binder uapi header. Redeclaring
  these enums and structures can therefore lead to build errors. To
  avoid this, guard the redundant declarations in the uapi header with
  __KERNEL__, so they are not exported to userspace.
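
  As a minimal sketch of the guard pattern (mirroring the binder.h hunk
  at the end of this diff, which is where these flag names and values
  come from), the kernel-only flag is hidden from userspace builds:

      enum flat_binder_object_flags {
              /* ...flags that remain visible to userspace, e.g.: */
              FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
      #ifdef __KERNEL__
              /*
               * Kernel builds only: userspace files that carry their
               * own copy of this flag keep compiling unchanged.
               */
              FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
      #endif /* __KERNEL__ */
      };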

Conflicts:
	drivers/staging/android/ion/ion.c
	sound/core/compress_offload.c

Change-Id: Ibf422b9b32ea1315515c33036b20ae635b8c8e4c
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
commit bd72c908a7
Ivaylo Georgiev, 2019-03-13 10:41:23 -07:00
82 changed files with 792 additions and 312 deletions

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 26
SUBLEVEL = 27
EXTRAVERSION =
NAME = "People's Front"

@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long word)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long x)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
int n;
unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */

@ -18,6 +18,8 @@
#include <asm/arcregs.h>
#include <asm/irqflags.h>
#define ARC_PATH_MAX 256
/*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR.
@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
print_reg_file(&(cregs->r13), 13);
}
static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
static void print_task_path_n_nm(struct task_struct *tsk)
{
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk);
if (!mm)
@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
mmput(mm);
if (exe_file) {
path_nm = file_path(exe_file, buf, 255);
path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file);
}
@ -80,10 +83,9 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
static void show_faulting_vma(unsigned long address, char *buf)
static void show_faulting_vma(unsigned long address)
{
struct vm_area_struct *vma;
char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
char buf[ARC_PATH_MAX];
char *nm = "?";
if (vma->vm_file) {
nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
@ -173,13 +178,8 @@ void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct callee_regs *cregs;
char *buf;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return;
print_task_path_n_nm(tsk, buf);
print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
@ -189,7 +189,7 @@ void show_regs(struct pt_regs *regs)
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
show_faulting_vma(regs->ret, buf); /* faulting code, not data */
show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
@ -221,8 +221,6 @@ void show_regs(struct pt_regs *regs)
cregs = (struct callee_regs *)current->thread.callee_reg;
if (cregs)
show_callee_regs(cregs);
free_page((unsigned long)buf);
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,

@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
static int shared_device_registered;
static u64 enet_dmamask = DMA_BIT_MASK(32);
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};

@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
u32 mask, old32, new32, load32;
u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));

@ -1818,7 +1818,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* Update the icache */
flush_icache_range((unsigned long)ctx.target,
(unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
(unsigned long)&ctx.target[ctx.idx]);
if (bpf_jit_enable > 1)
/* Dump JIT code */

@ -293,8 +293,7 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
errret); \
__put_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
__put_user_bad(); \
@ -440,8 +439,10 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
__typeof__(*(ptr)) __pu_val; \
__pu_val = x; \
__uaccess_begin(); \
__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})

@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
struct apic_chip_data *apicd = apic_chip_data(irqd);
int vector, cpu;
cpumask_and(vector_searchmask, vector_searchmask, affmsk);
cpu = cpumask_first(vector_searchmask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
cpumask_and(vector_searchmask, dest, affmsk);
/* set_affinity might call here for nothing */
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
return 0;
vector = irq_matrix_alloc_managed(vector_matrix, cpu);
vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
&cpu);
trace_vector_alloc_managed(irqd->irq, vector, vector);
if (vector < 0)
return vector;

@ -3399,6 +3399,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
/*
* Drop what we picked up for L2 via svm_complete_interrupts() so it
* doesn't end up in L1.
*/
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
return 0;
}
@ -4485,25 +4493,14 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
int i;
struct kvm_vcpu *vcpu;
struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
/*
* At this point, we expect that the AVIC HW has already
* set the appropriate IRR bits on the valid target
* vcpus. So, we just need to kick the appropriate vcpu.
* Update ICR high and low, then emulate sending IPI,
* which is handled when writing APIC_ICR.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
bool m = kvm_apic_match_dest(vcpu, apic,
icrl & KVM_APIC_SHORT_MASK,
GET_APIC_DEST_FIELD(icrh),
icrl & KVM_APIC_DEST_MASK);
if (m && !avic_vcpu_is_running(vcpu))
kvm_vcpu_wake_up(vcpu);
}
kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
}
case AVIC_IPI_FAILURE_INVALID_TARGET:

@ -157,8 +157,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_none(*pmd)) {
pte = ppd->pgtable_area;
memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
}

@ -358,6 +358,7 @@ struct binder_error {
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @inherit_rt: inherit RT scheduling policy from caller
* @txn_security_ctx: require sender's security context
* (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
@ -397,6 +398,7 @@ struct binder_node {
u8 sched_policy:2;
u8 inherit_rt:1;
u8 accept_fds:1;
u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@ -654,6 +656,7 @@ struct binder_transaction {
struct binder_priority saved_priority;
bool set_priority_called;
kuid_t sender_euid;
binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@ -1363,6 +1366,7 @@ static struct binder_node *binder_init_node_ilocked(
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@ -2900,6 +2904,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@ -3123,6 +3129,20 @@ static void binder_transaction(struct binder_proc *proc,
t->priority = target_proc->default_priority;
}
if (target_node && target_node->txn_security_ctx) {
u32 secid;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@ -3139,6 +3159,19 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
if (secctx) {
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
char *kptr = t->buffer->data + buf_offset;
t->security_ctx = (uintptr_t)kptr +
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
memcpy(kptr, secctx, secctx_sz);
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@ -3409,6 +3442,9 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@ -4055,11 +4091,13 @@ static int binder_thread_read(struct binder_proc *proc,
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_transaction_data_secctx tr;
struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@ -4255,41 +4293,47 @@ static int binder_thread_read(struct binder_proc *proc,
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
trd->target.ptr = target_node->ptr;
trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
trd->target.ptr = 0;
trd->cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
trd->code = t->code;
trd->flags = t->flags;
trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
trd->sender_pid =
task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
trd->sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
trd->data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
tr.secctx = t->security_ctx;
if (t->security_ctx) {
cmd = BR_TRANSACTION_SEC_CTX;
trsize = sizeof(tr);
}
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@ -4300,7 +4344,7 @@ static int binder_thread_read(struct binder_proc *proc,
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@ -4309,7 +4353,7 @@ static int binder_thread_read(struct binder_proc *proc,
return -EFAULT;
}
ptr += sizeof(tr);
ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@ -4317,16 +4361,18 @@ static int binder_thread_read(struct binder_proc *proc,
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
(u64)trd->data.ptr.buffer,
(u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@ -4671,7 +4717,8 @@ static int binder_ioctl_write_read(struct file *filp,
return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@ -4700,7 +4747,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, NULL);
new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@ -4823,8 +4870,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;

@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
src = VC5_PRIM_SRC_SHDN_EN_XTAL;
if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
else /* Invalid; should have been caught by vc5_probe() */
return -EINVAL;
}
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);

@ -3164,7 +3164,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}

@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
struct tegra_dfll_soc_data *soc;
soc = tegra_dfll_unregister(pdev);
if (IS_ERR(soc))
if (IS_ERR(soc)) {
dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
PTR_ERR(soc));
return PTR_ERR(soc);
}
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);

@ -32,6 +32,7 @@
#include "vega10_pptable.h"
#define NUM_DSPCLK_LEVELS 8
#define VEGA10_ENGINECLOCK_HARDMAX 198000
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
@ -258,7 +259,26 @@ static int init_over_drive_limits(
struct pp_hwmgr *hwmgr,
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
(const ATOM_Vega10_GFXCLK_Dependency_Table *)
(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
bool is_acg_enabled = false;
ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
if (gfxclk_dep_table->ucRevId == 1) {
patom_record_v2 =
(ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
is_acg_enabled =
(bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
}
if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
!is_acg_enabled)
hwmgr->platform_descriptor.overdriveLimit.engineClock =
VEGA10_ENGINECLOCK_HARDMAX;
else
hwmgr->platform_descriptor.overdriveLimit.engineClock =
le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
le32_to_cpu(powerplay_table->ulMaxODMemoryClock);

@ -1564,6 +1564,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
/*
* FIXME: Since prepare_fb and cleanup_fb are always called on
* the new_plane_state for async updates we need to block framebuffer
* changes. This prevents use of a fb that's been cleaned up and
* double cleanups from occurring.
*/
if (old_plane_state->fb != new_plane_state->fb)
return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;

@ -896,7 +896,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
np = dev_pm_opp_get_of_node(opp);
if (np) {
of_property_read_u32(np, "qcom,level", &val);
of_property_read_u32(np, "opp-level", &val);
of_node_put(np);
}

@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
char *fptr = &fifo->buf[fifo->head];
int n;
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
if (!rd->open)
return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
@ -213,7 +215,10 @@ static int rd_open(struct inode *inode, struct file *file)
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
rd->open = false;
wake_up_all(&rd->fifo_event);
return 0;
}

@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
clk_disable_unprepare(hdmi->tmds_clk);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
clk_prepare_enable(hdmi->tmds_clk);
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);

@ -24,7 +24,7 @@ struct mbi_range {
unsigned long *bm;
};
static struct mutex mbi_lock;
static DEFINE_MUTEX(mbi_lock);
static phys_addr_t mbi_phys_base;
static struct mbi_range *mbi_ranges;
static unsigned int mbi_range_nr;

@ -97,7 +97,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
if (cmd->error || data->error ||
if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;

@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
(cq_host->num_slots - 1);
cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size,
&cq_host->desc_dma_base,
GFP_KERNEL);
if (!cq_host->desc_base)
return -ENOMEM;
cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
cq_host->data_size,
&cq_host->trans_desc_dma_base,
GFP_KERNEL);
if (!cq_host->desc_base || !cq_host->trans_desc_base)
if (!cq_host->trans_desc_base) {
dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
cq_host->desc_base,
cq_host->desc_dma_base);
cq_host->desc_base = NULL;
cq_host->desc_dma_base = 0;
return -ENOMEM;
}
pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,

@ -1447,6 +1447,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
mmc_detect_change(mmc, 0);
if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
has_ro = true;

@ -68,6 +68,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
.max_blk_count = 0xffffffff,
};
/* Definitions for sampling clocks */

@ -1097,11 +1097,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
| ESDHC_BURST_LEN_EN_INCR,
host->ioaddr + SDHCI_HOST_CONTROL);
/*
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
writel(readl(host->ioaddr + 0x6c) | BIT(7),
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
host->ioaddr + 0x6c);
/* disable DLL_CTRL delay line settings */

@ -279,6 +279,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
iowrite32(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{

@ -46,6 +46,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>
@ -703,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return false;
}
static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
@ -711,7 +712,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
return;
return false;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@ -724,6 +725,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
@ -742,9 +745,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
__tmio_mmc_sdio_irq(host);
if (__tmio_mmc_sdio_irq(host))
return IRQ_HANDLED;
return IRQ_HANDLED;
return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
@ -774,7 +778,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
if (host->mmc->max_blk_count >= SZ_64K)
sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
else
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);

@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
priv->phy_iface);
if (IS_ERR(phydev))
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
phydev = NULL;
}
} else {
int ret;

@ -1314,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@ -1402,7 +1401,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
goto restart_poll;
}
}

@ -144,6 +144,8 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
};
#define NETVSC_HASH_KEYLEN 40
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
u32 num_chn;
@ -151,6 +153,8 @@ struct netvsc_device_info {
u32 recv_sections;
u32 send_section_size;
u32 recv_section_size;
u8 rss_key[NETVSC_HASH_KEYLEN];
};
enum rndis_device_state {
@ -160,8 +164,6 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
#define NETVSC_HASH_KEYLEN 40
struct rndis_device {
struct net_device *ndev;
@ -210,7 +212,9 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,

@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
rdev = nvdev->extension;
if (rdev) {
ret = rndis_set_subchannel(rdev->ndev, nvdev);
ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
if (ret == 0) {
netif_device_attach(rdev->ndev);
} else {

@ -856,6 +856,39 @@ static void netvsc_get_channels(struct net_device *net,
}
}
/* Alloc struct netvsc_device_info, and initialize it from either existing
* struct netvsc_device, or from default values.
*/
static struct netvsc_device_info *netvsc_devinfo_get
(struct netvsc_device *nvdev)
{
struct netvsc_device_info *dev_info;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
if (!dev_info)
return NULL;
if (nvdev) {
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
dev_info->recv_sections = nvdev->recv_section_cnt;
dev_info->recv_section_size = nvdev->recv_section_size;
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
NETVSC_HASH_KEYLEN);
} else {
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
}
return dev_info;
}
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@ -907,7 +940,7 @@ static int netvsc_attach(struct net_device *ndev,
return PTR_ERR(nvdev);
if (nvdev->num_chn > 1) {
ret = rndis_set_subchannel(ndev, nvdev);
ret = rndis_set_subchannel(ndev, nvdev, dev_info);
/* if unavailable, just proceed with one queue */
if (ret) {
@ -941,7 +974,7 @@ static int netvsc_set_channels(struct net_device *net,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
struct netvsc_device_info device_info;
struct netvsc_device_info *device_info;
int ret;
/* We do not support separate count for rx, tx, or other */
@ -960,24 +993,26 @@ static int netvsc_set_channels(struct net_device *net,
orig = nvdev->num_chn;
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
device_info.recv_section_size = nvdev->recv_section_size;
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
device_info->num_chn = count;
ret = netvsc_detach(net, nvdev);
if (ret)
return ret;
goto out;
ret = netvsc_attach(net, &device_info);
ret = netvsc_attach(net, device_info);
if (ret) {
device_info.num_chn = orig;
if (netvsc_attach(net, &device_info))
device_info->num_chn = orig;
if (netvsc_attach(net, device_info))
netdev_err(net, "restoring channel setting failed\n");
}
out:
kfree(device_info);
return ret;
}
@ -1044,48 +1079,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
int orig_mtu = ndev->mtu;
struct netvsc_device_info device_info;
struct netvsc_device_info *device_info;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
/* Change MTU of underlying VF netdev first. */
if (vf_netdev) {
ret = dev_set_mtu(vf_netdev, mtu);
if (ret)
return ret;
goto out;
}
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
device_info.recv_section_size = nvdev->recv_section_size;
ret = netvsc_detach(ndev, nvdev);
if (ret)
goto rollback_vf;
ndev->mtu = mtu;
ret = netvsc_attach(ndev, &device_info);
if (ret)
goto rollback;
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
return 0;
rollback:
/* Attempt rollback to original MTU */
ndev->mtu = orig_mtu;
if (netvsc_attach(ndev, &device_info))
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
if (vf_netdev)
dev_set_mtu(vf_netdev, orig_mtu);
out:
kfree(device_info);
return ret;
}
@ -1690,7 +1722,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct netvsc_device_info device_info;
struct netvsc_device_info *device_info;
struct ethtool_ringparam orig;
u32 new_tx, new_rx;
int ret = 0;
@ -1710,26 +1742,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
new_rx == orig.rx_pending)
return 0; /* no change */
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = new_tx;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
device_info.recv_section_size = nvdev->recv_section_size;
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
device_info->send_sections = new_tx;
device_info->recv_sections = new_rx;
ret = netvsc_detach(ndev, nvdev);
if (ret)
return ret;
goto out;
ret = netvsc_attach(ndev, &device_info);
ret = netvsc_attach(ndev, device_info);
if (ret) {
device_info.send_sections = orig.tx_pending;
device_info.recv_sections = orig.rx_pending;
device_info->send_sections = orig.tx_pending;
device_info->recv_sections = orig.rx_pending;
if (netvsc_attach(ndev, &device_info))
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring ringparam failed");
}
out:
kfree(device_info);
return ret;
}
@ -2158,7 +2193,7 @@ static int netvsc_probe(struct hv_device *dev,
{
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
struct netvsc_device_info device_info;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
int ret = -ENOMEM;
@ -2205,21 +2240,21 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_rx_queues(net, 1);
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
device_info.recv_sections = NETVSC_DEFAULT_RX;
device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
device_info = netvsc_devinfo_get(NULL);
nvdev = rndis_filter_device_add(dev, &device_info);
if (!device_info) {
ret = -ENOMEM;
goto devinfo_failed;
}
nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
goto rndis_failed;
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@ -2257,12 +2292,16 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
kfree(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
kfree(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
hv_set_drvdata(dev, NULL);

@ -715,8 +715,8 @@ rndis_filter_set_offload_params(struct net_device *ndev,
return ret;
}
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *rss_key)
static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
@ -745,7 +745,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
rssp->flag = 0;
rssp->flag = flag;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
NDIS_HASH_TCP_IPV6;
@ -770,9 +770,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status == RNDIS_STATUS_SUCCESS)
memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
else {
if (set_complete->status == RNDIS_STATUS_SUCCESS) {
if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
!(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
} else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@ -783,6 +786,16 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
return ret;
}
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *rss_key)
{
/* Disable RSS before change */
rndis_set_rss_param_msg(rdev, rss_key,
NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
return rndis_set_rss_param_msg(rdev, rss_key, 0);
}
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
struct netvsc_device *net_device)
{
@ -1062,7 +1075,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info)
{
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
@ -1103,7 +1118,10 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
/* ignore failures from setting rss parameters, still have channels */
rndis_filter_set_rss_param(rdev, netvsc_hash_key);
if (dev_info)
rndis_filter_set_rss_param(rdev, dev_info->rss_key);
else
rndis_filter_set_rss_param(rdev, netvsc_hash_key);
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);

@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
chipcode &= AX_CHIPCODE_MASK;
(chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
ax88772a_hw_reset(dev, 0);
ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
ax88772a_hw_reset(dev, 0);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
return ret;
}
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);

@ -531,8 +531,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
if (!(ctrl->anacap & (1 << 6)))
ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
dev_err(ctrl->device,

@ -1672,18 +1672,28 @@ static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
dev_warn(req->queue->ctrl->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
rq->tag, nvme_rdma_queue_idx(req->queue));
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
/* queue error recovery */
nvme_rdma_error_recovery(req->queue->ctrl);
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
* Teardown immediately if controller times out while starting
* or if we already started error recovery. All outstanding
* requests are completed on shutdown, so we return BLK_EH_DONE.
*/
flush_work(&ctrl->err_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_rdma_teardown_admin_queue(ctrl, false);
return BLK_EH_DONE;
}
/* fail with DNR on cmd timeout */
nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
dev_warn(ctrl->ctrl.device, "starting error recovery\n");
nvme_rdma_error_recovery(ctrl);
return BLK_EH_DONE;
return BLK_EH_RESET_TIMER;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,

@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
err = reset_control_deassert(priv->reset);
if (err && priv->no_suspend_override)
reset_control_assert(priv->no_suspend_override);
reset_control_deassert(priv->no_suspend_override);
return err;
}
@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
priv->reset = devm_reset_control_get(&pdev->dev, "phy");
if (IS_ERR(priv->reset))
return PTR_ERR(priv->reset);

@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
ln->fc_vport = fc_vport;
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)

@ -829,6 +829,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@ -856,6 +857,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);

@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
lport);
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
if (lport->vport->localport)
complete(lport->lport_unreg_cmp);
}
/* lpfc_nvme_remoteport_delete
@ -2556,7 +2557,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
struct lpfc_nvme_lport *lport)
struct lpfc_nvme_lport *lport,
struct completion *lport_unreg_cmp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
u32 wait_tmo;
@ -2568,8 +2570,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
*/
wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
while (true) {
ret = wait_for_completion_timeout(&lport->lport_unreg_done,
wait_tmo);
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6176 Lport %p Localport %p wait "
@ -2603,12 +2604,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int ret;
DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
if (vport->nvmei_support == 0)
return;
localport = vport->localport;
vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
cstat = lport->cstat;
@ -2619,13 +2620,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
init_completion(&lport->lport_unreg_done);
lport->lport_unreg_cmp = &lport_unreg_cmp;
ret = nvme_fc_unregister_localport(localport);
/* Wait for completion. This either blocks
* indefinitely or succeeds
*/
lpfc_nvme_lport_unreg_wait(vport, lport);
lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
vport->localport = NULL;
kfree(cstat);
/* Regardless of the unregister upcall response, clear

@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
struct completion *lport_unreg_cmp;
/* Add stats counters here */
struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;

@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
struct lpfc_nvmet_tgtport *tport = targetport->private;
/* release any threads waiting for the unreg to complete */
complete(&tport->tport_unreg_done);
if (tport->phba->targetport)
complete(tport->tport_unreg_cmp);
}
static void
@ -1700,6 +1701,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_queue *wq;
uint32_t qidx;
DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
if (phba->nvmet_support == 0)
return;
@ -1709,9 +1711,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wq = phba->sli4_hba.nvme_wq[qidx];
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
init_completion(&tgtp->tport_unreg_done);
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
wait_for_completion_timeout(&tport_unreg_cmp, 5);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;

@ -34,7 +34,7 @@
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
struct completion tport_unreg_done;
struct completion *tport_unreg_cmp;
/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
atomic_t rcv_ls_req_in;

@ -310,9 +310,9 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
free_duped_table(a->table);
list_del(&a->list);
mutex_unlock(&buffer->lock);
free_duped_table(a->table);
kfree(a);
}

@ -850,18 +850,18 @@ enum ieee80211_state {
#define IP_FMT "%pI4"
#define IP_ARG(x) (x)
extern __inline int is_multicast_mac_addr(const u8 *addr)
static inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
extern __inline int is_broadcast_mac_addr(const u8 *addr)
static inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
extern __inline int is_zero_mac_addr(const u8 *addr)
static inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));

@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct pci_dev *pci_dev; \
struct platform_device *pdev; \
struct proc_thermal_device *proc_dev; \
\
\
if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
return 0; \
} \
\
if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
pdev = to_platform_device(dev); \
proc_dev = platform_get_drvdata(pdev); \
@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
*priv = proc_priv;
ret = proc_thermal_read_ppcc(proc_priv);
if (!ret) {
ret = sysfs_create_group(&dev->kobj,
&power_limit_attribute_group);
}
if (ret)
return ret;
@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
if (IS_ERR(proc_priv->int340x_zone)) {
ret = PTR_ERR(proc_priv->int340x_zone);
goto remove_group;
return PTR_ERR(proc_priv->int340x_zone);
} else
ret = 0;
@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
remove_zone:
int340x_thermal_zone_remove(proc_priv->int340x_zone);
remove_group:
sysfs_remove_group(&proc_priv->dev->kobj,
&power_limit_attribute_group);
return ret;
}
@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
platform_set_drvdata(pdev, proc_priv);
proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
return 0;
dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
return sysfs_create_group(&pdev->dev.kobj,
&power_limit_attribute_group);
}
static int int3401_remove(struct platform_device *pdev)
@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
proc_priv->soc_dts = intel_soc_dts_iosf_init(
INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
if (proc_priv->soc_dts && pdev->irq) {
if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
ret = pci_enable_msi(pdev);
if (!ret) {
ret = request_threaded_irq(pdev->irq, NULL,
@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
}
return 0;
dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
return sysfs_create_group(&pdev->dev.kobj,
&power_limit_attribute_group);
}
static void proc_thermal_pci_remove(struct pci_dev *pdev)

@ -1695,7 +1695,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* ask the core to calculate the divisor */
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
spin_lock_irqsave(&sport->port.lock, flags);

@ -221,7 +221,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
u32 geni_ios;
if (uart_console(uport) || !uart_cts_enabled(uport)) {
if (uart_console(uport)) {
mctrl |= TIOCM_CTS;
} else {
geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@ -237,7 +237,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
{
u32 uart_manual_rfr = 0;
if (uart_console(uport) || !uart_cts_enabled(uport))
if (uart_console(uport))
return;
if (!(mctrl & TIOCM_RTS))

@ -2423,6 +2423,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
@ -3963,6 +3964,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
synchronize_irq(dwc->irq_gadget);
return 0;
}

@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
return NULL;
return ERR_PTR(-ENOMEM);
ss_opts = container_of(fi, struct f_ss_opts, func_inst);

@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
unsigned long fs_count; /* Number of filesystem-sized blocks */
int create;
unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
loff_t i_size;
/*
* If there was a memory error and we've overwritten all the
@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
*/
create = dio->op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
i_blkbits))
i_size = i_size_read(dio->inode);
if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
create = 0;
}

@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
struct work_struct work;
};
static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
down_write(&bdi->wb_switch_rwsem);
}
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
up_write(&bdi->wb_switch_rwsem);
}
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@ -343,6 +354,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
bool switched = false;
void **slot;
/*
* If @inode switches cgwb membership while sync_inodes_sb() is
* being issued, sync_inodes_sb() might miss it. Synchronize.
*/
down_read(&bdi->wb_switch_rwsem);
/*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
@ -435,6 +452,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
up_read(&bdi->wb_switch_rwsem);
if (switched) {
wb_wakeup(new_wb);
wb_put(old_wb);
@@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
+/*
+* Avoid starting new switches while sync_inodes_sb() is in
+* progress. Otherwise, if the down_write protected issue path
+* blocks heavily, we might end up starting a large number of
+* switches which will block on the rwsem.
+*/
+if (!down_read_trylock(&bdi->wb_switch_rwsem))
+return;
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
-return;
+goto out_unlock;
/* find and pin the new wb */
rcu_read_lock();
@@ -511,12 +539,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-return;
+goto out_unlock;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
+out_unlock:
+up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -894,6 +924,9 @@ fs_initcall(cgroup_writeback_init);
#else /* CONFIG_CGROUP_WRITEBACK */
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
@@ -2420,8 +2453,11 @@ void sync_inodes_sb(struct super_block *sb)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
+/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(bdi, &done);
+bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
}
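Note: the writeback changes above add one rwsem per bdi with an asymmetric usage: every cgwb membership switch takes it for read (cheap and concurrent), and only sync(2)'s issue path takes it for write to drain all in-flight switches. A compact userspace model of the same shape (pthreads stand in for the kernel rwsem; illustrative only):

    #include <pthread.h>

    static pthread_rwlock_t wb_switch_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    /* inode_switch_wbs() analogue: back off instead of blocking */
    static void start_switch(void)
    {
        if (pthread_rwlock_tryrdlock(&wb_switch_rwsem))
            return;             /* sync in flight: skip, retry on a later write */
        /* ... queue the I_WB_SWITCH work ... */
        pthread_rwlock_unlock(&wb_switch_rwsem);
    }

    /* sync_inodes_sb() analogue: block new switches, drain old ones */
    static void sync_all(void)
    {
        pthread_rwlock_wrlock(&wb_switch_rwsem);
        /* ... bdi_split_work_to_wbs() + wait for completion ... */
        pthread_rwlock_unlock(&wb_switch_rwsem);
    }

    int main(void)
    {
        start_switch();
        sync_all();
        return 0;
    }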


@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+/*
+* page_private is subpool pointer in hugetlb pages. Transfer to
+* new page. PagePrivate is not associated with page_private for
+* hugetlb pages and can not be set here as only page_huge_active
+* pages can be migrated.
+*/
+if (page_private(page)) {
+set_page_private(newpage, page_private(page));
+set_page_private(page, 0);
+}
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else


@@ -190,6 +190,7 @@ struct backing_dev_info {
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
struct bdi_writeback_congested *wb_congested;
#endif


@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,


@@ -88,6 +88,16 @@ enum flat_binder_object_flags {
* scheduling policy from the caller (for synchronous transactions).
*/
FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+#ifdef __KERNEL__
+/**
+* @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+*
+* Only when set, causes senders to include their security
+* context
+*/
+FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
+#endif /* __KERNEL__ */
};
#ifdef BINDER_IPC_32BIT
@@ -265,6 +275,7 @@ struct binder_node_info_for_ref {
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
/*
* NOTE: Two special error codes you should check for when calling
@@ -323,6 +334,13 @@ struct binder_transaction_data {
} data;
};
+#ifdef __KERNEL__
+struct binder_transaction_data_secctx {
+struct binder_transaction_data transaction_data;
+binder_uintptr_t secctx;
+};
+#endif /* __KERNEL__ */
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
@@ -358,6 +376,13 @@ enum binder_driver_return_protocol {
BR_OK = _IO('r', 1),
/* No parameters! */
+#ifdef __KERNEL__
+BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+struct binder_transaction_data_secctx),
+/*
+* binder_transaction_data_secctx: the received command.
+*/
+#endif /* __KERNEL__ */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*


@@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w)
* MB (A) MB (B)
* [L] cond [L] tsk
*/
-smp_rmb(); /* (B) */
+smp_mb(); /* (B) */
/*
* Avoid using task_rcu_dereference() magic as long as we are careful,
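Note: the rcuwait fix above upgrades smp_rmb() to smp_mb() because the pattern is a store followed by a load on both sides ([S] cond then [L] tsk on the waker, [S] tsk then [L] cond on the waiter), and only a full barrier orders a store against a later load. A C11 sketch of the waker side (shape only, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    static _Atomic bool cond;
    static _Atomic(void *) waiter_task;

    static void rcuwait_wake_up_sketch(void)
    {
        atomic_store_explicit(&cond, true, memory_order_relaxed); /* [S] cond */
        /* full barrier, as smp_mb(): orders the store above vs. the load below */
        atomic_thread_fence(memory_order_seq_cst);
        void *tsk = atomic_load_explicit(&waiter_task,
                                         memory_order_relaxed);  /* [L] tsk */
        if (tsk) {
            /* ... wake_up_process(tsk) ... */
        }
    }

    int main(void) { rcuwait_wake_up_sketch(); return 0; }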


@@ -1444,11 +1444,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
return;
-/*
-* Queue the task for later wakeup for after we've released
-* the hb->lock. wake_q_add() grabs reference to p.
-*/
-wake_q_add(wake_q, p);
+get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1458,6 +1454,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
* plist_del in __unqueue_futex().
*/
smp_store_release(&q->lock_ptr, NULL);
+/*
+* Queue the task for later wakeup for after we've released
+* the hb->lock. wake_q_add() grabs reference to p.
+*/
+wake_q_add(wake_q, p);
+put_task_struct(p);
}
/*


@@ -396,6 +396,9 @@ int irq_setup_affinity(struct irq_desc *desc)
}
cpumask_and(&mask, cpu_online_mask, set);
+if (cpumask_empty(&mask))
+cpumask_copy(&mask, cpu_online_mask);
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);


@@ -14,6 +14,7 @@ struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
+unsigned int managed_allocated;
bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
@@ -124,6 +125,48 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
return area;
}
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+const struct cpumask *msk)
+{
+unsigned int cpu, best_cpu, maxavl = 0;
+struct cpumap *cm;
+best_cpu = UINT_MAX;
+for_each_cpu(cpu, msk) {
+cm = per_cpu_ptr(m->maps, cpu);
+if (!cm->online || cm->available <= maxavl)
+continue;
+best_cpu = cpu;
+maxavl = cm->available;
+}
+return best_cpu;
+}
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+const struct cpumask *msk)
+{
+unsigned int cpu, best_cpu, allocated = UINT_MAX;
+struct cpumap *cm;
+best_cpu = UINT_MAX;
+for_each_cpu(cpu, msk) {
+cm = per_cpu_ptr(m->maps, cpu);
+if (!cm->online || cm->managed_allocated > allocated)
+continue;
+best_cpu = cpu;
+allocated = cm->managed_allocated;
+}
+return best_cpu;
+}
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
@@ -239,11 +282,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
* @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated
*/
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+unsigned int *mapped_cpu)
{
-struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-unsigned int bit, end = m->alloc_end;
+unsigned int bit, cpu, end = m->alloc_end;
+struct cpumap *cm;
+if (cpumask_empty(msk))
+return -EINVAL;
+cpu = matrix_find_best_cpu_managed(m, msk);
+if (cpu == UINT_MAX)
+return -ENOSPC;
+cm = per_cpu_ptr(m->maps, cpu);
+end = m->alloc_end;
/* Get managed bit which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
@@ -251,7 +304,9 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
+cm->managed_allocated++;
m->total_allocated++;
+*mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit;
}
@@ -322,37 +377,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
-unsigned int cpu, best_cpu, maxavl = 0;
+unsigned int cpu, bit;
struct cpumap *cm;
-unsigned int bit;
-best_cpu = UINT_MAX;
-for_each_cpu(cpu, msk) {
-cm = per_cpu_ptr(m->maps, cpu);
+cpu = matrix_find_best_cpu(m, msk);
+if (cpu == UINT_MAX)
+return -ENOSPC;
-if (!cm->online || cm->available <= maxavl)
-continue;
+cm = per_cpu_ptr(m->maps, cpu);
+bit = matrix_alloc_area(m, cm, 1, false);
+if (bit >= m->alloc_end)
+return -ENOSPC;
+cm->allocated++;
+cm->available--;
+m->total_allocated++;
+m->global_available--;
+if (reserved)
+m->global_reserved--;
+*mapped_cpu = cpu;
+trace_irq_matrix_alloc(bit, cpu, m, cm);
+return bit;
-best_cpu = cpu;
-maxavl = cm->available;
-}
-if (maxavl) {
-cm = per_cpu_ptr(m->maps, best_cpu);
-bit = matrix_alloc_area(m, cm, 1, false);
-if (bit < m->alloc_end) {
-cm->allocated++;
-cm->available--;
-m->total_allocated++;
-m->global_available--;
-if (reserved)
-m->global_reserved--;
-*mapped_cpu = best_cpu;
-trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-return bit;
-}
-}
-return -ENOSPC;
}
/**
@@ -373,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
clear_bit(bit, cm->alloc_map);
cm->allocated--;
+if(managed)
+cm->managed_allocated--;
if (cm->online)
m->total_allocated--;
@@ -442,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
-seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
-cpu, cm->available, cm->managed, cm->allocated,
+seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
+cpu, cm->available, cm->managed,
+cm->managed_allocated, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
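Note: the matrix changes above replace the open-coded search in irq_matrix_alloc() with the two helpers, and managed allocation now spreads across the supplied mask instead of being pinned to one CPU. A standalone model of the managed selection policy (hypothetical data, userspace C, not the kernel structures):

    #include <limits.h>
    #include <stdio.h>

    struct cpumap_model { unsigned int online, available, managed_allocated; };

    /* pick the online CPU with the fewest managed vectors already allocated */
    static unsigned int find_best_cpu_managed(const struct cpumap_model *maps,
                                              unsigned int ncpus)
    {
        unsigned int cpu, best = UINT_MAX, allocated = UINT_MAX;

        for (cpu = 0; cpu < ncpus; cpu++) {
            if (!maps[cpu].online || maps[cpu].managed_allocated > allocated)
                continue;
            best = cpu;
            allocated = maps[cpu].managed_allocated;
        }
        return best;
    }

    int main(void)
    {
        struct cpumap_model maps[] = {
            { 1, 180, 3 }, { 1, 200, 1 }, { 0, 220, 0 }, { 1, 150, 1 },
        };

        /* prints 3: offline CPU 2 is skipped, ties go to the later CPU */
        printf("best managed CPU: %u\n", find_best_cpu_managed(maps, 4));
        return 0;
    }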


@@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
woken++;
tsk = waiter->task;
-wake_q_add(wake_q, tsk);
+get_task_struct(tsk);
list_del(&waiter->list);
/*
-* Ensure that the last operation is setting the reader
+* Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
* race with do_exit() by always holding a reference count
* to the task to wakeup.
*/
smp_store_release(&waiter->task, NULL);
+/*
+* Ensure issuing the wakeup (either by us or someone else)
+* after setting the reader waiter to nil.
+*/
+wake_q_add(wake_q, tsk);
+/* wake_q_add() already take the task ref */
+put_task_struct(tsk);
}
adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;


@@ -408,10 +408,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* its already queued (either by us or someone else) and will get the
* wakeup due to that.
*
-* This cmpxchg() executes a full barrier, which pairs with the full
-* barrier executed by the wakeup in wake_up_q().
+* In order to ensure that a pending wakeup will observe our pending
+* state, even in the failed case, an explicit smp_mb() must be used.
*/
-if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+smp_mb__before_atomic();
+if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
return;
head->count++;
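Note: the futex, rwsem and wake_q hunks above are one logical fix: a waker must pin the task before publishing the "you are done" store, and the wakeup queueing must act as a full barrier so a concurrent waker that loses the cmpxchg still guarantees ordering. Shape of the pattern in C11 (a sketch; wake_q_add_cb is a stand-in callback, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct task { _Atomic int refcount; };

    static void get_task(struct task *t) { atomic_fetch_add(&t->refcount, 1); }
    static void put_task(struct task *t) { atomic_fetch_sub(&t->refcount, 1); }

    /* mark_wake_futex()-style ordering */
    static void mark_wake_sketch(struct task *p, _Atomic(void *) *lock_ptr,
                                 void (*wake_q_add_cb)(struct task *))
    {
        get_task(p);                /* 1: pin p before it can free itself   */
        atomic_store_explicit(lock_ptr, NULL,
                              memory_order_release); /* 2: publish "done"   */
        wake_q_add_cb(p);           /* 3: queue wakeup (full barrier inside) */
        put_task(p);                /* wake_q_add took its own reference     */
    }

    static void fake_wake(struct task *t) { get_task(t); /* queue's ref */ }

    int main(void)
    {
        struct task t = { 1 };
        _Atomic(void *) lock = &t;

        mark_wake_sketch(&t, &lock, fake_wake);
        printf("refcount now %d\n", atomic_load(&t.refcount)); /* base + queue */
        return 0;
    }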


@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
mutex_init(&bdi->cgwb_release_mutex);
+init_rwsem(&bdi->wb_switch_rwsem);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {


@@ -3624,7 +3624,6 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
-set_page_huge_active(new_page);
mmun_start = haddr;
mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3645,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3730,6 +3730,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
+bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3791,7 +3792,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
-set_page_huge_active(page);
+new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3863,15 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
spin_unlock(ptl);
+/*
+* Only make newly allocated pages active. Existing pages found
+* in the pagecache could be !page_huge_active() if they have been
+* isolated for migration.
+*/
+if (new_page)
+set_page_huge_active(page);
unlock_page(page);
out:
return ret;
@@ -4096,7 +4106,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
* the set_pte_at() write.
*/
__SetPageUptodate(page);
-set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4173,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
+set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;


@@ -1303,6 +1303,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+/*
+* Check for pages which are in the process of being freed. Without
+* page_mapping() set, hugetlbfs specific move page routine will not
+* be called and we could leak usage counts for subpools.
+*/
+if (page_private(hpage) && !page_mapping(hpage)) {
+rc = -EBUSY;
+goto out_unlock;
+}
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1333,6 +1343,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)


@@ -2400,12 +2400,11 @@ int expand_downwards(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
-int error;
+int error = 0;
address &= PAGE_MASK;
-error = security_mmap_addr(address);
-if (error)
-return error;
+if (address < mmap_min_addr)
+return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;


@@ -1478,6 +1478,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
+if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+!sdata->u.mgd.associated)
+return -EINVAL;
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);


@@ -221,7 +221,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr_3addr hdr;
u8 category;
u8 action_code;
-} __packed action;
+} __packed __aligned(2) action;
if (!sdata)
return;
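Note: the __aligned(2) hunk above exists because a __packed struct has alignment 1, so the u16 fields of the embedded ieee80211_hdr_3addr may land on odd addresses, which not every architecture tolerates for 16-bit loads. Quick illustration (GCC/Clang attribute syntax assumed):

    #include <stdalign.h>
    #include <stdio.h>

    struct action_packed {
        unsigned char  pad;
        unsigned short fc;      /* can end up at an odd offset/address */
    } __attribute__((packed));

    struct action_aligned {
        unsigned char  pad;
        unsigned short fc;
    } __attribute__((packed, aligned(2)));

    int main(void)
    {
        printf("packed alignment:     %zu\n", alignof(struct action_packed));  /* 1 */
        printf("aligned(2) alignment: %zu\n", alignof(struct action_aligned)); /* 2 */
        return 0;
    }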
@@ -2678,7 +2678,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
skb_set_queue_mapping(skb, q);
if (!--mesh_hdr->ttl) {
-IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+if (!is_multicast_ether_addr(hdr->addr1))
+IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+dropped_frames_ttl);
goto out;
}


@@ -3614,10 +3614,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
/* We need a bit of data queued to build aggregates properly, so
* instruct the TCP stack to allow more than a single ms of data
* to be queued in the stack. The value is a bit-shift of 1
-* second, so 8 is ~4ms of queued data. Only affects local TCP
+* second, so 7 is ~8ms of queued data. Only affects local TCP
* sockets.
*/
-sk_pacing_shift_update(skb->sk, 8);
+sk_pacing_shift_update(skb->sk, 7);
fast_tx = rcu_dereference(sta->fast_tx);


@@ -1291,7 +1291,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
* definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
* however it is safe for now to assume that a frequency rule should not be
* part of a frequency's band if the start freq or end freq are off by more
-* than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+* than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
* 60 GHz band.
* This resolution can be lowered and should be considered as we add
* regulatory rule support for other "bands".
@@ -1306,7 +1306,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
* with the Channel starting frequency above 45 GHz.
*/
u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
-10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)


@@ -532,7 +532,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
-params->buffer.fragments > U32_MAX / params->buffer.fragment_size)
+params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
+params->buffer.fragments == 0)
return -EINVAL;
/* now codec parameters */


@@ -1126,8 +1126,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
return ret;
}
-regmap_read(rt274->regmap,
+ret = regmap_read(rt274->regmap,
RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+if (ret)
+return ret;
if (val != RT274_VENDOR_ID) {
dev_err(&i2c->dev,
"Device with ID register %#x is not rt274\n", val);


@@ -849,18 +849,18 @@
#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
#define RT5682_SCLK_SRC_SDW (0x3 << 13)
#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
-#define RT5682_PLL1_SRC_MASK (0x3 << 10)
-#define RT5682_PLL1_SRC_SFT 10
-#define RT5682_PLL1_SRC_MCLK (0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10)
-#define RT5682_PLL1_SRC_SDW (0x2 << 10)
-#define RT5682_PLL1_SRC_RC (0x3 << 10)
-#define RT5682_PLL2_SRC_MASK (0x3 << 8)
-#define RT5682_PLL2_SRC_SFT 8
-#define RT5682_PLL2_SRC_MCLK (0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8)
-#define RT5682_PLL2_SRC_SDW (0x2 << 8)
-#define RT5682_PLL2_SRC_RC (0x3 << 8)
+#define RT5682_PLL2_SRC_MASK (0x3 << 10)
+#define RT5682_PLL2_SRC_SFT 10
+#define RT5682_PLL2_SRC_MCLK (0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10)
+#define RT5682_PLL2_SRC_SDW (0x2 << 10)
+#define RT5682_PLL2_SRC_RC (0x3 << 10)
+#define RT5682_PLL1_SRC_MASK (0x3 << 8)
+#define RT5682_PLL1_SRC_SFT 8
+#define RT5682_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5682_PLL1_SRC_SDW (0x2 << 8)
+#define RT5682_PLL1_SRC_RC (0x3 << 8)


@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
-ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
pdcr, ptcr);
if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS output from %s, ",
audmux_port_string((ptcr >> 27) & 0x7));
else
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk output from %s",
audmux_port_string((ptcr >> 22) & 0x7));
else
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk input");
-ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"Port is symmetric");
} else {
if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS output from %s, ",
audmux_port_string((ptcr >> 17) & 0x7));
else
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk output from %s",
audmux_port_string((ptcr >> 12) & 0x7));
else
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk input");
}
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"\nData received from %s\n",
audmux_port_string((pdcr >> 13) & 0x7));
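Note: every snprintf-to-scnprintf swap above (and the soc-dapm ones below) fixes the same latent bug: snprintf() returns the length that *would* have been written, not the length actually stored, so once the buffer fills, buf + ret walks past the end and PAGE_SIZE - ret underflows. Userspace demonstration with a deliberately tiny buffer:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16];
        int ret;

        ret = snprintf(buf, sizeof(buf), "%s", "0123456789abcdef-overflow");
        printf("snprintf returned %d for a %zu-byte buffer\n", ret, sizeof(buf));
        /* buf + ret now points beyond buf[15]; a chained call would write,
         * or compute sizeof(buf) - ret as a huge size_t, out of bounds.
         * The kernel's scnprintf() instead returns the truncated count. */
        printf("actually stored: %zu bytes\n", strlen(buf) + 1);
        return 0;
    }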


@@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
-.dynamic = 0,
+.dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},


@@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
-.dynamic = 0,
+.dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},


@@ -2039,19 +2039,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
out = is_connected_output_ep(w, NULL, NULL);
}
-ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
+ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
w->name, w->power ? "On" : "Off",
w->force ? " (forced)" : "", in, out);
if (w->reg >= 0)
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" - R%d(0x%x) mask 0x%x",
w->reg, w->reg, w->mask << w->shift);
-ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (w->sname)
-ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
w->sname,
w->active ? "active" : "inactive");
@@ -2064,7 +2064,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!p->connect)
continue;
-ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" %s \"%s\" \"%s\"\n",
(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
p->name ? p->name : "static",


@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
struct libmnt_table *tb;
struct libmnt_iter *itr = NULL;
struct libmnt_fs *fs;
-int found = 0;
+int found = 0, ret;
cxt = mnt_new_context();
if (!cxt)
@@ -58,8 +58,11 @@
break;
}
}
-if (found)
-asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+if (found) {
+ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+if (ret < 0)
+err(EXIT_FAILURE, "failed to format string");
+}
mnt_free_iter(itr);
mnt_free_context(cxt);


@@ -590,7 +590,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
-vm, guest_paddr, guest_paddr + npages * vm->page_size);
+vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
TEST_ASSERT(false, "overlapping userspace_mem_region already "
"exists\n"
@@ -606,15 +606,10 @@
region = region->next) {
if (region->region.slot == slot)
break;
-if ((guest_paddr <= (region->region.guest_phys_addr
-+ region->region.memory_size))
-&& ((guest_paddr + npages * vm->page_size)
->= region->region.guest_phys_addr))
-break;
}
if (region != NULL)
TEST_ASSERT(false, "A mem region with the requested slot "
-"or overlapping physical memory range already exists.\n"
+"already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
slot, guest_paddr, npages,


@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
-EXPECT_NE(0, rc);
+ASSERT_NE(0, rc);
/* Disable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_OFF, 0);
ASSERT_NE(-1, rc);
-if (rc == 0)
-return;
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
-EXPECT_NE(0, rc);
+ASSERT_NE(0, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
+TEST_F(rtc, alarm_alm_set_minute) {
+struct timeval tv = { .tv_sec = 62 };
+unsigned long data;
+struct rtc_time tm;
+fd_set readfds;
+time_t secs, new;
+int rc;
+rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ASSERT_NE(-1, rc);
+secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+gmtime_r(&secs, (struct tm *)&tm);
+rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+if (rc == -1) {
+ASSERT_EQ(EINVAL, errno);
+TH_LOG("skip alarms are not supported.");
+return;
+}
+rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+ASSERT_NE(-1, rc);
+TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+tm.tm_hour, tm.tm_min, tm.tm_sec);
+/* Enable alarm interrupts */
+rc = ioctl(self->fd, RTC_AIE_ON, 0);
+ASSERT_NE(-1, rc);
+FD_ZERO(&readfds);
+FD_SET(self->fd, &readfds);
+rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ASSERT_NE(-1, rc);
+ASSERT_NE(0, rc);
+/* Disable alarm interrupts */
+rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+ASSERT_NE(-1, rc);
+rc = read(self->fd, &data, sizeof(unsigned long));
+ASSERT_NE(-1, rc);
+TH_LOG("data: %lx", data);
+rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ASSERT_NE(-1, rc);
+new = timegm((struct tm *)&tm);
+ASSERT_EQ(new, secs);
+}
+TEST_F(rtc, alarm_wkalm_set_minute) {
+struct timeval tv = { .tv_sec = 62 };
+struct rtc_wkalrm alarm = { 0 };
+struct rtc_time tm;
+unsigned long data;
+fd_set readfds;
+time_t secs, new;
+int rc;
+rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+ASSERT_NE(-1, rc);
+secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+gmtime_r(&secs, (struct tm *)&alarm.time);
+alarm.enabled = 1;
+rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+if (rc == -1) {
+ASSERT_EQ(EINVAL, errno);
+TH_LOG("skip alarms are not supported.");
+return;
+}
+rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+ASSERT_NE(-1, rc);
+TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+alarm.time.tm_mday, alarm.time.tm_mon + 1,
+alarm.time.tm_year + 1900, alarm.time.tm_hour,
+alarm.time.tm_min, alarm.time.tm_sec);
+FD_ZERO(&readfds);
+FD_SET(self->fd, &readfds);
+rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ASSERT_NE(-1, rc);
+ASSERT_NE(0, rc);
+rc = read(self->fd, &data, sizeof(unsigned long));
+ASSERT_NE(-1, rc);


@@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark
CFLAGS += -Wl,-no-as-needed -Wall
seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
-$(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+$(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
TEST_PROGS += $(BINARIES)
EXTRA_CLEAN := $(BINARIES)


@@ -22,6 +22,7 @@ struct gup_benchmark {
__u64 size;
__u32 nr_pages_per_call;
__u32 flags;
+__u64 expansion[10]; /* For future use */
};
int main(int argc, char **argv)