Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ae5112a825
255 changed files with 2524 additions and 1460 deletions
@@ -15,6 +15,9 @@ Properties:
Second cell specifies the irq distribution mode to cores
0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3

The second cell in interrupts property is deprecated and may be ignored by
the kernel.

intc accessed via the special ARC AUX register interface, hence "reg" property
is not specified.
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
* Ethernet controller node

Required properties:
- compatible: Should be "mediatek,mt7623-eth"
- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.
@@ -19,8 +19,9 @@ Optional Properties:
specifications. If neither of these are specified, the default is to
assume clause 22.

If the phy's identifier is known then the list may contain an entry
of the form: "ethernet-phy-idAAAA.BBBB" where
If the PHY reports an incorrect ID (or none at all) then the
"compatible" list may contain an entry with the correct PHY ID in the
form: "ethernet-phy-idAAAA.BBBB" where
AAAA - The value of the 16 bit Phy Identifier 1 register as
4 hex digits. This is the chip vendor OUI bits 3:18
BBBB - The value of the 16 bit Phy Identifier 2 register as
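For illustration only (a hypothetical snippet, not part of this commit): assuming a PHY at MDIO address 1 whose Identifier 1/2 registers read 0x0141 and 0x0cb0, a board .dts entry using this compatible format might look like the following; the ID values and address are placeholder assumptions.

ethernet-phy@1 {
	/* placeholder IDs for illustration, not from this change */
	compatible = "ethernet-phy-id0141.0cb0",
		     "ethernet-phy-ieee802.3-c22";
	reg = <1>;
};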
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
parameter in the kernel command line. On some ACPI-based systems, depending on
the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
is supported.
parameter in the kernel command line.

The properties of all of the sleep states are described below.
@@ -3567,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/
F: include/uapi/rdma/cxgb3-abi.h

CXGB4 ETHERNET DRIVER (CXGB4)
M: Hariprasad S <hariprasad@chelsio.com>
M: Ganesh Goudar <ganeshgr@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4153,7 +4153,7 @@ F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
L: igvt-g-dev@lists.01.org
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc5
NAME = Anniversary Edition
EXTRAVERSION = -rc6
NAME = Fearless Coyote

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
" lp 1f \n"
" nop \n"
"1: \n"
: : "r"(loops));
:
: "r"(loops)
: "lp_count");
}

extern void __bad_udelay(void);
@@ -71,14 +71,14 @@ ENTRY(stext)
GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
; Non-Master can proceed as system would be booted sufficiently
jnz first_lines_of_secondary
#else
bz .Lmaster_proceed

; Non-Masters wait for Master to boot enough and bring them up
jnz arc_platform_smp_wait_to_boot
#endif
; Master falls thru
; when they resume, tail-call to entry point
mov blink, @first_lines_of_secondary
j arc_platform_smp_wait_to_boot

.Lmaster_proceed:
#endif

; Clear BSS before updating any globals
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
READ_BCR(ARC_REG_MCIP_BCR, mp);

sprintf(smp_cpuinfo_buf,
"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

return IRQ_SET_MASK_OK;
}
#endif

static void idu_irq_enable(struct irq_data *data)
{
/*
* By default send all common interrupts to all available online CPUs.
* The affinity of common interrupts in IDU must be set manually since
* in some cases the kernel will not call irq_set_affinity() by itself:
* 1. When the kernel is not configured with support of SMP.
* 2. When the kernel is configured with support of SMP but upper
* interrupt controllers does not support setting of the affinity
* and cannot propagate it to IDU.
*/
idu_irq_set_affinity(data, cpu_online_mask, false);
idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
.irq_enable = idu_irq_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
irq_hw_number_t hwirq = *out_hwirq = intspec[0];
int distri = intspec[1];
unsigned long flags;

/*
* Ignore value of interrupt distribution mode for common interrupts in
* IDU which resides in intspec[1] since setting an affinity using value
* from Device Tree is deprecated in ARC.
*/
*out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;

/* XXX: validate distribution scheme again online cpu mask */
if (distri == 0) {
/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
raw_spin_lock_irqsave(&mcip_lock, flags);
idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
} else {
/*
* DEST based distribution for Level Triggered intr can only
* have 1 CPU, so generalize it to always contain 1 cpu
*/
int cpu = ffs(distri);

if (cpu != fls(distri))
pr_warn("IDU irq %lx distri mode set to cpu %x\n",
hwirq, cpu);

raw_spin_lock_irqsave(&mcip_lock, flags);
idu_set_dest(hwirq, cpu);
idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

return 0;
}
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f) f
#define __boot_write(f, v) f = v

#else

#define __boot_read(f) arc_read_uncached_32(&f)
#define __boot_write(f, v) arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
wake_flag = cpu;

__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
while (wake_flag != cpu)
/* for halt-on-reset, we've waited already */
if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
return;

while (__boot_read(wake_flag) != cpu)
;

wake_flag = 0;
__asm__ __volatile__("j @first_lines_of_secondary \n");
__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;

/* clear any remanants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta;
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
@@ -11,6 +11,7 @@
* for more details.
*/

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

static int __init register_cpufreq_notifier(void)
{
if (cap_parsing_failed)
/*
* on ACPI-based systems we need to use the default cpu capacity
* until we have the necessary code to parse the cpu capacity, so
* skip registering cpufreq notifier.
*/
if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;

if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
@@ -6,7 +6,7 @@
#endif

#include <linux/compiler.h>
#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
@@ -17,6 +17,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/

#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
@@ -3,10 +3,8 @@

#if defined(__LP64__)
#define __BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
#define SHIFT_PER_LONG 5
#endif

#include <asm-generic/bitsperlong.h>
@@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H

#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32

#if BITS_PER_LONG > 32
#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
#endif /* BITS_PER_LONG > 32 */
#endif /* __BITS_PER_LONG > 32 */

#endif /* _PARISC_SWAB_H */
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,

ACPI_FUNCTION_TRACE(tb_install_and_load_table);

(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

/* Install the table and load it into the namespace */

status = acpi_tb_install_standard_table(address, flags, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
status = acpi_tb_load_table(i, acpi_gbl_root_node);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

unlock_and_exit:
exit:
*table_index = i;
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}

/* Acquire the table lock */

(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

if (reload) {
/*
* Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
new_table_desc.signature.integer));

status = AE_BAD_SIGNATURE;
goto release_and_exit;
goto unlock_and_exit;
}

/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Table is still loaded, this is an error */

status = AE_ALREADY_EXISTS;
goto release_and_exit;
goto unlock_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,

/* Invoke table handler if present */

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
if (acpi_gbl_table_handler) {
(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
new_table_desc.pointer,
acpi_gbl_table_handler_context);
}
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

unlock_and_exit:

/* Release the table lock */

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);

release_and_exit:
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;

/*
* Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
* the default suspend mode was not selected from the command line.
*/
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
mem_sleep_default > PM_SUSPEND_MEM)
mem_sleep_default = PM_SUSPEND_FREEZE;

suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
.callback = video_detect_force_native,
.ident = "HP Pavilion dv6",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
},
},

{ },
};
@@ -197,13 +197,13 @@ struct blkfront_info
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush;
unsigned int feature_fua;
unsigned int feature_flush:1;
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int feature_persistent:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int feature_persistent:1;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
}
else
grants = info->max_indirect_segments;
psegs = grants / GRANTS_PER_PSEG;
psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);

err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
blkfront_setup_discard(info);

info->feature_persistent =
xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);

indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
info->max_indirect_segments = min(indirect_segments,
xen_blkif_max_segments);
if (indirect_segments > xen_blkif_max_segments)
indirect_segments = xen_blkif_max_segments;
if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
indirect_segments = 0;
info->max_indirect_segments = indirect_segments;
}

/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;

if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits = &performance_limits;
perf_limits = limits;
}
if (policy->max >= policy->cpuinfo.max_freq) {
if (policy->max >= policy->cpuinfo.max_freq &&
!limits->no_turbo) {
pr_debug("set performance\n");
intel_pstate_set_performance_limits(perf_limits);
goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;

/* When per-CPU limits are used, sysfs limits are not used */
if (!per_cpu_limits) {
unsigned int max_freq, min_freq;

max_freq = policy->cpuinfo.max_freq *
limits->max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
limits->min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}

return 0;
}
@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
|
|||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (!(*out_ring && (*out_ring)->adev)) {
|
||||
DRM_ERROR("Ring %d is not initialized on IP %d\n",
|
||||
ring, ip_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
|
|||
|
||||
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
|
||||
kfree(amdgpu_encoder->enc_priv);
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(amdgpu_encoder);
|
||||
kfree(encoder);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
|
||||
|
|
|
@ -113,6 +113,7 @@ struct ast_private {
|
|||
struct ttm_bo_kmap_obj cache_kmap;
|
||||
int next_cursor;
|
||||
bool support_wide_screen;
|
||||
bool DisableP2A;
|
||||
|
||||
enum ast_tx_chip tx_chip_type;
|
||||
u8 dp501_maxclk;
|
||||
|
|
|
@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
|
|||
} else
|
||||
*need_post = false;
|
||||
|
||||
/* Check P2A Access */
|
||||
ast->DisableP2A = true;
|
||||
data = ast_read32(ast, 0xf004);
|
||||
if (data != 0xFFFFFFFF)
|
||||
ast->DisableP2A = false;
|
||||
|
||||
/* Check if we support wide screen */
|
||||
switch (ast->chip) {
|
||||
case AST1180:
|
||||
|
@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
|
|||
ast->support_wide_screen = true;
|
||||
else {
|
||||
ast->support_wide_screen = false;
|
||||
/* Read SCU7c (silicon revision register) */
|
||||
ast_write32(ast, 0xf004, 0x1e6e0000);
|
||||
ast_write32(ast, 0xf000, 0x1);
|
||||
data = ast_read32(ast, 0x1207c);
|
||||
data &= 0x300;
|
||||
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
|
||||
ast->support_wide_screen = true;
|
||||
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
|
||||
ast->support_wide_screen = true;
|
||||
if (ast->DisableP2A == false) {
|
||||
/* Read SCU7c (silicon revision register) */
|
||||
ast_write32(ast, 0xf004, 0x1e6e0000);
|
||||
ast_write32(ast, 0xf000, 0x1);
|
||||
data = ast_read32(ast, 0x1207c);
|
||||
data &= 0x300;
|
||||
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
|
||||
ast->support_wide_screen = true;
|
||||
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
|
||||
ast->support_wide_screen = true;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
|
|||
uint32_t data, data2;
|
||||
uint32_t denum, num, div, ref_pll;
|
||||
|
||||
ast_write32(ast, 0xf004, 0x1e6e0000);
|
||||
ast_write32(ast, 0xf000, 0x1);
|
||||
|
||||
|
||||
ast_write32(ast, 0x10000, 0xfc600309);
|
||||
|
||||
do {
|
||||
if (pci_channel_offline(dev->pdev))
|
||||
return -EIO;
|
||||
} while (ast_read32(ast, 0x10000) != 0x01);
|
||||
data = ast_read32(ast, 0x10004);
|
||||
|
||||
if (data & 0x40)
|
||||
if (ast->DisableP2A)
|
||||
{
|
||||
ast->dram_bus_width = 16;
|
||||
ast->dram_type = AST_DRAM_1Gx16;
|
||||
ast->mclk = 396;
|
||||
}
|
||||
else
|
||||
ast->dram_bus_width = 32;
|
||||
{
|
||||
ast_write32(ast, 0xf004, 0x1e6e0000);
|
||||
ast_write32(ast, 0xf000, 0x1);
|
||||
data = ast_read32(ast, 0x10004);
|
||||
|
||||
if (ast->chip == AST2300 || ast->chip == AST2400) {
|
||||
switch (data & 0x03) {
|
||||
case 0:
|
||||
ast->dram_type = AST_DRAM_512Mx16;
|
||||
break;
|
||||
default:
|
||||
case 1:
|
||||
ast->dram_type = AST_DRAM_1Gx16;
|
||||
if (data & 0x40)
|
||||
ast->dram_bus_width = 16;
|
||||
else
|
||||
ast->dram_bus_width = 32;
|
||||
|
||||
if (ast->chip == AST2300 || ast->chip == AST2400) {
|
||||
switch (data & 0x03) {
|
||||
case 0:
|
||||
ast->dram_type = AST_DRAM_512Mx16;
|
||||
break;
|
||||
default:
|
||||
case 1:
|
||||
ast->dram_type = AST_DRAM_1Gx16;
|
||||
break;
|
||||
case 2:
|
||||
ast->dram_type = AST_DRAM_2Gx16;
|
||||
break;
|
||||
case 3:
|
||||
ast->dram_type = AST_DRAM_4Gx16;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
switch (data & 0x0c) {
|
||||
case 0:
|
||||
case 4:
|
||||
ast->dram_type = AST_DRAM_512Mx16;
|
||||
break;
|
||||
case 8:
|
||||
if (data & 0x40)
|
||||
ast->dram_type = AST_DRAM_1Gx16;
|
||||
else
|
||||
ast->dram_type = AST_DRAM_512Mx32;
|
||||
break;
|
||||
case 0xc:
|
||||
ast->dram_type = AST_DRAM_1Gx32;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
data = ast_read32(ast, 0x10120);
|
||||
data2 = ast_read32(ast, 0x10170);
|
||||
if (data2 & 0x2000)
|
||||
ref_pll = 14318;
|
||||
else
|
||||
ref_pll = 12000;
|
||||
|
||||
denum = data & 0x1f;
|
||||
num = (data & 0x3fe0) >> 5;
|
||||
data = (data & 0xc000) >> 14;
|
||||
switch (data) {
|
||||
case 3:
|
||||
div = 0x4;
|
||||
break;
|
||||
case 2:
|
||||
ast->dram_type = AST_DRAM_2Gx16;
|
||||
case 1:
|
||||
div = 0x2;
|
||||
break;
|
||||
case 3:
|
||||
ast->dram_type = AST_DRAM_4Gx16;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
switch (data & 0x0c) {
|
||||
case 0:
|
||||
case 4:
|
||||
ast->dram_type = AST_DRAM_512Mx16;
|
||||
break;
|
||||
case 8:
|
||||
if (data & 0x40)
|
||||
ast->dram_type = AST_DRAM_1Gx16;
|
||||
else
|
||||
ast->dram_type = AST_DRAM_512Mx32;
|
||||
break;
|
||||
case 0xc:
|
||||
ast->dram_type = AST_DRAM_1Gx32;
|
||||
default:
|
||||
div = 0x1;
|
||||
break;
|
||||
}
|
||||
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
|
||||
}
|
||||
|
||||
data = ast_read32(ast, 0x10120);
|
||||
data2 = ast_read32(ast, 0x10170);
|
||||
if (data2 & 0x2000)
|
||||
ref_pll = 14318;
|
||||
else
|
||||
ref_pll = 12000;
|
||||
|
||||
denum = data & 0x1f;
|
||||
num = (data & 0x3fe0) >> 5;
|
||||
data = (data & 0xc000) >> 14;
|
||||
switch (data) {
|
||||
case 3:
|
||||
div = 0x4;
|
||||
break;
|
||||
case 2:
|
||||
case 1:
|
||||
div = 0x2;
|
||||
break;
|
||||
default:
|
||||
div = 0x1;
|
||||
break;
|
||||
}
|
||||
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
|
|||
ast_open_key(ast);
|
||||
ast_set_def_ext_reg(dev);
|
||||
|
||||
if (ast->chip == AST2300 || ast->chip == AST2400)
|
||||
ast_init_dram_2300(dev);
|
||||
else
|
||||
ast_init_dram_reg(dev);
|
||||
if (ast->DisableP2A == false)
|
||||
{
|
||||
if (ast->chip == AST2300 || ast->chip == AST2400)
|
||||
ast_init_dram_2300(dev);
|
||||
else
|
||||
ast_init_dram_reg(dev);
|
||||
|
||||
ast_init_3rdtx(dev);
|
||||
ast_init_3rdtx(dev);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (ast->tx_chip_type != AST_TX_NONE)
|
||||
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
|
||||
}
|
||||
}
|
||||
|
||||
/* AST 2300 DRAM settings */
|
||||
|
|
|
@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
|
|||
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
|
||||
|
||||
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
|
||||
struct drm_crtc *crtc, s64 __user *fence_ptr)
|
||||
struct drm_crtc *crtc, s32 __user *fence_ptr)
|
||||
{
|
||||
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
|
||||
}
|
||||
|
||||
static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
|
||||
static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
|
||||
struct drm_crtc *crtc)
|
||||
{
|
||||
s64 __user *fence_ptr;
|
||||
s32 __user *fence_ptr;
|
||||
|
||||
fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
|
||||
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
|
||||
|
@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
|
|||
state->color_mgmt_changed |= replaced;
|
||||
return ret;
|
||||
} else if (property == config->prop_out_fence_ptr) {
|
||||
s64 __user *fence_ptr = u64_to_user_ptr(val);
|
||||
s32 __user *fence_ptr = u64_to_user_ptr(val);
|
||||
|
||||
if (!fence_ptr)
|
||||
return 0;
|
||||
|
@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
|
|||
*/
|
||||
|
||||
struct drm_out_fence_state {
|
||||
s64 __user *out_fence_ptr;
|
||||
s32 __user *out_fence_ptr;
|
||||
struct sync_file *sync_file;
|
||||
int fd;
|
||||
};
|
||||
|
@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
|
|||
return 0;
|
||||
|
||||
for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
u64 __user *fence_ptr;
|
||||
s32 __user *fence_ptr;
|
||||
|
||||
fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
|
||||
|
||||
|
|
|
@ -115,27 +115,24 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
|
|||
|
||||
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
|
||||
/**
|
||||
* drm_kms_helper_poll_enable - re-enable output polling.
|
||||
* drm_kms_helper_poll_enable_locked - re-enable output polling.
|
||||
* @dev: drm_device
|
||||
*
|
||||
* This function re-enables the output polling work, after it has been
|
||||
* temporarily disabled using drm_kms_helper_poll_disable(), for example over
|
||||
* suspend/resume.
|
||||
* This function re-enables the output polling work without
|
||||
* locking the mode_config mutex.
|
||||
*
|
||||
* Drivers can call this helper from their device resume implementation. It is
|
||||
* an error to call this when the output polling support has not yet been set
|
||||
* up.
|
||||
*
|
||||
* Note that calls to enable and disable polling must be strictly ordered, which
|
||||
* is automatically the case when they're only call from suspend/resume
|
||||
* callbacks.
|
||||
* This is like drm_kms_helper_poll_enable() however it is to be
|
||||
* called from a context where the mode_config mutex is locked
|
||||
* already.
|
||||
*/
|
||||
void drm_kms_helper_poll_enable(struct drm_device *dev)
|
||||
void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
|
||||
{
|
||||
bool poll = false;
|
||||
struct drm_connector *connector;
|
||||
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
|
||||
|
||||
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
|
||||
return;
|
||||
|
||||
|
@ -163,7 +160,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
|
|||
if (poll)
|
||||
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
|
||||
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
|
||||
|
||||
static enum drm_connector_status
|
||||
drm_connector_detect(struct drm_connector *connector, bool force)
|
||||
|
@ -290,7 +287,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
|
|||
|
||||
/* Re-enable polling in case the global poll config changed. */
|
||||
if (drm_kms_helper_poll != dev->mode_config.poll_running)
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
drm_kms_helper_poll_enable_locked(dev);
|
||||
|
||||
dev->mode_config.poll_running = drm_kms_helper_poll;
|
||||
|
||||
|
@ -482,12 +479,8 @@ static void output_poll_execute(struct work_struct *work)
|
|||
* This function disables the output polling work.
|
||||
*
|
||||
* Drivers can call this helper from their device suspend implementation. It is
|
||||
* not an error to call this even when output polling isn't enabled or already
|
||||
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
|
||||
*
|
||||
* Note that calls to enable and disable polling must be strictly ordered, which
|
||||
* is automatically the case when they're only call from suspend/resume
|
||||
* callbacks.
|
||||
* not an error to call this even when output polling isn't enabled or already
|
||||
* disabled.
|
||||
*/
|
||||
void drm_kms_helper_poll_disable(struct drm_device *dev)
|
||||
{
|
||||
|
@ -497,6 +490,24 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
|
||||
|
||||
/**
|
||||
* drm_kms_helper_poll_enable - re-enable output polling.
|
||||
* @dev: drm_device
|
||||
*
|
||||
* This function re-enables the output polling work.
|
||||
*
|
||||
* Drivers can call this helper from their device resume implementation. It is
|
||||
* an error to call this when the output polling support has not yet been set
|
||||
* up.
|
||||
*/
|
||||
void drm_kms_helper_poll_enable(struct drm_device *dev)
|
||||
{
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
drm_kms_helper_poll_enable_locked(dev);
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
|
||||
|
||||
/**
|
||||
* drm_kms_helper_poll_init - initialize and enable output polling
|
||||
* @dev: drm_device
|
||||
|
|
|
@ -481,7 +481,6 @@ struct parser_exec_state {
|
|||
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
|
||||
|
||||
static unsigned long bypass_scan_mask = 0;
|
||||
static bool bypass_batch_buffer_scan = true;
|
||||
|
||||
/* ring ALL, type = 0 */
|
||||
static struct sub_op_bits sub_op_mi[] = {
|
||||
|
@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
|
|||
{
|
||||
struct intel_gvt *gvt = s->vgpu->gvt;
|
||||
|
||||
if (bypass_batch_buffer_scan)
|
||||
return 0;
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
|
||||
/* BDW decides privilege based on address space */
|
||||
if (cmd_val(s, 0) & (1 << 8))
|
||||
|
|
|
@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
|
|||
#define get_desc_from_elsp_dwords(ed, i) \
|
||||
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
|
||||
|
||||
|
||||
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
|
||||
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
|
||||
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
|
||||
unsigned long add, int gmadr_bytes)
|
||||
{
|
||||
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
|
||||
return -1;
|
||||
|
||||
*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
|
||||
BATCH_BUFFER_ADDR_MASK;
|
||||
if (gmadr_bytes == 8) {
|
||||
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
|
||||
add & BATCH_BUFFER_ADDR_HIGH_MASK;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
|
||||
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
|
||||
struct intel_shadow_bb_entry *entry_obj;
|
||||
|
||||
/* pin the gem object to ggtt */
|
||||
if (!list_empty(&workload->shadow_bb)) {
|
||||
struct intel_shadow_bb_entry *entry_obj =
|
||||
list_first_entry(&workload->shadow_bb,
|
||||
struct intel_shadow_bb_entry,
|
||||
list);
|
||||
struct intel_shadow_bb_entry *temp;
|
||||
list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
|
||||
list) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
|
||||
4, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
gvt_err("Cannot pin\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* FIXME: we are not tracking our pinned VMA leaving it
|
||||
* up to the core to fix up the stray pin_count upon
|
||||
* free.
|
||||
*/
|
||||
|
||||
/* update the relocate gma with shadow batch buffer*/
|
||||
set_gma_to_bb_cmd(entry_obj,
|
||||
i915_ggtt_offset(vma),
|
||||
gmadr_bytes);
|
||||
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
gvt_err("Cannot pin\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* FIXME: we are not tracking our pinned VMA leaving it
|
||||
* up to the core to fix up the stray pin_count upon
|
||||
* free.
|
||||
*/
|
||||
|
||||
/* update the relocate gma with shadow batch buffer*/
|
||||
entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
|
||||
if (gmadr_bytes == 8)
|
||||
entry_obj->bb_start_cmd_va[2] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
|
|||
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
|
||||
}
|
||||
|
||||
vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
|
||||
vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
|
||||
sizeof(struct intel_vgpu_workload), 0,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
NULL);
|
||||
|
|
|
@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
|
||||
char *buf)
|
||||
static ssize_t available_instances_show(struct kobject *kobj,
|
||||
struct device *dev, char *buf)
|
||||
{
|
||||
struct intel_vgpu_type *type;
|
||||
unsigned int num = 0;
|
||||
|
@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
|
|||
type->fence);
|
||||
}
|
||||
|
||||
static MDEV_TYPE_ATTR_RO(available_instance);
|
||||
static MDEV_TYPE_ATTR_RO(available_instances);
|
||||
static MDEV_TYPE_ATTR_RO(device_api);
|
||||
static MDEV_TYPE_ATTR_RO(description);
|
||||
|
||||
static struct attribute *type_attrs[] = {
|
||||
&mdev_type_attr_available_instance.attr,
|
||||
&mdev_type_attr_available_instances.attr,
|
||||
&mdev_type_attr_device_api.attr,
|
||||
&mdev_type_attr_description.attr,
|
||||
NULL,
|
||||
|
|
|
@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
|
|||
struct drm_i915_gem_object *obj;
|
||||
void *va;
|
||||
unsigned long len;
|
||||
void *bb_start_cmd_va;
|
||||
u32 *bb_start_cmd_va;
|
||||
};
|
||||
|
||||
#define workload_q_head(vgpu, ring_id) \
|
||||
|
|
|
@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
|
|||
|
||||
assert_forcewakes_inactive(dev_priv);
|
||||
|
||||
if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
|
||||
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
|
||||
intel_hpd_poll_init(dev_priv);
|
||||
|
||||
DRM_DEBUG_KMS("Device suspended\n");
|
||||
|
|
|
@ -1977,6 +1977,11 @@ struct drm_i915_private {
|
|||
|
||||
struct i915_frontbuffer_tracking fb_tracking;
|
||||
|
||||
struct intel_atomic_helper {
|
||||
struct llist_head free_list;
|
||||
struct work_struct free_work;
|
||||
} atomic_helper;
|
||||
|
||||
u16 orig_clock;
|
||||
|
||||
bool mchbar_need_disable;
|
||||
|
|
|
@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
|
|||
return ret;
|
||||
}
|
||||
|
||||
trace_i915_vma_bind(vma, bind_flags);
|
||||
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
|
|||
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
|
||||
struct edid *edid;
|
||||
struct i2c_adapter *i2c;
|
||||
bool ret = false;
|
||||
|
||||
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
|
||||
|
||||
|
@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
|
|||
*/
|
||||
if (!is_digital) {
|
||||
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
|
||||
return true;
|
||||
ret = true;
|
||||
} else {
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
|
||||
} else {
|
||||
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
|
||||
}
|
||||
|
||||
kfree(edid);
|
||||
|
||||
return false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static enum drm_connector_status
|
||||
|
|
|
@ -2251,6 +2251,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
|
|||
intel_fill_fb_ggtt_view(&view, fb, rotation);
|
||||
vma = i915_gem_object_to_ggtt(obj, &view);
|
||||
|
||||
if (WARN_ON_ONCE(!vma))
|
||||
return;
|
||||
|
||||
i915_vma_unpin_fence(vma);
|
||||
i915_gem_object_unpin_from_display_plane(vma);
|
||||
}
|
||||
|
@ -2585,8 +2588,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
|
|||
* We only keep the x/y offsets, so push all of the
|
||||
* gtt offset into the x/y offsets.
|
||||
*/
|
||||
_intel_adjust_tile_offset(&x, &y, tile_size,
|
||||
tile_width, tile_height, pitch_tiles,
|
||||
_intel_adjust_tile_offset(&x, &y,
|
||||
tile_width, tile_height,
|
||||
tile_size, pitch_tiles,
|
||||
gtt_offset_rotated * tile_size, 0);
|
||||
|
||||
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
|
||||
|
@ -6849,6 +6853,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
|
|||
}
|
||||
|
||||
state = drm_atomic_state_alloc(crtc->dev);
|
||||
if (!state) {
|
||||
DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
|
||||
crtc->base.id, crtc->name);
|
||||
return;
|
||||
}
|
||||
|
||||
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
|
||||
|
||||
/* Everything's already locked, -EDEADLK can't happen. */
|
||||
|
@ -11246,6 +11256,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
|
|||
}
|
||||
|
||||
old->restore_state = restore_state;
|
||||
drm_atomic_state_put(state);
|
||||
|
||||
/* let the connector get through one full cycle before testing */
|
||||
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
|
||||
|
@ -14515,8 +14526,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
|
|||
break;
|
||||
|
||||
case FENCE_FREE:
|
||||
drm_atomic_state_put(&state->base);
|
||||
break;
|
||||
{
|
||||
struct intel_atomic_helper *helper =
|
||||
&to_i915(state->base.dev)->atomic_helper;
|
||||
|
||||
if (llist_add(&state->freed, &helper->free_list))
|
||||
schedule_work(&helper->free_work);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
|
@ -16395,6 +16412,18 @@ static void sanitize_watermarks(struct drm_device *dev)
|
|||
drm_modeset_acquire_fini(&ctx);
|
||||
}
|
||||
|
||||
static void intel_atomic_helper_free_state(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), atomic_helper.free_work);
|
||||
struct intel_atomic_state *state, *next;
|
||||
struct llist_node *freed;
|
||||
|
||||
freed = llist_del_all(&dev_priv->atomic_helper.free_list);
|
||||
llist_for_each_entry_safe(state, next, freed, freed)
|
||||
drm_atomic_state_put(&state->base);
|
||||
}
|
||||
|
||||
int intel_modeset_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
@ -16414,6 +16443,9 @@ int intel_modeset_init(struct drm_device *dev)
|
|||
|
||||
dev->mode_config.funcs = &intel_mode_funcs;
|
||||
|
||||
INIT_WORK(&dev_priv->atomic_helper.free_work,
|
||||
intel_atomic_helper_free_state);
|
||||
|
||||
intel_init_quirks(dev);
|
||||
|
||||
intel_init_pm(dev_priv);
|
||||
|
@ -17027,7 +17059,8 @@ void intel_display_resume(struct drm_device *dev)
|
|||
|
||||
if (ret)
|
||||
DRM_ERROR("Restoring old state failed with %i\n", ret);
|
||||
drm_atomic_state_put(state);
|
||||
if (state)
|
||||
drm_atomic_state_put(state);
|
||||
}
|
||||
|
||||
void intel_modeset_gem_init(struct drm_device *dev)
|
||||
|
@ -17097,6 +17130,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
flush_work(&dev_priv->atomic_helper.free_work);
|
||||
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
|
||||
|
||||
intel_disable_gt_powersave(dev_priv);
|
||||
|
||||
/*
|
||||
|
|
|
@ -370,6 +370,8 @@ struct intel_atomic_state {
|
|||
struct skl_wm_values wm_results;
|
||||
|
||||
struct i915_sw_fence commit_ready;
|
||||
|
||||
struct llist_node freed;
|
||||
};
|
||||
|
||||
struct intel_plane_state {
|
||||
|
|
|
@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
|
|||
{
|
||||
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
|
||||
|
||||
if (!ifbdev)
|
||||
return;
|
||||
|
||||
ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
|
||||
}
|
||||
|
||||
|
|
|
@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
|
|||
|
||||
/* Enable polling and queue hotplug re-enabling. */
|
||||
if (hpd_disabled) {
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
drm_kms_helper_poll_enable_locked(dev);
|
||||
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
|
||||
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
|
||||
}
|
||||
|
@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
|
|||
}
|
||||
|
||||
if (enabled)
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
drm_kms_helper_poll_enable_locked(dev);
|
||||
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
|
|
|
@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
|
|||
return ret;
|
||||
|
||||
/* enable polling for external displays */
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
if (!dev->mode_config.poll_enabled)
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
|
||||
/* enable hotplug interrupts */
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
|
|
|
@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
|
|||
pci_set_master(pdev);
|
||||
|
||||
ret = nouveau_do_resume(drm_dev, true);
|
||||
drm_kms_helper_poll_enable(drm_dev);
|
||||
|
||||
if (!drm_dev->mode_config.poll_enabled)
|
||||
drm_kms_helper_poll_enable(drm_dev);
|
||||
|
||||
/* do magic */
|
||||
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
|
||||
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
|
||||
|
|
|
@ -165,6 +165,8 @@ struct nouveau_drm {
|
|||
struct backlight_device *backlight;
|
||||
struct list_head bl_connectors;
|
||||
struct work_struct hpd_work;
|
||||
struct work_struct fbcon_work;
|
||||
int fbcon_new_state;
|
||||
#ifdef CONFIG_ACPI
|
||||
struct notifier_block acpi_nb;
|
||||
#endif
|
||||
|
|
|
@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
|
|||
.fb_probe = nouveau_fbcon_create,
|
||||
};
|
||||
|
||||
static void
|
||||
nouveau_fbcon_set_suspend_work(struct work_struct *work)
|
||||
{
|
||||
struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
|
||||
int state = READ_ONCE(drm->fbcon_new_state);
|
||||
|
||||
if (state == FBINFO_STATE_RUNNING)
|
||||
pm_runtime_get_sync(drm->dev->dev);
|
||||
|
||||
console_lock();
|
||||
if (state == FBINFO_STATE_RUNNING)
|
||||
nouveau_fbcon_accel_restore(drm->dev);
|
||||
drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
|
||||
if (state != FBINFO_STATE_RUNNING)
|
||||
nouveau_fbcon_accel_save_disable(drm->dev);
|
||||
console_unlock();
|
||||
|
||||
if (state == FBINFO_STATE_RUNNING) {
|
||||
pm_runtime_mark_last_busy(drm->dev->dev);
|
||||
pm_runtime_put_sync(drm->dev->dev);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
|
||||
{
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
if (drm->fbcon) {
|
||||
console_lock();
|
||||
if (state == FBINFO_STATE_RUNNING)
|
||||
nouveau_fbcon_accel_restore(dev);
|
||||
drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
|
||||
if (state != FBINFO_STATE_RUNNING)
|
||||
nouveau_fbcon_accel_save_disable(dev);
|
||||
console_unlock();
|
||||
}
|
||||
|
||||
if (!drm->fbcon)
|
||||
return;
|
||||
|
||||
drm->fbcon_new_state = state;
|
||||
/* Since runtime resume can happen as a result of a sysfs operation,
|
||||
* it's possible we already have the console locked. So handle fbcon
|
||||
* init/deinit from a seperate work thread
|
||||
*/
|
||||
schedule_work(&drm->fbcon_work);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
|
|||
return -ENOMEM;
|
||||
|
||||
drm->fbcon = fbcon;
|
||||
INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
|
||||
|
||||
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
|
||||
|
||||
|
|
|
@ -366,11 +366,10 @@ static void
|
|||
radeon_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
/* if we are running in a VM, make sure the device
|
||||
* torn down properly on reboot/shutdown.
|
||||
* unfortunately we can't detect certain
|
||||
* hypervisors so just do this all the time.
|
||||
* torn down properly on reboot/shutdown
|
||||
*/
|
||||
radeon_pci_remove(pdev);
|
||||
if (radeon_device_is_virtual())
|
||||
radeon_pci_remove(pdev);
|
||||
}
|
||||
|
||||
static int radeon_pmops_suspend(struct device *dev)
|
||||
|
|
|
@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
|
|||
|
||||
}
|
||||
|
||||
__drm_atomic_helper_crtc_destroy_state(state);
|
||||
drm_atomic_helper_crtc_destroy_state(crtc, state);
|
||||
}
|
||||
|
||||
static const struct drm_crtc_funcs vc4_crtc_funcs = {
|
||||
|
|
|
@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
|
|||
args->shader_rec_count);
|
||||
struct vc4_bo *bo;
|
||||
|
||||
if (uniforms_offset < shader_rec_offset ||
|
||||
if (shader_rec_offset < args->bin_cl_size ||
|
||||
uniforms_offset < shader_rec_offset ||
|
||||
exec_size < uniforms_offset ||
|
||||
args->shader_rec_count >= (UINT_MAX /
|
||||
sizeof(struct vc4_shader_state)) ||
|
||||
temp_size < exec_size) {
|
||||
DRM_ERROR("overflow in exec arguments\n");
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
|
|
@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
|
|||
}
|
||||
|
||||
ret = vc4_full_res_bounds_check(exec, *obj, surf);
|
||||
if (!ret)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
|
|||
goto err_clk_dis;
|
||||
}
|
||||
|
||||
ret = i2c_add_adapter(&id->adap);
|
||||
if (ret < 0)
|
||||
goto err_clk_dis;
|
||||
|
||||
/*
|
||||
* Cadence I2C controller has a bug wherein it generates
|
||||
* invalid read transaction after HW timeout in master receiver mode.
|
||||
|
@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
|
|||
*/
|
||||
cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
|
||||
|
||||
ret = i2c_add_adapter(&id->adap);
|
||||
if (ret < 0)
|
||||
goto err_clk_dis;
|
||||
|
||||
dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
|
||||
id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/pinctrl/consumer.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int lpi2c_imx_suspend(struct device *dev)
|
||||
{
|
||||
pinctrl_pm_select_sleep_state(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int lpi2c_imx_resume(struct device *dev)
|
||||
{
|
||||
pinctrl_pm_select_default_state(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
|
||||
|
||||
static struct platform_driver lpi2c_imx_driver = {
|
||||
.probe = lpi2c_imx_probe,
|
||||
.remove = lpi2c_imx_remove,
|
||||
.driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.of_match_table = lpi2c_imx_of_match,
|
||||
.pm = &imx_lpi2c_pm,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
|
|||
if (!src_addr || !src_addr->sa_family) {
|
||||
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
|
||||
src_addr->sa_family = dst_addr->sa_family;
|
||||
if (dst_addr->sa_family == AF_INET6) {
|
||||
if (IS_ENABLED(CONFIG_IPV6) &&
|
||||
dst_addr->sa_family == AF_INET6) {
|
||||
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
|
||||
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
|
||||
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
|
||||
|
|
|
@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
|
||||
|
||||
if (access & IB_ACCESS_ON_DEMAND) {
|
||||
put_pid(umem->pid);
|
||||
ret = ib_umem_odp_get(context, umem);
|
||||
if (ret) {
|
||||
kfree(umem);
|
||||
|
@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
|
||||
page_list = (struct page **) __get_free_page(GFP_KERNEL);
|
||||
if (!page_list) {
|
||||
put_pid(umem->pid);
|
||||
kfree(umem);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
|
|
@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
|
|||
|
||||
memset(props, 0, sizeof(struct ib_port_attr));
|
||||
props->max_mtu = IB_MTU_4096;
|
||||
if (netdev->mtu >= 4096)
|
||||
props->active_mtu = IB_MTU_4096;
|
||||
else if (netdev->mtu >= 2048)
|
||||
props->active_mtu = IB_MTU_2048;
|
||||
else if (netdev->mtu >= 1024)
|
||||
props->active_mtu = IB_MTU_1024;
|
||||
else if (netdev->mtu >= 512)
|
||||
props->active_mtu = IB_MTU_512;
|
||||
else
|
||||
props->active_mtu = IB_MTU_256;
|
||||
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
|
||||
|
||||
if (!netif_carrier_ok(netdev))
|
||||
props->state = IB_PORT_DOWN;
|
||||
|
|
|
@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
|
|||
skb_trim(skb, dlen);
|
||||
mutex_lock(&ep->com.mutex);
|
||||
|
||||
/* update RX credits */
|
||||
update_rx_credits(ep, dlen);
|
||||
|
||||
switch (ep->com.state) {
|
||||
case MPA_REQ_SENT:
|
||||
update_rx_credits(ep, dlen);
|
||||
ep->rcv_seq += dlen;
|
||||
disconnect = process_mpa_reply(ep, skb);
|
||||
break;
|
||||
case MPA_REQ_WAIT:
|
||||
update_rx_credits(ep, dlen);
|
||||
ep->rcv_seq += dlen;
|
||||
disconnect = process_mpa_request(ep, skb);
|
||||
break;
|
||||
case FPDU_MODE: {
|
||||
struct c4iw_qp_attributes attrs;
|
||||
|
||||
update_rx_credits(ep, dlen);
|
||||
BUG_ON(!ep->com.qp);
|
||||
if (status)
|
||||
pr_err("%s Unexpected streaming data." \
|
||||
|
|
|
@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
|||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* Special cqe for drain WR completions...
|
||||
*/
|
||||
if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
|
||||
*cookie = CQE_DRAIN_COOKIE(hw_cqe);
|
||||
*cqe = *hw_cqe;
|
||||
goto skip_cqe;
|
||||
}
|
||||
|
||||
/*
|
||||
* Gotta tweak READ completions:
|
||||
* 1) the cqe doesn't contain the sq_wptr from the wr.
|
||||
|
@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
|
|||
c4iw_invalidate_mr(qhp->rhp,
|
||||
CQE_WRID_FR_STAG(&cqe));
|
||||
break;
|
||||
case C4IW_DRAIN_OPCODE:
|
||||
wc->opcode = IB_WC_SEND;
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR MOD "Unexpected opcode %d "
|
||||
"in the CQE received for QPID=0x%0x\n",
|
||||
|
@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
|
|||
}
|
||||
}
|
||||
out:
|
||||
if (wq) {
|
||||
if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
|
||||
if (t4_sq_empty(wq))
|
||||
complete(&qhp->sq_drained);
|
||||
if (t4_rq_empty(wq))
|
||||
complete(&qhp->rq_drained);
|
||||
}
|
||||
if (wq)
|
||||
spin_unlock(&qhp->lock);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
}

rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
err = -ENOMEM;
goto err_free_status_page;
}

rdev->status_page->db_off = 0;

return 0;
err_free_status_page:
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
@@ -862,6 +870,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
@ -45,6 +45,7 @@
|
|||
#include <linux/kref.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
|
@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
|
|||
struct list_head qpids;
|
||||
struct list_head cqids;
|
||||
struct mutex lock;
|
||||
struct kref kref;
|
||||
};
|
||||
|
||||
enum c4iw_rdev_flags {
|
||||
|
@ -183,6 +185,7 @@ struct c4iw_rdev {
|
|||
atomic_t wr_log_idx;
|
||||
struct wr_log_entry *wr_log;
|
||||
int wr_log_size;
|
||||
struct workqueue_struct *free_workq;
|
||||
};
|
||||
|
||||
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
|
||||
|
@ -480,8 +483,8 @@ struct c4iw_qp {
|
|||
wait_queue_head_t wait;
|
||||
struct timer_list timer;
|
||||
int sq_sig_all;
|
||||
struct completion rq_drained;
|
||||
struct completion sq_drained;
|
||||
struct work_struct free_work;
|
||||
struct c4iw_ucontext *ucontext;
|
||||
};
|
||||
|
||||
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
|
||||
|
@ -495,6 +498,7 @@ struct c4iw_ucontext {
|
|||
u32 key;
|
||||
spinlock_t mmap_lock;
|
||||
struct list_head mmaps;
|
||||
struct kref kref;
|
||||
};
|
||||
|
||||
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
|
||||
|
@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
|
|||
return container_of(c, struct c4iw_ucontext, ibucontext);
|
||||
}
|
||||
|
||||
void _c4iw_free_ucontext(struct kref *kref);
|
||||
|
||||
static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
|
||||
{
|
||||
kref_put(&ucontext->kref, _c4iw_free_ucontext);
|
||||
}
|
||||
|
||||
static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
|
||||
{
|
||||
kref_get(&ucontext->kref);
|
||||
}
|
||||
|
||||
struct c4iw_mm_entry {
|
||||
struct list_head entry;
|
||||
u64 addr;
|
||||
|
@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
|
|||
return IB_QPS_ERR;
|
||||
}
|
||||
|
||||
#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
|
||||
|
||||
static inline u32 c4iw_ib_to_tpt_access(int a)
|
||||
{
|
||||
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
|
||||
|
@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
|
|||
extern int db_fc_threshold;
|
||||
extern int db_coalescing_threshold;
|
||||
extern int use_dsgl;
|
||||
void c4iw_drain_rq(struct ib_qp *qp);
|
||||
void c4iw_drain_sq(struct ib_qp *qp);
|
||||
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
|
|||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
|
||||
void _c4iw_free_ucontext(struct kref *kref)
|
||||
{
|
||||
struct c4iw_dev *rhp = to_c4iw_dev(context->device);
|
||||
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
|
||||
struct c4iw_ucontext *ucontext;
|
||||
struct c4iw_dev *rhp;
|
||||
struct c4iw_mm_entry *mm, *tmp;
|
||||
|
||||
PDBG("%s context %p\n", __func__, context);
|
||||
ucontext = container_of(kref, struct c4iw_ucontext, kref);
|
||||
rhp = to_c4iw_dev(ucontext->ibucontext.device);
|
||||
|
||||
PDBG("%s ucontext %p\n", __func__, ucontext);
|
||||
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
|
||||
kfree(mm);
|
||||
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
|
||||
kfree(ucontext);
|
||||
}
|
||||
|
||||
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
|
||||
{
|
||||
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
|
||||
|
||||
PDBG("%s context %p\n", __func__, context);
|
||||
c4iw_put_ucontext(ucontext);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
|
|||
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
|
||||
INIT_LIST_HEAD(&context->mmaps);
|
||||
spin_lock_init(&context->mmap_lock);
|
||||
kref_init(&context->kref);
|
||||
|
||||
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
|
||||
if (!warned++)
|
||||
|
@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
|
|||
|
||||
memset(props, 0, sizeof(struct ib_port_attr));
|
||||
props->max_mtu = IB_MTU_4096;
|
||||
if (netdev->mtu >= 4096)
|
||||
props->active_mtu = IB_MTU_4096;
|
||||
else if (netdev->mtu >= 2048)
|
||||
props->active_mtu = IB_MTU_2048;
|
||||
else if (netdev->mtu >= 1024)
|
||||
props->active_mtu = IB_MTU_1024;
|
||||
else if (netdev->mtu >= 512)
|
||||
props->active_mtu = IB_MTU_512;
|
||||
else
|
||||
props->active_mtu = IB_MTU_256;
|
||||
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
|
||||
|
||||
if (!netif_carrier_ok(netdev))
|
||||
props->state = IB_PORT_DOWN;
|
||||
|
@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
|
|||
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
|
||||
dev->ibdev.get_port_immutable = c4iw_port_immutable;
|
||||
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
|
||||
dev->ibdev.drain_sq = c4iw_drain_sq;
|
||||
dev->ibdev.drain_rq = c4iw_drain_rq;
|
||||
|
||||
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
|
||||
if (!dev->ibdev.iwcm)
|
||||
|
|
|
@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void _free_qp(struct kref *kref)
|
||||
static void free_qp_work(struct work_struct *work)
|
||||
{
|
||||
struct c4iw_ucontext *ucontext;
|
||||
struct c4iw_qp *qhp;
|
||||
struct c4iw_dev *rhp;
|
||||
|
||||
qhp = container_of(work, struct c4iw_qp, free_work);
|
||||
ucontext = qhp->ucontext;
|
||||
rhp = qhp->rhp;
|
||||
|
||||
PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
|
||||
destroy_qp(&rhp->rdev, &qhp->wq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
|
||||
if (ucontext)
|
||||
c4iw_put_ucontext(ucontext);
|
||||
kfree(qhp);
|
||||
}
|
||||
|
||||
static void queue_qp_free(struct kref *kref)
|
||||
{
|
||||
struct c4iw_qp *qhp;
|
||||
|
||||
qhp = container_of(kref, struct c4iw_qp, kref);
|
||||
PDBG("%s qhp %p\n", __func__, qhp);
|
||||
kfree(qhp);
|
||||
queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
|
||||
}
|
||||
|
||||
void c4iw_qp_add_ref(struct ib_qp *qp)
|
||||
|
@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
|
|||
void c4iw_qp_rem_ref(struct ib_qp *qp)
|
||||
{
|
||||
PDBG("%s ib_qp %p\n", __func__, qp);
|
||||
kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
|
||||
kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
|
||||
}
|
||||
|
||||
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
|
||||
|
@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
|
||||
{
|
||||
struct t4_cqe cqe = {};
|
||||
struct c4iw_cq *schp;
|
||||
unsigned long flag;
|
||||
struct t4_cq *cq;
|
||||
|
||||
schp = to_c4iw_cq(qhp->ibqp.send_cq);
|
||||
cq = &schp->cq;
|
||||
|
||||
cqe.u.drain_cookie = wr->wr_id;
|
||||
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
||||
CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
|
||||
CQE_TYPE_V(1) |
|
||||
CQE_SWCQE_V(1) |
|
||||
CQE_QPID_V(qhp->wq.sq.qid));
|
||||
|
||||
spin_lock_irqsave(&schp->lock, flag);
|
||||
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
|
||||
cq->sw_queue[cq->sw_pidx] = cqe;
|
||||
t4_swcq_produce(cq);
|
||||
spin_unlock_irqrestore(&schp->lock, flag);
|
||||
|
||||
spin_lock_irqsave(&schp->comp_handler_lock, flag);
|
||||
(*schp->ibcq.comp_handler)(&schp->ibcq,
|
||||
schp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
||||
}
|
||||
|
||||
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
|
||||
{
|
||||
struct t4_cqe cqe = {};
|
||||
struct c4iw_cq *rchp;
|
||||
unsigned long flag;
|
||||
struct t4_cq *cq;
|
||||
|
||||
rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
|
||||
cq = &rchp->cq;
|
||||
|
||||
cqe.u.drain_cookie = wr->wr_id;
|
||||
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
||||
CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
|
||||
CQE_TYPE_V(0) |
|
||||
CQE_SWCQE_V(1) |
|
||||
CQE_QPID_V(qhp->wq.sq.qid));
|
||||
|
||||
spin_lock_irqsave(&rchp->lock, flag);
|
||||
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
|
||||
cq->sw_queue[cq->sw_pidx] = cqe;
|
||||
t4_swcq_produce(cq);
|
||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||
|
||||
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
|
||||
rchp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
|
||||
}
|
||||
|
||||
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
{
|
||||
|
@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|||
spin_lock_irqsave(&qhp->lock, flag);
|
||||
if (t4_wq_in_error(&qhp->wq)) {
|
||||
spin_unlock_irqrestore(&qhp->lock, flag);
|
||||
*bad_wr = wr;
|
||||
return -EINVAL;
|
||||
complete_sq_drain_wr(qhp, wr);
|
||||
return err;
|
||||
}
|
||||
num_wrs = t4_sq_avail(&qhp->wq);
|
||||
if (num_wrs == 0) {
|
||||
|
@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
|||
spin_lock_irqsave(&qhp->lock, flag);
|
||||
if (t4_wq_in_error(&qhp->wq)) {
|
||||
spin_unlock_irqrestore(&qhp->lock, flag);
|
||||
*bad_wr = wr;
|
||||
return -EINVAL;
|
||||
complete_rq_drain_wr(qhp, wr);
|
||||
return err;
|
||||
}
|
||||
num_wrs = t4_rq_avail(&qhp->wq);
|
||||
if (num_wrs == 0) {
|
||||
|
@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
|||
}
|
||||
break;
|
||||
case C4IW_QP_STATE_CLOSING:
|
||||
if (!internal) {
|
||||
|
||||
/*
|
||||
* Allow kernel users to move to ERROR for qp draining.
|
||||
*/
|
||||
if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
|
||||
C4IW_QP_STATE_ERROR)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
|
|||
struct c4iw_dev *rhp;
|
||||
struct c4iw_qp *qhp;
|
||||
struct c4iw_qp_attributes attrs;
|
||||
struct c4iw_ucontext *ucontext;
|
||||
|
||||
qhp = to_c4iw_qp(ib_qp);
|
||||
rhp = qhp->rhp;
|
||||
|
@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
|
|||
spin_unlock_irq(&rhp->lock);
|
||||
free_ird(rhp, qhp->attr.max_ird);
|
||||
|
||||
ucontext = ib_qp->uobject ?
|
||||
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
|
||||
destroy_qp(&rhp->rdev, &qhp->wq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
|
||||
c4iw_qp_rem_ref(ib_qp);
|
||||
|
||||
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
|
||||
|
@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
|||
qhp->attr.max_ird = 0;
|
||||
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
|
||||
spin_lock_init(&qhp->lock);
|
||||
init_completion(&qhp->sq_drained);
|
||||
init_completion(&qhp->rq_drained);
|
||||
mutex_init(&qhp->mutex);
|
||||
init_waitqueue_head(&qhp->wait);
|
||||
kref_init(&qhp->kref);
|
||||
INIT_WORK(&qhp->free_work, free_qp_work);
|
||||
|
||||
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
|
||||
if (ret)
|
||||
|
@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
|||
ma_sync_key_mm->len = PAGE_SIZE;
|
||||
insert_mmap(ucontext, ma_sync_key_mm);
|
||||
}
|
||||
|
||||
c4iw_get_ucontext(ucontext);
|
||||
qhp->ucontext = ucontext;
|
||||
}
|
||||
qhp->ibqp.qp_num = qhp->wq.sq.qid;
|
||||
init_timer(&(qhp->timer));
|
||||
|
@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void move_qp_to_err(struct c4iw_qp *qp)
|
||||
{
|
||||
struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
|
||||
|
||||
(void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
|
||||
}
|
||||
|
||||
void c4iw_drain_sq(struct ib_qp *ibqp)
|
||||
{
|
||||
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
|
||||
unsigned long flag;
|
||||
bool need_to_wait;
|
||||
|
||||
move_qp_to_err(qp);
|
||||
spin_lock_irqsave(&qp->lock, flag);
|
||||
need_to_wait = !t4_sq_empty(&qp->wq);
|
||||
spin_unlock_irqrestore(&qp->lock, flag);
|
||||
|
||||
if (need_to_wait)
|
||||
wait_for_completion(&qp->sq_drained);
|
||||
}
|
||||
|
||||
void c4iw_drain_rq(struct ib_qp *ibqp)
|
||||
{
|
||||
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
|
||||
unsigned long flag;
|
||||
bool need_to_wait;
|
||||
|
||||
move_qp_to_err(qp);
|
||||
spin_lock_irqsave(&qp->lock, flag);
|
||||
need_to_wait = !t4_rq_empty(&qp->wq);
|
||||
spin_unlock_irqrestore(&qp->lock, flag);
|
||||
|
||||
if (need_to_wait)
|
||||
wait_for_completion(&qp->rq_drained);
|
||||
}
|
||||
|
|
|
@ -179,6 +179,7 @@ struct t4_cqe {
|
|||
__be32 wrid_hi;
|
||||
__be32 wrid_low;
|
||||
} gen;
|
||||
u64 drain_cookie;
|
||||
} u;
|
||||
__be64 reserved;
|
||||
__be64 bits_type_ts;
|
||||
|
@ -238,6 +239,7 @@ struct t4_cqe {
|
|||
/* generic accessor macros */
|
||||
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
|
||||
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
|
||||
#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
|
||||
|
||||
/* macros for flit 3 of the cqe */
|
||||
#define CQE_GENBIT_S 63
|
||||
|
|
|
@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
|
|||
memset(props, 0, sizeof(*props));
|
||||
|
||||
props->max_mtu = IB_MTU_4096;
|
||||
if (netdev->mtu >= 4096)
|
||||
props->active_mtu = IB_MTU_4096;
|
||||
else if (netdev->mtu >= 2048)
|
||||
props->active_mtu = IB_MTU_2048;
|
||||
else if (netdev->mtu >= 1024)
|
||||
props->active_mtu = IB_MTU_1024;
|
||||
else if (netdev->mtu >= 512)
|
||||
props->active_mtu = IB_MTU_512;
|
||||
else
|
||||
props->active_mtu = IB_MTU_256;
|
||||
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
|
||||
|
||||
props->lid = 1;
|
||||
if (netif_carrier_ok(iwdev->netdev))
|
||||
|
|
|
@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
|
|||
memset(props, 0, sizeof(*props));
|
||||
|
||||
props->max_mtu = IB_MTU_4096;
|
||||
|
||||
if (netdev->mtu >= 4096)
|
||||
props->active_mtu = IB_MTU_4096;
|
||||
else if (netdev->mtu >= 2048)
|
||||
props->active_mtu = IB_MTU_2048;
|
||||
else if (netdev->mtu >= 1024)
|
||||
props->active_mtu = IB_MTU_1024;
|
||||
else if (netdev->mtu >= 512)
|
||||
props->active_mtu = IB_MTU_512;
|
||||
else
|
||||
props->active_mtu = IB_MTU_256;
|
||||
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
|
||||
|
||||
props->lid = 1;
|
||||
props->lmc = 0;
|
||||
|
|
|
@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void qedr_unaffiliated_event(void *context,
|
||||
u8 event_code)
|
||||
void qedr_unaffiliated_event(void *context, u8 event_code)
|
||||
{
|
||||
pr_err("unaffiliated event not implemented yet\n");
|
||||
}
|
||||
|
@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
|
|||
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
|
||||
goto sysfs_err;
|
||||
|
||||
if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
|
||||
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
|
||||
return dev;
|
||||
|
||||
|
@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
|
|||
ib_dealloc_device(&dev->ibdev);
|
||||
}
|
||||
|
||||
static int qedr_close(struct qedr_dev *dev)
|
||||
static void qedr_close(struct qedr_dev *dev)
|
||||
{
|
||||
qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
|
||||
|
||||
return 0;
|
||||
if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
|
||||
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
|
||||
}
|
||||
|
||||
static void qedr_shutdown(struct qedr_dev *dev)
|
||||
|
@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
|
|||
qedr_remove(dev);
|
||||
}
|
||||
|
||||
static void qedr_open(struct qedr_dev *dev)
|
||||
{
|
||||
if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
|
||||
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
|
||||
}
|
||||
|
||||
static void qedr_mac_address_change(struct qedr_dev *dev)
|
||||
{
|
||||
union ib_gid *sgid = &dev->sgid_tbl[0];
|
||||
|
@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
|
|||
|
||||
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
|
||||
|
||||
qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
|
||||
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
|
||||
|
||||
if (rc)
|
||||
DP_ERR(dev, "Error updating mac filter\n");
|
||||
|
@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
|
|||
{
|
||||
switch (event) {
|
||||
case QEDE_UP:
|
||||
qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
|
||||
qedr_open(dev);
|
||||
break;
|
||||
case QEDE_DOWN:
|
||||
qedr_close(dev);
|
||||
|
|
|
@ -113,6 +113,8 @@ struct qedr_device_attr {
|
|||
struct qed_rdma_events events;
|
||||
};
|
||||
|
||||
#define QEDR_ENET_STATE_BIT (0)
|
||||
|
||||
struct qedr_dev {
|
||||
struct ib_device ibdev;
|
||||
struct qed_dev *cdev;
|
||||
|
@ -153,6 +155,8 @@ struct qedr_dev {
|
|||
struct qedr_cq *gsi_sqcq;
|
||||
struct qedr_cq *gsi_rqcq;
|
||||
struct qedr_qp *gsi_qp;
|
||||
|
||||
unsigned long enet_state;
|
||||
};
|
||||
|
||||
#define QEDR_MAX_SQ_PBL (0x8000)
|
||||
|
@ -188,6 +192,7 @@ struct qedr_dev {
|
|||
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
|
||||
|
||||
#define QEDR_MAX_PORT (1)
|
||||
#define QEDR_PORT (1)
|
||||
|
||||
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
|
||||
|
||||
|
@ -251,9 +256,6 @@ struct qedr_cq {
|
|||
|
||||
u16 icid;
|
||||
|
||||
/* Lock to protect completion handler */
|
||||
spinlock_t comp_handler_lock;
|
||||
|
||||
/* Lock to protect multiplem CQ's */
|
||||
spinlock_t cq_lock;
|
||||
u8 arm_flags;
|
||||
|
|
|
@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
|
|||
qedr_inc_sw_gsi_cons(&qp->sq);
|
||||
spin_unlock_irqrestore(&qp->q_lock, flags);
|
||||
|
||||
if (cq->ibcq.comp_handler) {
|
||||
spin_lock_irqsave(&cq->comp_handler_lock, flags);
|
||||
if (cq->ibcq.comp_handler)
|
||||
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
|
||||
|
@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
|
|||
|
||||
spin_unlock_irqrestore(&qp->q_lock, flags);
|
||||
|
||||
if (cq->ibcq.comp_handler) {
|
||||
spin_lock_irqsave(&cq->comp_handler_lock, flags);
|
||||
if (cq->ibcq.comp_handler)
|
||||
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
|
||||
|
@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
|
|||
}
|
||||
|
||||
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
|
||||
packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
|
||||
else
|
||||
packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
|
||||
else
|
||||
packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
|
||||
|
||||
packet->roce_mode = roce_mode;
|
||||
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
|
||||
|
|
|
@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
|
|||
struct ib_ucontext *context, struct ib_udata *udata)
|
||||
{
|
||||
struct qedr_dev *dev = get_qedr_dev(ibdev);
|
||||
struct qedr_ucontext *uctx = NULL;
|
||||
struct qedr_alloc_pd_uresp uresp;
|
||||
struct qedr_pd *pd;
|
||||
u16 pd_id;
|
||||
int rc;
|
||||
|
@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
|
|||
if (!pd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
|
||||
rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
uresp.pd_id = pd_id;
|
||||
pd->pd_id = pd_id;
|
||||
|
||||
if (udata && context) {
|
||||
struct qedr_alloc_pd_uresp uresp;
|
||||
|
||||
uresp.pd_id = pd_id;
|
||||
|
||||
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
|
||||
if (rc)
|
||||
if (rc) {
|
||||
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
|
||||
uctx = get_qedr_ucontext(context);
|
||||
uctx->pd = pd;
|
||||
pd->uctx = uctx;
|
||||
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
|
||||
goto err;
|
||||
}
|
||||
|
||||
pd->uctx = get_qedr_ucontext(context);
|
||||
pd->uctx->pd = pd;
|
||||
}
|
||||
|
||||
return &pd->ibpd;
|
||||
|
||||
err:
|
||||
kfree(pd);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
int qedr_dealloc_pd(struct ib_pd *ibpd)
|
||||
|
@ -1600,7 +1610,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
|||
return ERR_PTR(-EFAULT);
|
||||
}
|
||||
|
||||
enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
|
||||
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
|
||||
{
|
||||
switch (qp_state) {
|
||||
case QED_ROCE_QP_STATE_RESET:
|
||||
|
@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
|
|||
return IB_QPS_ERR;
|
||||
}
|
||||
|
||||
enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
|
||||
static enum qed_roce_qp_state qedr_get_state_from_ibqp(
|
||||
enum ib_qp_state qp_state)
|
||||
{
|
||||
switch (qp_state) {
|
||||
case IB_QPS_RESET:
|
||||
|
@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
|
|||
int status = 0;
|
||||
|
||||
if (new_state == qp->state)
|
||||
return 1;
|
||||
return 0;
|
||||
|
||||
switch (qp->state) {
|
||||
case QED_ROCE_QP_STATE_RESET:
|
||||
|
@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
|
|||
/* ERR->XXX */
|
||||
switch (new_state) {
|
||||
case QED_ROCE_QP_STATE_RESET:
|
||||
if ((qp->rq.prod != qp->rq.cons) ||
|
||||
(qp->sq.prod != qp->sq.cons)) {
|
||||
DP_NOTICE(dev,
|
||||
"Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
|
||||
qp->rq.prod, qp->rq.cons, qp->sq.prod,
|
||||
qp->sq.cons);
|
||||
status = -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
status = -EINVAL;
|
||||
|
@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
|
||||
qp_params.remote_mac_addr);
|
||||
;
|
||||
|
||||
qp_params.mtu = qp->mtu;
|
||||
qp_params.lb_indication = false;
|
||||
|
@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
|
|||
|
||||
qp_attr->qp_state = qedr_get_ibqp_state(params.state);
|
||||
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
|
||||
qp_attr->path_mtu = iboe_get_mtu(params.mtu);
|
||||
qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
|
||||
qp_attr->path_mig_state = IB_MIG_MIGRATED;
|
||||
qp_attr->rq_psn = params.rq_psn;
|
||||
qp_attr->sq_psn = params.sq_psn;
|
||||
|
@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
|
|||
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
|
||||
qp_attr->cap.max_send_sge = qp->sq.max_sges;
|
||||
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
|
||||
qp_attr->cap.max_inline_data = qp->max_inline_data;
|
||||
qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
|
||||
qp_init_attr->cap = qp_attr->cap;
|
||||
|
||||
memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0],
|
||||
|
@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
|
|||
return rc;
|
||||
}
|
||||
|
||||
struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
|
||||
static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
|
||||
int max_page_list_len)
|
||||
{
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
|
||||
|
@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
|
||||
static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
|
||||
{
|
||||
switch (opcode) {
|
||||
case IB_WR_RDMA_WRITE:
|
||||
|
@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
|
|||
}
|
||||
}
|
||||
|
||||
inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
|
||||
static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
|
||||
{
|
||||
int wq_is_full, err_wr, pbl_is_full;
|
||||
struct qedr_dev *dev = qp->dev;
|
||||
|
@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
|
|||
return true;
|
||||
}
|
||||
|
||||
int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr)
|
||||
{
|
||||
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
|
||||
|
@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
|
|||
IB_WC_SUCCESS, 0);
|
||||
break;
|
||||
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
|
||||
DP_ERR(dev,
|
||||
"Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
|
||||
cq->icid, qp->icid);
|
||||
if (qp->state != QED_ROCE_QP_STATE_ERR)
|
||||
DP_ERR(dev,
|
||||
"Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
|
||||
cq->icid, qp->icid);
|
||||
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
|
||||
IB_WC_WR_FLUSH_ERR, 1);
|
||||
break;
|
||||
|
|
|
@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
|
|||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to allocate interrupts\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_netdevice;
|
||||
goto err_free_cq_ring;
|
||||
}
|
||||
|
||||
/* Allocate UAR table. */
|
||||
|
@ -1092,8 +1092,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
|
|||
err_free_intrs:
|
||||
pvrdma_free_irq(dev);
|
||||
pvrdma_disable_msi_all(dev);
|
||||
err_netdevice:
|
||||
unregister_netdevice_notifier(&dev->nb_netdev);
|
||||
err_free_cq_ring:
|
||||
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
|
||||
err_free_async_ring:
|
||||
|
|
|
@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
|
|||
union pvrdma_cmd_resp rsp;
|
||||
struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
|
||||
struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
|
||||
struct pvrdma_alloc_ucontext_resp uresp;
|
||||
struct pvrdma_alloc_ucontext_resp uresp = {0};
|
||||
int ret;
|
||||
void *ptr;
|
||||
|
||||
|
|
|
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
}

spin_lock_bh(&dev_list_lock);
list_add_tail(&rxe_dev_list, &rxe->list);
list_add_tail(&rxe->list, &rxe_dev_list);
spin_unlock_bh(&dev_list_lock);
return rxe;
}
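The hunk above swaps the two arguments of list_add_tail(): in the kernel list API the first argument is the entry being inserted and the second is the list head it is appended to, so the new rxe device's node (&rxe->list) must come first and the global rxe_dev_list second. A small standalone illustration of that contract follows, with a minimal re-implementation and made-up structure names (not the rxe types).

#include <stdio.h>

/* Minimal stand-in for the kernel's struct list_head and list_add_tail();
 * semantics match <linux/list.h>: "new" is linked in just before "head",
 * i.e. at the tail of the list that "head" anchors. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct demo_dev { int id; struct list_head list; };

int main(void)
{
        static struct list_head dev_list = LIST_HEAD_INIT(dev_list);
        struct demo_dev dev = { .id = 1 };

        /* entry first, list head second -- the ordering the fix restores */
        list_add_tail(&dev.list, &dev_list);

        printf("dev is at the tail of dev_list: %s\n",
               dev_list.prev == &dev.list ? "yes" : "no");
        return 0;
}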
@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
|
|||
del_timer_sync(&qp->rnr_nak_timer);
|
||||
|
||||
rxe_cleanup_task(&qp->req.task);
|
||||
if (qp_type(qp) == IB_QPT_RC)
|
||||
rxe_cleanup_task(&qp->comp.task);
|
||||
rxe_cleanup_task(&qp->comp.task);
|
||||
|
||||
/* flush out any receive wr's or pending requests */
|
||||
__rxe_do_task(&qp->req.task);
|
||||
|
|
|
@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
|
|||
SHOST_DIX_GUARD_CRC);
|
||||
}
|
||||
|
||||
/*
|
||||
* Limit the sg_tablesize and max_sectors based on the device
|
||||
* max fastreg page list length.
|
||||
*/
|
||||
shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
|
||||
ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
|
||||
|
||||
if (iscsi_host_add(shost,
|
||||
ib_conn->device->ib_device->dma_device)) {
|
||||
mutex_unlock(&iser_conn->state_mutex);
|
||||
|
@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
|
|||
max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
|
||||
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
|
||||
|
||||
iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
|
||||
iser_conn, shost->sg_tablesize,
|
||||
shost->max_sectors);
|
||||
|
||||
if (cmds_max > max_cmds) {
|
||||
iser_info("cmds_max changed from %u to %u\n",
|
||||
cmds_max, max_cmds);
|
||||
|
|
|
@ -496,7 +496,6 @@ struct ib_conn {
|
|||
* @rx_descs: rx buffers array (cyclic buffer)
|
||||
* @num_rx_descs: number of rx descriptors
|
||||
* @scsi_sg_tablesize: scsi host sg_tablesize
|
||||
* @scsi_max_sectors: scsi host max sectors
|
||||
*/
|
||||
struct iser_conn {
|
||||
struct ib_conn ib_conn;
|
||||
|
@ -519,7 +518,6 @@ struct iser_conn {
|
|||
struct iser_rx_desc *rx_descs;
|
||||
u32 num_rx_descs;
|
||||
unsigned short scsi_sg_tablesize;
|
||||
unsigned int scsi_max_sectors;
|
||||
bool snd_w_inv;
|
||||
};
|
||||
|
||||
|
|
|
@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
|
|||
sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
|
||||
device->ib_device->attrs.max_fast_reg_page_list_len);
|
||||
|
||||
if (sg_tablesize > sup_sg_tablesize) {
|
||||
sg_tablesize = sup_sg_tablesize;
|
||||
iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
|
||||
} else {
|
||||
iser_conn->scsi_max_sectors = max_sectors;
|
||||
}
|
||||
|
||||
iser_conn->scsi_sg_tablesize = sg_tablesize;
|
||||
|
||||
iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
|
||||
iser_conn, iser_conn->scsi_sg_tablesize,
|
||||
iser_conn->scsi_max_sectors);
|
||||
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
|
|||
struct srp_fr_desc *d;
|
||||
struct ib_mr *mr;
|
||||
int i, ret = -EINVAL;
|
||||
enum ib_mr_type mr_type;
|
||||
|
||||
if (pool_size <= 0)
|
||||
goto err;
|
||||
|
@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
|
|||
spin_lock_init(&pool->lock);
|
||||
INIT_LIST_HEAD(&pool->free_list);
|
||||
|
||||
if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
|
||||
mr_type = IB_MR_TYPE_SG_GAPS;
|
||||
else
|
||||
mr_type = IB_MR_TYPE_MEM_REG;
|
||||
|
||||
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
|
||||
mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
|
||||
max_page_list_len);
|
||||
mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
|
||||
if (IS_ERR(mr)) {
|
||||
ret = PTR_ERR(mr);
|
||||
if (ret == -ENOMEM)
|
||||
|
@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
|
|||
indirect_sg_entries = cmd_sg_entries;
|
||||
}
|
||||
|
||||
if (indirect_sg_entries > SG_MAX_SEGMENTS) {
|
||||
pr_warn("Clamping indirect_sg_entries to %u\n",
|
||||
SG_MAX_SEGMENTS);
|
||||
indirect_sg_entries = SG_MAX_SEGMENTS;
|
||||
}
|
||||
|
||||
srp_remove_wq = create_workqueue("srp_remove");
|
||||
if (!srp_remove_wq) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
|
|||
((CAPI_MSG *) msg)->header.ncci = 0;
|
||||
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
|
||||
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
|
||||
PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
|
||||
((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
|
||||
((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
|
||||
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
|
||||
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
|
||||
if (w != _QUEUE_FULL)
|
||||
|
|
|
@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
|
|||
if (start_readonly && mddev->ro == 0)
|
||||
mddev->ro = 2; /* read-only, but switch on first write */
|
||||
|
||||
/*
|
||||
* NOTE: some pers->run(), for example r5l_recovery_log(), wakes
|
||||
* up mddev->thread. It is important to initialize critical
|
||||
* resources for mddev->thread BEFORE calling pers->run().
|
||||
*/
|
||||
err = pers->run(mddev);
|
||||
if (err)
|
||||
pr_warn("md: pers->run() failed ...\n");
|
||||
|
|
|
@ -162,6 +162,8 @@ struct r5l_log {
|
|||
|
||||
/* to submit async io_units, to fulfill ordering of flush */
|
||||
struct work_struct deferred_io_work;
|
||||
/* to disable write back during in degraded mode */
|
||||
struct work_struct disable_writeback_work;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
|
|||
r5l_do_submit_io(log, io);
|
||||
}
|
||||
|
||||
static void r5c_disable_writeback_async(struct work_struct *work)
|
||||
{
|
||||
struct r5l_log *log = container_of(work, struct r5l_log,
|
||||
disable_writeback_work);
|
||||
struct mddev *mddev = log->rdev->mddev;
|
||||
|
||||
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
|
||||
return;
|
||||
pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
|
||||
mdname(mddev));
|
||||
mddev_suspend(mddev);
|
||||
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
|
||||
mddev_resume(mddev);
|
||||
}
|
||||
|
||||
static void r5l_submit_current_io(struct r5l_log *log)
|
||||
{
|
||||
struct r5l_io_unit *io = log->current_io;
|
||||
|
@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
|
|||
next_checkpoint = r5c_calculate_new_cp(conf);
|
||||
spin_unlock_irq(&log->io_list_lock);
|
||||
|
||||
BUG_ON(reclaimable < 0);
|
||||
|
||||
if (reclaimable == 0 || !write_super)
|
||||
return;
|
||||
|
||||
|
@ -2062,7 +2077,7 @@ static int
|
|||
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
|
||||
struct r5l_recovery_ctx *ctx)
|
||||
{
|
||||
struct stripe_head *sh, *next;
|
||||
struct stripe_head *sh;
|
||||
struct mddev *mddev = log->rdev->mddev;
|
||||
struct page *page;
|
||||
sector_t next_checkpoint = MaxSector;
|
||||
|
@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
|
|||
|
||||
WARN_ON(list_empty(&ctx->cached_list));
|
||||
|
||||
list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
|
||||
list_for_each_entry(sh, &ctx->cached_list, lru) {
|
||||
struct r5l_meta_block *mb;
|
||||
int i;
|
||||
int offset;
|
||||
|
@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
|
|||
ctx->pos = write_pos;
|
||||
ctx->seq += 1;
|
||||
next_checkpoint = sh->log_start;
|
||||
list_del_init(&sh->lru);
|
||||
raid5_release_stripe(sh);
|
||||
}
|
||||
log->next_checkpoint = next_checkpoint;
|
||||
__free_page(page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
|
||||
struct r5l_recovery_ctx *ctx)
|
||||
{
|
||||
struct mddev *mddev = log->rdev->mddev;
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct stripe_head *sh, *next;
|
||||
|
||||
if (ctx->data_only_stripes == 0)
|
||||
return;
|
||||
|
||||
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
|
||||
|
||||
list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
|
||||
r5c_make_stripe_write_out(sh);
|
||||
set_bit(STRIPE_HANDLE, &sh->state);
|
||||
list_del_init(&sh->lru);
|
||||
raid5_release_stripe(sh);
|
||||
}
|
||||
|
||||
md_wakeup_thread(conf->mddev->thread);
|
||||
/* reuse conf->wait_for_quiescent in recovery */
|
||||
wait_event(conf->wait_for_quiescent,
|
||||
atomic_read(&conf->active_stripes) == 0);
|
||||
|
||||
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
|
||||
}
|
||||
|
||||
static int r5l_recovery_log(struct r5l_log *log)
|
||||
{
|
||||
struct mddev *mddev = log->rdev->mddev;
|
||||
|
@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
|
|||
pos = ctx.pos;
|
||||
ctx.seq += 10000;
|
||||
|
||||
if (ctx.data_only_stripes == 0) {
|
||||
log->next_checkpoint = ctx.pos;
|
||||
r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
|
||||
ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
|
||||
}
|
||||
|
||||
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
|
||||
pr_debug("md/raid:%s: starting from clean shutdown\n",
|
||||
mdname(mddev));
|
||||
else {
|
||||
else
|
||||
pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
|
||||
mdname(mddev), ctx.data_only_stripes,
|
||||
ctx.data_parity_stripes);
|
||||
|
||||
if (ctx.data_only_stripes > 0)
|
||||
if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
|
||||
pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
|
||||
mdname(mddev));
|
||||
return -EIO;
|
||||
}
|
||||
if (ctx.data_only_stripes == 0) {
|
||||
log->next_checkpoint = ctx.pos;
|
||||
r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
|
||||
ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
|
||||
} else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
|
||||
pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
|
||||
mdname(mddev));
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
log->log_start = ctx.pos;
|
||||
log->seq = ctx.seq;
|
||||
log->last_checkpoint = pos;
|
||||
r5l_write_super(log, pos);
|
||||
|
||||
r5c_recovery_flush_data_only_stripes(log, &ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
|
|||
val > R5C_JOURNAL_MODE_WRITE_BACK)
|
||||
return -EINVAL;
|
||||
|
||||
if (raid5_calc_degraded(conf) > 0 &&
|
||||
val == R5C_JOURNAL_MODE_WRITE_BACK)
|
||||
return -EINVAL;
|
||||
|
||||
mddev_suspend(mddev);
|
||||
conf->log->r5c_journal_mode = val;
|
||||
mddev_resume(mddev);
|
||||
|
@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
|
|||
set_bit(STRIPE_R5C_CACHING, &sh->state);
|
||||
}
|
||||
|
||||
/*
|
||||
* When run in degraded mode, array is set to write-through mode.
|
||||
* This check helps drain pending write safely in the transition to
|
||||
* write-through mode.
|
||||
*/
|
||||
if (s->failed) {
|
||||
r5c_make_stripe_write_out(sh);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
for (i = disks; i--; ) {
|
||||
dev = &sh->dev[i];
|
||||
/* if non-overwrite, use writing-out phase */
|
||||
|
@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
|
|||
struct page *p = sh->dev[i].orig_page;
|
||||
|
||||
sh->dev[i].orig_page = sh->dev[i].page;
|
||||
clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
|
||||
|
||||
if (!using_disk_info_extra_page)
|
||||
put_page(p);
|
||||
}
|
||||
|
@ -2555,6 +2610,19 @@ static int r5l_load_log(struct r5l_log *log)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void r5c_update_on_rdev_error(struct mddev *mddev)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct r5l_log *log = conf->log;
|
||||
|
||||
if (!log)
|
||||
return;
|
||||
|
||||
if (raid5_calc_degraded(conf) > 0 &&
|
||||
conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
|
||||
schedule_work(&log->disable_writeback_work);
|
||||
}
|
||||
|
||||
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(rdev->bdev);
|
||||
|
@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
|
|||
spin_lock_init(&log->no_space_stripes_lock);
|
||||
|
||||
INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
|
||||
INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
|
||||
|
||||
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
|
||||
INIT_LIST_HEAD(&log->stripe_in_journal_list);
|
||||
|
@ -2659,6 +2728,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
|
|||
|
||||
void r5l_exit_log(struct r5l_log *log)
|
||||
{
|
||||
flush_work(&log->disable_writeback_work);
|
||||
md_unregister_thread(&log->reclaim_thread);
|
||||
mempool_destroy(log->meta_pool);
|
||||
bioset_free(log->bs);
|
||||
|
|
|
@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
|
|||
* of the two sections, and some non-in_sync devices may
|
||||
* be insync in the section most affected by failed devices.
|
||||
*/
|
||||
static int calc_degraded(struct r5conf *conf)
|
||||
int raid5_calc_degraded(struct r5conf *conf)
|
||||
{
|
||||
int degraded, degraded2;
|
||||
int i;
|
||||
|
@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
|
|||
if (conf->mddev->reshape_position == MaxSector)
|
||||
return conf->mddev->degraded > conf->max_degraded;
|
||||
|
||||
degraded = calc_degraded(conf);
|
||||
degraded = raid5_calc_degraded(conf);
|
||||
if (degraded > conf->max_degraded)
|
||||
return 1;
|
||||
return 0;
|
||||
|
@ -1015,7 +1015,17 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
|
|||
|
||||
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
|
||||
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
|
||||
sh->dev[i].vec.bv_page = sh->dev[i].page;
|
||||
|
||||
if (!op_is_write(op) &&
|
||||
test_bit(R5_InJournal, &sh->dev[i].flags))
|
||||
/*
|
||||
* issuing read for a page in journal, this
|
||||
* must be preparing for prexor in rmw; read
|
||||
* the data into orig_page
|
||||
*/
|
||||
sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
|
||||
else
|
||||
sh->dev[i].vec.bv_page = sh->dev[i].page;
|
||||
bi->bi_vcnt = 1;
|
||||
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
|
||||
bi->bi_io_vec[0].bv_offset = 0;
|
||||
|
@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
|
|||
} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
|
||||
clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
|
||||
|
||||
if (test_bit(R5_InJournal, &sh->dev[i].flags))
|
||||
/*
|
||||
* end read for a page in journal, this
|
||||
* must be preparing for prexor in rmw
|
||||
*/
|
||||
set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
|
||||
|
||||
if (atomic_read(&rdev->read_errors))
|
||||
atomic_set(&rdev->read_errors, 0);
|
||||
} else {
|
||||
|
@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
|
|||
|
||||
spin_lock_irqsave(&conf->device_lock, flags);
|
||||
clear_bit(In_sync, &rdev->flags);
|
||||
mddev->degraded = calc_degraded(conf);
|
||||
mddev->degraded = raid5_calc_degraded(conf);
|
||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
||||
|
||||
|
@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
|
|||
bdevname(rdev->bdev, b),
|
||||
mdname(mddev),
|
||||
conf->raid_disks - mddev->degraded);
|
||||
r5c_update_on_rdev_error(mddev);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
|
|||
return r_sector;
|
||||
}
|
||||
|
||||
/*
|
||||
* There are cases where we want handle_stripe_dirtying() and
|
||||
* schedule_reconstruction() to delay towrite to some dev of a stripe.
|
||||
*
|
||||
* This function checks whether we want to delay the towrite. Specifically,
|
||||
* we delay the towrite when:
|
||||
*
|
||||
* 1. degraded stripe has a non-overwrite to the missing dev, AND this
|
||||
* stripe has data in journal (for other devices).
|
||||
*
|
||||
* In this case, when reading data for the non-overwrite dev, it is
|
||||
* necessary to handle complex rmw of write back cache (prexor with
|
||||
* orig_page, and xor with page). To keep read path simple, we would
|
||||
* like to flush data in journal to RAID disks first, so complex rmw
|
||||
* is handled in the write patch (handle_stripe_dirtying).
|
||||
*
|
||||
*/
|
||||
static inline bool delay_towrite(struct r5dev *dev,
|
||||
struct stripe_head_state *s)
|
||||
{
|
||||
return !test_bit(R5_OVERWRITE, &dev->flags) &&
|
||||
!test_bit(R5_Insync, &dev->flags) && s->injournal;
|
||||
}
|
||||
|
||||
static void
|
||||
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
||||
int rcw, int expand)
|
||||
|
@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
|
|||
for (i = disks; i--; ) {
|
||||
struct r5dev *dev = &sh->dev[i];
|
||||
|
||||
if (dev->towrite) {
|
||||
if (dev->towrite && !delay_towrite(dev, s)) {
|
||||
set_bit(R5_LOCKED, &dev->flags);
|
||||
set_bit(R5_Wantdrain, &dev->flags);
|
||||
if (!expand)
|
||||
|
@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
|
|||
return rv;
|
||||
}
|
||||
|
||||
/* fetch_block - checks the given member device to see if its data needs
|
||||
* to be read or computed to satisfy a request.
|
||||
*
|
||||
* Returns 1 when no more member devices need to be checked, otherwise returns
|
||||
* 0 to tell the loop in handle_stripe_fill to continue
|
||||
*/
|
||||
|
||||
static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
|
||||
int disk_idx, int disks)
|
||||
{
|
||||
|
@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* fetch_block - checks the given member device to see if its data needs
|
||||
* to be read or computed to satisfy a request.
|
||||
*
|
||||
* Returns 1 when no more member devices need to be checked, otherwise returns
|
||||
* 0 to tell the loop in handle_stripe_fill to continue
|
||||
*/
|
||||
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
|
||||
int disk_idx, int disks)
|
||||
{
|
||||
|
@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
|
|||
* midst of changing due to a write
|
||||
*/
|
||||
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
|
||||
!sh->reconstruct_state)
|
||||
!sh->reconstruct_state) {
|
||||
|
||||
/*
|
||||
* For degraded stripe with data in journal, do not handle
|
||||
* read requests yet, instead, flush the stripe to raid
|
||||
* disks first, this avoids handling complex rmw of write
|
||||
* back cache (prexor with orig_page, and then xor with
|
||||
* page) in the read path
|
||||
*/
|
||||
if (s->injournal && s->failed) {
|
||||
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
|
||||
r5c_make_stripe_write_out(sh);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = disks; i--; )
|
||||
if (fetch_block(sh, s, i, disks))
|
||||
break;
|
||||
}
|
||||
out:
|
||||
set_bit(STRIPE_HANDLE, &sh->state);
|
||||
}
|
||||
|
||||
|
@ -3594,6 +3651,21 @@ static void handle_stripe_clean_event(struct r5conf *conf,
|
|||
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
|
||||
}
|
||||
|
||||
/*
|
||||
* For RMW in write back cache, we need extra page in prexor to store the
|
||||
* old data. This page is stored in dev->orig_page.
|
||||
*
|
||||
* This function checks whether we have data for prexor. The exact logic
|
||||
* is:
|
||||
* R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
|
||||
*/
|
||||
static inline bool uptodate_for_rmw(struct r5dev *dev)
|
||||
{
|
||||
return (test_bit(R5_UPTODATE, &dev->flags)) &&
|
||||
(!test_bit(R5_InJournal, &dev->flags) ||
|
||||
test_bit(R5_OrigPageUPTDODATE, &dev->flags));
|
||||
}
|
||||
|
||||
static int handle_stripe_dirtying(struct r5conf *conf,
|
||||
struct stripe_head *sh,
|
||||
struct stripe_head_state *s,
|
||||
|
@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
|
|||
} else for (i = disks; i--; ) {
|
||||
/* would I have to read this buffer for read_modify_write */
|
||||
struct r5dev *dev = &sh->dev[i];
|
||||
if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
|
||||
if (((dev->towrite && !delay_towrite(dev, s)) ||
|
||||
i == sh->pd_idx || i == sh->qd_idx ||
|
||||
test_bit(R5_InJournal, &dev->flags)) &&
|
||||
!test_bit(R5_LOCKED, &dev->flags) &&
|
||||
!((test_bit(R5_UPTODATE, &dev->flags) &&
|
||||
(!test_bit(R5_InJournal, &dev->flags) ||
|
||||
dev->page != dev->orig_page)) ||
|
||||
!(uptodate_for_rmw(dev) ||
|
||||
test_bit(R5_Wantcompute, &dev->flags))) {
|
||||
if (test_bit(R5_Insync, &dev->flags))
|
||||
rmw++;
|
||||
|
@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
|
|||
i != sh->pd_idx && i != sh->qd_idx &&
|
||||
!test_bit(R5_LOCKED, &dev->flags) &&
|
||||
!(test_bit(R5_UPTODATE, &dev->flags) ||
|
||||
test_bit(R5_InJournal, &dev->flags) ||
|
||||
test_bit(R5_Wantcompute, &dev->flags))) {
|
||||
if (test_bit(R5_Insync, &dev->flags))
|
||||
rcw++;
|
||||
|
@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
|
|||
|
||||
for (i = disks; i--; ) {
|
||||
struct r5dev *dev = &sh->dev[i];
|
||||
if ((dev->towrite ||
|
||||
if (((dev->towrite && !delay_towrite(dev, s)) ||
|
||||
i == sh->pd_idx || i == sh->qd_idx ||
|
||||
test_bit(R5_InJournal, &dev->flags)) &&
|
||||
!test_bit(R5_LOCKED, &dev->flags) &&
|
||||
!((test_bit(R5_UPTODATE, &dev->flags) &&
|
||||
(!test_bit(R5_InJournal, &dev->flags) ||
|
||||
dev->page != dev->orig_page)) ||
|
||||
!(uptodate_for_rmw(dev) ||
|
||||
test_bit(R5_Wantcompute, &dev->flags)) &&
|
||||
test_bit(R5_Insync, &dev->flags)) {
|
||||
if (test_bit(STRIPE_PREREAD_ACTIVE,
|
||||
|
@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
|
|||
i != sh->pd_idx && i != sh->qd_idx &&
|
||||
!test_bit(R5_LOCKED, &dev->flags) &&
|
||||
!(test_bit(R5_UPTODATE, &dev->flags) ||
|
||||
test_bit(R5_InJournal, &dev->flags) ||
|
||||
test_bit(R5_Wantcompute, &dev->flags))) {
|
||||
rcw++;
|
||||
if (test_bit(R5_Insync, &dev->flags) &&
|
||||
|
@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
|
|||
/*
|
||||
* 0 for a fully functional array, 1 or 2 for a degraded array.
|
||||
*/
|
||||
mddev->degraded = calc_degraded(conf);
|
||||
mddev->degraded = raid5_calc_degraded(conf);
|
||||
|
||||
if (has_failed(conf)) {
|
||||
pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
|
||||
|
@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
|
|||
}
|
||||
}
|
||||
spin_lock_irqsave(&conf->device_lock, flags);
|
||||
mddev->degraded = calc_degraded(conf);
|
||||
mddev->degraded = raid5_calc_degraded(conf);
|
||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
print_raid5_conf(conf);
|
||||
return count;
|
||||
|
@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
|
|||
* pre and post number of devices.
|
||||
*/
|
||||
spin_lock_irqsave(&conf->device_lock, flags);
|
||||
mddev->degraded = calc_degraded(conf);
|
||||
mddev->degraded = raid5_calc_degraded(conf);
|
||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
}
|
||||
mddev->raid_disks = conf->raid_disks;
|
||||
|
@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
|
|||
} else {
|
||||
int d;
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
mddev->degraded = calc_degraded(conf);
|
||||
mddev->degraded = raid5_calc_degraded(conf);
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
for (d = conf->raid_disks ;
|
||||
d < conf->raid_disks - mddev->delta_disks;
|
||||
|
|
|
@ -322,6 +322,11 @@ enum r5dev_flags {
|
|||
* data and parity being written are in the journal
|
||||
* device
|
||||
*/
|
||||
R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
|
||||
* dev->orig_page for prexor. When this flag is
|
||||
* set, orig_page contains latest data in the
|
||||
* raid disk.
|
||||
*/
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
|
|||
extern struct stripe_head *
|
||||
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
|
||||
int previous, int noblock, int noquiesce);
|
||||
extern int raid5_calc_degraded(struct r5conf *conf);
|
||||
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
|
||||
extern void r5l_exit_log(struct r5l_log *log);
|
||||
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
|
||||
|
@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
|
|||
extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
|
||||
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
|
||||
extern struct md_sysfs_entry r5c_journal_mode;
|
||||
extern void r5c_update_on_rdev_error(struct mddev *mddev);
|
||||
#endif
|
||||
|
|
|
@ -30,8 +30,9 @@
|
|||
|
||||
#include "cec-priv.h"

static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
static void cec_fill_msg_report_features(struct cec_adapter *adap,
struct cec_msg *msg,
unsigned int la_idx);

/*
 * 400 ms is the time it takes for one 16 byte message to be

@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
/* Mark it as an error */
data->msg.tx_ts = ktime_get_ns();
data->msg.tx_status = CEC_TX_STATUS_ERROR |
CEC_TX_STATUS_MAX_RETRIES;
data->msg.tx_status |= CEC_TX_STATUS_ERROR |
CEC_TX_STATUS_MAX_RETRIES;
data->msg.tx_error_cnt++;
data->attempts = 0;
data->msg.tx_error_cnt = 1;
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(data->adap, &data->msg, 1);

@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
[CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
[CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
};

@@ -1250,30 +1251,49 @@ static int cec_config_thread_func(void *arg)
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->is_configured = true;
adap->is_configuring = false;
cec_post_state_event(adap);
mutex_unlock(&adap->lock);

/*
 * Now post the Report Features and Report Physical Address broadcast
 * messages. Note that these are non-blocking transmits, meaning that
 * they are just queued up and once adap->lock is unlocked the main
 * thread will kick in and start transmitting these.
 *
 * If after this function is done (but before one or more of these
 * messages are actually transmitted) the CEC adapter is unconfigured,
 * then any remaining messages will be dropped by the main thread.
 */
for (i = 0; i < las->num_log_addrs; i++) {
struct cec_msg msg = {};

if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;

/*
 * Report Features must come first according
 * to CEC 2.0
 */
if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
cec_report_features(adap, i);
cec_report_phys_addr(adap, i);
msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;

/* Report Features must come first according to CEC 2.0 */
if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
cec_fill_msg_report_features(adap, &msg, i);
cec_transmit_msg_fh(adap, &msg, NULL, false);
}

/* Report Physical Address */
cec_msg_report_physical_addr(&msg, adap->phys_addr,
las->primary_device_type[i]);
dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
las->log_addr[i],
cec_phys_addr_exp(adap->phys_addr));
cec_transmit_msg_fh(adap, &msg, NULL, false);
}
for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
mutex_lock(&adap->lock);
adap->kthread_config = NULL;
mutex_unlock(&adap->lock);
complete(&adap->config_completion);
mutex_unlock(&adap->lock);
return 0;

unconfigure:

@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);

/* High-level core CEC message handling */

/* Transmit the Report Features message */
static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
/* Fill in the Report Features message */
static void cec_fill_msg_report_features(struct cec_adapter *adap,
struct cec_msg *msg,
unsigned int la_idx)
{
struct cec_msg msg = { };
const struct cec_log_addrs *las = &adap->log_addrs;
const u8 *features = las->features[la_idx];
bool op_is_dev_features = false;
unsigned int idx;

/* This is 2.0 and up only */
if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
return 0;

/* Report Features */
msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
msg.len = 4;
msg.msg[1] = CEC_MSG_REPORT_FEATURES;
msg.msg[2] = adap->log_addrs.cec_version;
msg.msg[3] = las->all_device_types[la_idx];
msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
msg->len = 4;
msg->msg[1] = CEC_MSG_REPORT_FEATURES;
msg->msg[2] = adap->log_addrs.cec_version;
msg->msg[3] = las->all_device_types[la_idx];

/* Write RC Profiles first, then Device Features */
for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
msg.msg[msg.len++] = features[idx];
msg->msg[msg->len++] = features[idx];
if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
return cec_transmit_msg(adap, &msg, false);
}

/* Transmit the Report Physical Address message */
static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
{
const struct cec_log_addrs *las = &adap->log_addrs;
struct cec_msg msg = { };

/* Report Physical Address */
msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
cec_msg_report_physical_addr(&msg, adap->phys_addr,
las->primary_device_type[la_idx]);
dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
las->log_addr[la_idx],
cec_phys_addr_exp(adap->phys_addr));
return cec_transmit_msg(adap, &msg, false);
}

/* Transmit the Feature Abort message */

@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}

case CEC_MSG_GIVE_FEATURES:
if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
return cec_report_features(adap, la_idx);
return 0;
if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
return cec_feature_abort(adap, msg);
cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
return cec_transmit_msg(adap, &tx_cec_msg, false);

default:
/*
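For reference, the <Report Features> payload that cec_fill_msg_report_features() assembles above can be sketched as a small, self-contained C helper. This is an illustration only, not driver code: the opcode value 0xa6 and the bit-7 "more operands follow" flag are assumptions taken from the CEC 2.0 specification rather than from the hunk itself.

#include <stdint.h>
#include <stddef.h>

#define FEAT_EXT 0x80	/* assumed continuation bit, mirrors CEC_OP_FEAT_EXT */

/* Build <Report Features>: header, opcode, CEC version, All Device Types,
 * then RC-profile and device-feature operands chained via FEAT_EXT. */
static size_t fill_report_features(uint8_t *msg, uint8_t log_addr,
				   uint8_t cec_version, uint8_t all_dev_types,
				   const uint8_t *features, size_t nfeat)
{
	size_t len = 0, i;
	int in_dev_features = 0;

	msg[len++] = (log_addr << 4) | 0x0f;	/* broadcast header */
	msg[len++] = 0xa6;			/* assumed <Report Features> opcode */
	msg[len++] = cec_version;
	msg[len++] = all_dev_types;
	for (i = 0; i < nfeat; i++) {		/* RC profiles first, then device features */
		msg[len++] = features[i];
		if (!(features[i] & FEAT_EXT)) {	/* last byte of this operand block */
			if (in_dev_features)
				break;
			in_dev_features = 1;
		}
	}
	return len;
}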
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
ETH_ALEN);
skb_pull(h->priv->ule_skb, ETH_ALEN);
} else {
/* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
eth_zero_addr(dest_addr);
}

/* Handle ULE Extension Headers. */

@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
if (!h->priv->ule_bridged) {
skb_push(h->priv->ule_skb, ETH_HLEN);
h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
if (!h->priv->ule_dbit) {
/*
 * dest_addr buffer is only valid if
 * h->priv->ule_dbit == 0
 */
memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
eth_zero_addr(h->ethh->h_source);
} else /* zeroize source and dest */
memset(h->ethh, 0, ETH_ALEN * 2);

memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
eth_zero_addr(h->ethh->h_source);
h->ethh->h_proto = htons(h->priv->ule_sndu_type);
}
/* else: skb is in correct state; nothing to do. */
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select CRC32
---help---
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
 * I2C Driver
 */

#ifdef CONFIG_PM

static int smiapp_suspend(struct device *dev)
static int __maybe_unused smiapp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);

@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
return 0;
}

static int smiapp_resume(struct device *dev)
static int __maybe_unused smiapp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);

@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
return rval;
}

#else

#define smiapp_suspend NULL
#define smiapp_resume NULL

#endif /* CONFIG_PM */

static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
struct smiapp_hwconfig *hwcfg;

@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
if (IS_ERR(sensor->xshutdown))
return PTR_ERR(sensor->xshutdown);

pm_runtime_enable(&client->dev);

rval = pm_runtime_get_sync(&client->dev);
if (rval < 0) {
rval = -ENODEV;
goto out_power_off;
}
rval = smiapp_power_on(&client->dev);
if (rval < 0)
return rval;

rval = smiapp_identify_module(sensor);
if (rval) {

@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;

pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);

@@ -3113,8 +3103,7 @@ static int smiapp_probe(struct i2c_client *client,
smiapp_cleanup(sensor);

out_power_off:
pm_runtime_put(&client->dev);
pm_runtime_disable(&client->dev);
smiapp_power_off(&client->dev);

return rval;
}

@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)

v4l2_async_unregister_subdev(subdev);

pm_runtime_suspend(&client->dev);
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev))
smiapp_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);

for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);

/* Svideo should enable YCrCb output and disable GPCL output
 * For Composite and TV, it should be the reverse
/*
 * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
 * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
 * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
 * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
 * INTREQ/GPCL/VBLK to logic 1.
 */
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {

@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
}

if (decoder->input == TVP5150_SVIDEO)
val = (val & ~0x40) | 0x10;
val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
else
val = (val & ~0x10) | 0x40;
val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
tvp5150_write(sd, TVP5150_MISC_CTL, val);
};

@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
},{ /* Automatic offset and AGC enabled */
TVP5150_ANAL_CHL_CTL, 0x15
},{ /* Activate YCrCb output 0x9 or 0xd ? */
TVP5150_MISC_CTL, 0x6f
TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
TVP5150_MISC_CTL_INTREQ_OE |
TVP5150_MISC_CTL_YCBCR_OE |
TVP5150_MISC_CTL_SYNC_OE |
TVP5150_MISC_CTL_VBLANK |
TVP5150_MISC_CTL_CLOCK_OE,
},{ /* Activates video std autodetection for all standards */
TVP5150_AUTOSW_MSK, 0x0
},{ /* Default format: 0x47. For 4:2:2: 0x40 */

@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,

f = &format->format;

tvp5150_reset(sd, 0);

f->width = decoder->rect.width;
f->height = decoder->rect.height / 2;

@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp5150 *decoder = to_tvp5150(sd);
/* Output format: 8-bit ITU-R BT.656 with embedded syncs */
int val = 0x09;
int val;

/* Output format: 8-bit 4:2:2 YUV with discrete sync */
if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
val = 0x0d;
/* Enable or disable the video output signals. */
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0)
return val;

/* Initializes TVP5150 to its default values */
/* # set PCLK (27MHz) */
tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
TVP5150_MISC_CTL_CLOCK_OE);

if (enable)
tvp5150_write(sd, TVP5150_MISC_CTL, val);
else
tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
if (enable) {
/*
 * Enable the YCbCr and clock outputs. In discrete sync mode
 * (non-BT.656) additionally enable the the sync outputs.
 */
val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
val |= TVP5150_MISC_CTL_SYNC_OE;
}

tvp5150_write(sd, TVP5150_MISC_CTL, val);

return 0;
}

@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
res = core->hdl.error;
goto err;
}
v4l2_ctrl_handler_setup(&core->hdl);

/* Default is no cropping */
core->rect.top = 0;

@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
core->rect.left = 0;
core->rect.width = TVP5150_H_MAX;

tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */

res = v4l2_async_register_subdev(sd);
if (res < 0)
goto err;
@@ -9,6 +9,15 @@
#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
#define TVP5150_MISC_CTL_GPCL BIT(6)
#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
#define TVP5150_MISC_CTL_HVLK BIT(4)
#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
#define TVP5150_MISC_CTL_VBLANK BIT(1)
#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)

#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */

/* Reserved 05h */
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
{
free_irq(pci_dev->irq, (void *)cobalt);

if (cobalt->msi_enabled)
pci_disable_msi(pci_dev);
pci_free_irq_vectors(pci_dev);
}

static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,

@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
from being generated. */
cobalt_set_interrupt(cobalt, false);

if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
cobalt_err("Could not enable MSI\n");
cobalt->msi_enabled = false;
ret = -EIO;
goto err_release;
}
msi_config_show(cobalt, pci_dev);
cobalt->msi_enabled = true;

/* Register IRQ */
if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
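The cobalt hunks above replace pci_enable_msi_range()/pci_disable_msi() and the driver's own msi_enabled bookkeeping with the pci_alloc_irq_vectors() interface, which tracks what was allocated itself. A minimal sketch of the new pairing (the probe/teardown context here is assumed, not taken from the driver):

	/* probe: request exactly one MSI vector */
	ret = pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return ret;		/* MSI could not be enabled */

	/* ... request_irq(pci_dev->irq, ...), normal operation ... */

	/* teardown: releases whatever pci_alloc_irq_vectors() set up */
	pci_free_irq_vectors(pci_dev);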
@@ -287,8 +287,6 @@ struct cobalt {
u32 irq_none;
u32 irq_full_fifo;

bool msi_enabled;

/* omnitek dma */
int dma_channels;
int first_fifo_channel;
@@ -97,14 +97,13 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;

unsigned char data[80];
};

static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
u8 *buf;
u8 id;
unsigned int rlen;
int ret;

@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
return -EIO;
}

mutex_lock(&state->ca_mutex);
buf = kmalloc(64, GFP_KERNEL);
if (!buf)
return -ENOMEM;

id = state->c++;

state->data[0] = SYNC_BYTE_OUT;
state->data[1] = id;
state->data[2] = cmd;
state->data[3] = write_len;
buf[0] = SYNC_BYTE_OUT;
buf[1] = id;
buf[2] = cmd;
buf[3] = write_len;

memcpy(state->data + 4, data, write_len);
memcpy(buf + 4, data, write_len);

rlen = (read_len > 0) ? 64 : 0;
ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
state->data, rlen, /* delay_ms */ 0);
ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
buf, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;

ret = -EIO;
if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;

memcpy(data, state->data + 4, read_len);
memcpy(data, buf + 4, read_len);

mutex_unlock(&state->ca_mutex);
kfree(buf);
return 0;

failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
ret, SYNC_BYTE_OUT, id, cmd, 3, buf);

mutex_unlock(&state->ca_mutex);
kfree(buf);
return ret;
}

@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
u8 *buf;
u8 id;
int ret;

mutex_lock(&state->ca_mutex);
buf = kmalloc(64, GFP_KERNEL);
if (!buf)
return -ENOMEM;

id = state->c++;

ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;

state->data[0] = SYNC_BYTE_OUT;
state->data[1] = id;
state->data[2] = PCTV_CMD_I2C;
state->data[3] = snd_len + 3;
state->data[4] = addr << 1;
state->data[5] = snd_len;
state->data[6] = rcv_len;
buf[0] = SYNC_BYTE_OUT;
buf[1] = id;
buf[2] = PCTV_CMD_I2C;
buf[3] = snd_len + 3;
buf[4] = addr << 1;
buf[5] = snd_len;
buf[6] = rcv_len;

memcpy(state->data + 7, snd_buf, snd_len);
memcpy(buf + 7, snd_buf, snd_len);

ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
state->data, /* rcv_len */ 64,
ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
buf, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;

/* TT USB protocol error. */
ret = -EIO;
if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;

/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
if (state->data[5] < snd_len || state->data[6] < rcv_len)
if (buf[5] < snd_len || buf[6] < rcv_len)
goto failed;

memcpy(rcv_buf, state->data + 7, rcv_len);
mutex_unlock(&state->ca_mutex);
memcpy(rcv_buf, buf + 7, rcv_len);

kfree(buf);
return rcv_len;

failed:
err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
7, state->data);
7, buf);

mutex_unlock(&state->ca_mutex);
kfree(buf);
return ret;
}

@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
u8 *rx;
u8 *b0, *rx;
int ret;

info("%s: %d\n", __func__, i);

@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;

rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
if (!rx)
b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
if (!b0)
return -ENOMEM;

mutex_lock(&state->ca_mutex);
rx = b0 + 5;

/* hmm where shoud this should go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)

@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);

/* this is a one-time initialization, dont know where to put */
state->data[0] = 0xaa;
state->data[1] = state->c++;
state->data[2] = PCTV_CMD_RESET;
state->data[3] = 1;
state->data[4] = 0;
b0[0] = 0xaa;
b0[1] = state->c++;
b0[2] = PCTV_CMD_RESET;
b0[3] = 1;
b0[4] = 0;
/* reset board */
ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;

state->data[1] = state->c++;
state->data[4] = 1;
b0[1] = state->c++;
b0[4] = 1;
/* reset board (again?) */
ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;

state->initialized = 1;

ret:
mutex_unlock(&state->ca_mutex);
kfree(rx);
kfree(b0);
return ret;
}

static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
u8 *b, *rx;
int ret, i;
u8 id;

mutex_lock(&state->ca_mutex);
b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
if (!b)
return -ENOMEM;

rx = b + CMD_BUFFER_SIZE;

id = state->c++;

/* prepare command header */
state->data[0] = SYNC_BYTE_OUT;
state->data[1] = id;
state->data[2] = PCTV_CMD_IR;
state->data[3] = 0;
b[0] = SYNC_BYTE_OUT;
b[1] = id;
b[2] = PCTV_CMD_IR;
b[3] = 0;

/* send ir request */
ret = dvb_usb_generic_rw(d, state->data, 4,
state->data, PCTV_ANSWER_LEN, 0);
ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
if (ret != 0)
goto ret;

if (debug > 3) {
info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
info(" %02x", state->data[i + 3]);
info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
info(" %02x", rx[i+3]);

info("\n");
}

if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
if ((rx[3] == 9) && (rx[12] & 0x01)) {
/* got a "press" event */
state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
__func__, state->data[6], state->data[7]);
__func__, rx[6], rx[7]);

rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {

@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
state->last_rc_key = 0;
}
ret:
mutex_unlock(&state->ca_mutex);
kfree(b);
return ret;
}
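Both variants of the pctv452e code above follow the same two rules for USB command traffic: the transfer buffer is heap memory (kmalloc), never a stack array, so it is DMA-safe, and the command/response exchange is serialized with ca_mutex. A condensed sketch of that pattern follows; the helper name and surrounding context are illustrative, only dvb_usb_generic_rw(), kmalloc() and kfree() come from the hunks.

static int send_cmd(struct dvb_usb_device *d, struct mutex *lock,
		    const u8 *payload, unsigned int len)
{
	u8 *buf;
	int ret;

	buf = kmalloc(64, GFP_KERNEL);	/* DMA-able, unlike an on-stack array */
	if (!buf)
		return -ENOMEM;

	mutex_lock(lock);		/* one command on the wire at a time */
	memcpy(buf, payload, len);
	ret = dvb_usb_generic_rw(d, buf, len, buf, 64, 0);
	mutex_unlock(lock);

	kfree(buf);
	return ret;
}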
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)

if (!slot)
continue;
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
dw_mci_setup_bus(slot, true);
}

/* Force setup bus to guarantee available clock output */
dw_mci_setup_bus(slot, true);
}

/* Now that slots are all setup, we can enable card detect */
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,

dev->irq = pdev->irq;
priv->base = addr;
priv->device = &pdev->dev;

if (!c_can_pci_data->freq) {
dev_err(&pdev->dev, "no clock frequency defined\n");
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);

clk_enable(priv->clk);
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
goto probe_exit_clk;
}

err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");

@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
struct ti_hecc_priv *priv = netdev_priv(ndev);

unregister_candev(ndev);
clk_disable(priv->clk);
clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);

@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;

clk_disable(priv->clk);
clk_disable_unprepare(priv->clk);

return 0;
}

@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
int err;

clk_enable(priv->clk);
err = clk_prepare_enable(priv->clk);
if (err)
return err;

hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
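The ti_hecc hunks switch from the enable-only clk_enable()/clk_disable() calls to the prepare-aware pair, which the common clock framework requires and which can fail. A sketch of the resulting pattern, not the full driver code:

	err = clk_prepare_enable(priv->clk);	/* prepare (may sleep) + enable */
	if (err)
		return err;

	/* ... hardware in use ... */

	clk_disable_unprepare(priv->clk);	/* disable + unprepare in one call */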
@@ -891,6 +891,8 @@
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_DEF 0x9060
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064

/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)

/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
if (ret)
if (ret) {
netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
}

/*
 * Initialize DMA related features
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)

DBGPR("-->xgbe_start\n");

hw_if->init(pdata);
ret = hw_if->init(pdata);
if (ret)
return ret;

xgbe_napi_enable(pdata, 1);
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
struct pci_dev *rdev;
unsigned int ma_lo, ma_hi;
unsigned int reg;
int bar_mask;

@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (rdev &&
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
}
pci_dev_put(rdev);

/* Configure the PCS indirect addressing support */
reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -955,6 +955,8 @@ struct xgbe_prv_data {

/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
unsigned int xpcs_window_def_reg;
unsigned int xpcs_window_sel_reg;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
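Taken together, the xgbe hunks above add alternate window registers (PCS_V2_RV_WINDOW_DEF/SELECT, selected in xgbe_pci_probe() when the AMD 0x15d0 root-complex device is found) and route the indirect XPCS accessors through the new per-device xpcs_window_def_reg/xpcs_window_sel_reg fields instead of hard-coded constants. The access pattern itself, condensed for reference (the derivation of index is outside the shown hunks):

	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);	/* select the window */
	mmd_data = XPCS16_IOREAD(pdata, offset);			/* 16-bit access within it */
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);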
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
return -ENOMEM;
}

alx_reinit_rings(alx);

return 0;
}

@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
if (alx->qnapi[0] && alx->qnapi[0]->rxq)
kfree(alx->qnapi[0]->rxq->bufs);

if (!alx->descmem.virt)
if (alx->descmem.virt)
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,

@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
alx_disable_advanced_intr(alx);
alx_init_intr(alx, false);

err = alx_alloc_napis(alx);
if (err)

@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
if (err)
goto out_free_rings;

/* must be called after alx_request_irq because the chip stops working
 * if we copy the dma addresses in alx_init_ring_ptrs twice when
 * requesting msi-x interrupts failed
 */
alx_reinit_rings(alx);

netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
} else {
phydev = NULL;
}

/* mask all interrupts and request them */

@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IRMASK, priv->tx_chan);

if (priv->has_phy)
if (phydev)
phy_start(phydev);
else
bcm_enet_adjust_link(dev);

@@ -1126,7 +1128,7 @@ static int bcm_enet_open(struct net_device *dev)
free_irq(dev->irq, dev);

out_phy_disconnect:
if (priv->has_phy)
if (phydev)
phy_disconnect(phydev);

return ret;