This is the 4.19.94 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl4W8A4ACgkQONu9yGCS
aT5ZcBAAha0GMcpxm1ettNVMXUVD/Df2pntc3x2G1T+dtI89YwIilJcdQBpbDGB6
6oNRpnopc+/ynm820SMlhjBNE8KlDzHS3Tmsn1lplru0yOqZMFcFlHSESCAA0b4E
T21KwQ4rLZTzW4LvTf//4WpJZD1RnVrwKkbgkci9kvCjZsdh2GMK3XkBeVBUdXuX
3gvW+9zsgmkU3Bhk5Mi8JUmOw2yY5sJt2tDmIyxOtBknAo1TK6n4kqd+NgjfsdcI
cnNTstDU0Ikmi4UBOZGDmey0THtHdvi/oM3DUkzHtZ68W0rg/gPu4nDR+Fx3sKvo
y5bI10j4W16PKXyxVehel+lD8XmIV/+zSerS0enGjijBPZKI9FTlGEuczk0x7sj+
wqMh3WkkPig2bQPrCOIjkA5VW4n/ZL07cjd1nNeZ48MkvA/3k47o4vDV/lPE88ZT
49qqaJvZ3kAdqtV1pfzpQtrG1Pp8YPcEHAMYIM/6jb6poCro5vFtuRX4tzj2fRSf
u7jSVPDt7ED9SgHPe+RrGWVIx2/iVnr5mVdi53rjWTWfeTdNIY5bUs/wsTde1k99
9bnAhwD4ZbFrO240MMYPWpOCr8kl0LXAeyQ104m7ONbMRnLoRp4sQCae252jyHFD
Qxgez5cDwDQnj2W4/SdXSWytioTnyVHsI89FkWw+f/IU8AsbBuw=
=mmeT
-----END PGP SIGNATURE-----

Merge 4.19.94 into android-4.19-q

Changes in 4.19.94
	nvme_fc: add module to ops template to allow module references
	nvme-fc: fix double-free scenarios on hw queues
	drm/amdgpu: add check before enabling/disabling broadcast mode
	drm/amdgpu: add cache flush workaround to gfx8 emit_fence
	drm/amd/display: Fixed kernel panic when booting with DP-to-HDMI dongle
	iio: adc: max9611: Fix too short conversion time delay
	PM / devfreq: Fix devfreq_notifier_call returning errno
	PM / devfreq: Set scaling_max_freq to max on OPP notifier error
	PM / devfreq: Don't fail devfreq_dev_release if not in list
	afs: Fix afs_find_server lookups for ipv4 peers
	afs: Fix SELinux setting security label on /afs
	RDMA/cma: add missed unregister_pernet_subsys in init failure
	rxe: correctly calculate iCRC for unaligned payloads
	scsi: lpfc: Fix memory leak on lpfc_bsg_write_ebuf_set func
	scsi: qla2xxx: Drop superfluous INIT_WORK of del_work
	scsi: qla2xxx: Don't call qlt_async_event twice
	scsi: qla2xxx: Fix PLOGI payload and ELS IOCB dump length
	scsi: qla2xxx: Configure local loop for N2N target
	scsi: qla2xxx: Send Notify ACK after N2N PLOGI
	scsi: qla2xxx: Ignore PORT UPDATE after N2N PLOGI
	scsi: iscsi: qla4xxx: fix double free in probe
	scsi: libsas: stop discovering if oob mode is disconnected
	drm/nouveau: Move the declaration of struct nouveau_conn_atom up a bit
	usb: gadget: fix wrong endpoint desc
	net: make socket read/write_iter() honor IOCB_NOWAIT
	afs: Fix creation calls in the dynamic root to fail with EOPNOTSUPP
	md: raid1: check rdev before reference in raid1_sync_request func
	s390/cpum_sf: Adjust sampling interval to avoid hitting sample limits
	s390/cpum_sf: Avoid SBD overflow condition in irq handler
	IB/mlx4: Follow mirror sequence of device add during device removal
	IB/mlx5: Fix steering rule of drop and count
	xen-blkback: prevent premature module unload
	xen/balloon: fix ballooned page accounting without hotplug enabled
	PM / hibernate: memory_bm_find_bit(): Tighten node optimisation
	ALSA: hda/realtek - Add Bass Speaker and fixed dac for bass speaker
	ALSA: hda/realtek - Enable the bass speaker of ASUS UX431FLC
	ALSA: hda - fixup for the bass speaker on Lenovo Carbon X1 7th gen
	xfs: fix mount failure crash on invalid iclog memory access
	taskstats: fix data-race
	drm: limit to INT_MAX in create_blob ioctl
	netfilter: nft_tproxy: Fix port selector on Big Endian
	ALSA: ice1724: Fix sleep-in-atomic in Infrasonic Quartet support code
	ALSA: usb-audio: fix set_format altsetting sanity check
	ALSA: usb-audio: set the interface format after resume on Dell WD19
	ALSA: hda/realtek - Add headset Mic no shutup for ALC283
	drm/sun4i: hdmi: Remove duplicate cleanup calls
	MIPS: Avoid VDSO ABI breakage due to global register variable
	media: pulse8-cec: fix lost cec_transmit_attempt_done() call
	media: cec: CEC 2.0-only bcast messages were ignored
	media: cec: avoid decrementing transmit_queue_sz if it is 0
	media: cec: check 'transmit_in_progress', not 'transmitting'
	mm/zsmalloc.c: fix the migrated zspage statistics.
	memcg: account security cred as well to kmemcg
	mm: move_pages: return valid node id in status if the page is already on the target node
	pstore/ram: Write new dumps to start of recycled zones
	locks: print unsigned ino in /proc/locks
	dmaengine: Fix access to uninitialized dma_slave_caps
	compat_ioctl: block: handle Persistent Reservations
	compat_ioctl: block: handle BLKREPORTZONE/BLKRESETZONE
	ata: libahci_platform: Export again ahci_platform_<en/dis>able_phys()
	ata: ahci_brcm: Fix AHCI resources management
	ata: ahci_brcm: Allow optional reset controller to be used
	ata: ahci_brcm: Add missing clock management during recovery
	ata: ahci_brcm: BCM7425 AHCI requires AHCI_HFLAG_DELAY_ENGINE
	libata: Fix retrieving of active qcs
	gpiolib: fix up emulated open drain outputs
	riscv: ftrace: correct the condition logic in function graph tracer
	rseq/selftests: Fix: Namespace gettid() for compatibility with glibc 2.30
	tracing: Fix lock inversion in trace_event_enable_tgid_record()
	tracing: Avoid memory leak in process_system_preds()
	tracing: Have the histogram compare functions convert to u64 first
	tracing: Fix endianness bug in histogram trigger
	apparmor: fix aa_xattrs_match() may sleep while holding a RCU lock
	ALSA: cs4236: fix error return comparison of an unsigned integer
	ALSA: firewire-motu: Correct a typo in the clock proc string
	exit: panic before exit_mm() on global init exit
	arm64: Revert support for execute-only user mappings
	ftrace: Avoid potential division by zero in function profiler
	drm/msm: include linux/sched/task.h
	PM / devfreq: Check NULL governor in available_governors_show
	nfsd4: fix up replay_matches_cache()
	HID: i2c-hid: Reset ALPS touchpads on resume
	ACPI: sysfs: Change ACPI_MASKABLE_GPE_MAX to 0x100
	xfs: don't check for AG deadlock for realtime files in bunmapi
	platform/x86: pmc_atom: Add Siemens CONNECT X300 to critclk_systems DMI table
	Bluetooth: btusb: fix PM leak in error case of setup
	Bluetooth: delete a stray unlock
	Bluetooth: Fix memory leak in hci_connect_le_scan
	media: flexcop-usb: ensure -EIO is returned on error condition
	regulator: ab8500: Remove AB8505 USB regulator
	media: usb: fix memory leak in af9005_identify_state
	dt-bindings: clock: renesas: rcar-usb2-clock-sel: Fix typo in example
	arm64: dts: meson: odroid-c2: Disable usb_otg bus to avoid power failed warning
	tty: serial: msm_serial: Fix lockup for sysrq and oops
	fix compat handling of FICLONERANGE, FIDEDUPERANGE and FS_IOC_FIEMAP
	bdev: Factor out bdev revalidation into a common helper
	bdev: Refresh bdev size for disks without partitioning
	scsi: qedf: Do not retry ELS request if qedf_alloc_cmd fails
	drm/mst: Fix MST sideband up-reply failure handling
	powerpc/pseries/hvconsole: Fix stack overread via udbg
	selftests: rtnetlink: add addresses with fixed life time
	KVM: PPC: Book3S HV: use smp_mb() when setting/clearing host_ipi flag
	rxrpc: Fix possible NULL pointer access in ICMP handling
	tcp: annotate tp->rcv_nxt lockless reads
	net: core: limit nested device depth
	ath9k_htc: Modify byte order for an error message
	ath9k_htc: Discard undersized packets
	xfs: periodically yield scrub threads to the scheduler
	net: add annotations on hh->hh_len lockless accesses
	ubifs: ubifs_tnc_start_commit: Fix OOB in layout_in_gaps
	s390/smp: fix physical to logical CPU map for SMT
	xen/blkback: Avoid unmapping unmapped grant pages
	perf/x86/intel/bts: Fix the use of page_private()
	Linux 4.19.94

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0802486772485e255df74d660ab60313c2965762
commit dabb11de5b

135 files changed, 1373 insertions(+), 557 deletions(-)
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -113,7 +113,7 @@
 			the GPE dispatcher.
 			This facility can be used to prevent such uncontrolled
 			GPE floodings.
-			Format: <int>
+			Format: <byte>
 
 	acpi_no_auto_serialize	[HW,ACPI]
 			Disable auto-serialization of AML methods
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
@@ -46,7 +46,7 @@ Required properties:
 Example (R-Car H3):
 
 	usb2_clksel: clock-controller@e6590630 {
-		compatible = "renesas,r8a77950-rcar-usb2-clock-sel",
+		compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
			     "renesas,rcar-gen3-usb2-clock-sel";
 		reg = <0 0xe6590630 0 0x02>;
 		clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 93
+SUBLEVEL = 94
 EXTRAVERSION =
 NAME = "People's Front"
 
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -293,7 +293,7 @@
 };
 
 &usb0_phy {
-	status = "okay";
+	status = "disabled";
 	phy-supply = <&usb_otg_pwr>;
 };
 
@@ -303,7 +303,7 @@
 };
 
 &usb0 {
-	status = "okay";
+	status = "disabled";
 };
 
 &usb1 {
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -96,13 +96,12 @@
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -111,7 +110,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -105,12 +105,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -126,8 +122,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -428,7 +428,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct mm_struct *mm;
 	struct siginfo si;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, esr))
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -49,8 +49,26 @@ struct thread_info {
 	.addr_limit	= KERNEL_DS,		\
 }
 
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif
 
 static inline struct thread_info *current_thread_info(void)
 {
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -458,9 +458,100 @@ static inline u32 kvmppc_get_xics_latch(void)
 	return xirr;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+/*
+ * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
+ * a CPU thread that's running/napping inside of a guest is by default regarded
+ * as a request to wake the CPU (if needed) and continue execution within the
+ * guest, potentially to process new state like externally-generated
+ * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
+ *
+ * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
+ * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
+ * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
+ * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
+ * the receiving side prior to processing the IPI work.
+ *
+ * NOTE:
+ *
+ * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
+ * This is to guard against sequences such as the following:
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *      105: smp_muxed_ipi_set_message():
+ *      105:   smp_mb()
+ *           // STORE DEFERRED DUE TO RE-ORDERING
+ *    --105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |  42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    |  42: // returns to executing guest
+ *    |      // RE-ORDERED STORE COMPLETES
+ *    ->105:   message[CALL_FUNCTION] = 1
+ *      105:   ppc_msgsnd_sync()/smp_mb()
+ *      105:   ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ *
+ * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
+ * to guard against sequences such as the following (as well as to create
+ * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *           // STORE DEFERRED DUE TO RE-ORDERING
+ *    -- 42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    | 105: smp_muxed_ipi_set_message():
+ *    | 105:   smp_mb()
+ *    | 105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |      // RE-ORDERED STORE COMPLETES
+ *    -> 42:   kvmppc_clear_host_ipi(42)
+ *       42: // returns to executing guest
+ *      105:   ppc_msgsnd_sync()/smp_mb()
+ *      105:   ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ */
+static inline void kvmppc_set_host_ipi(int cpu)
 {
-	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
+	/*
+	 * order stores of IPI messages vs. setting of host_ipi flag
+	 *
+	 * pairs with the barrier in kvmppc_clear_host_ipi()
+	 */
+	smp_mb();
+	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
+}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{
+	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
+	/*
+	 * order clearing of host_ipi flag vs. processing of IPI messages
+	 *
+	 * pairs with the barrier in kvmppc_set_host_ipi()
+	 */
+	smp_mb();
 }
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -489,7 +580,10 @@ static inline u32 kvmppc_get_xics_latch(void)
 	return 0;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+static inline void kvmppc_set_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
 {}
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -36,7 +36,7 @@ void doorbell_global_ipi(int cpu)
 {
 	u32 tag = get_hard_smp_processor_id(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	/* Order previous accesses vs. msgsnd, which is treated as a store */
 	ppc_msgsnd_sync();
 	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -51,7 +51,7 @@ void doorbell_core_ipi(int cpu)
 {
 	u32 tag = cpu_thread_in_core(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	/* Order previous accesses vs. msgsnd, which is treated as a store */
 	ppc_msgsnd_sync();
 	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -86,7 +86,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	kvmppc_set_host_ipi(smp_processor_id(), 0);
+	kvmppc_clear_host_ipi(smp_processor_id());
 	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux_relaxed(); /* already performed the barrier */
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -61,7 +61,7 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
 	hcpu = hcore << threads_shift;
 	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
 	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
-	kvmppc_set_host_ipi(hcpu, 1);
+	kvmppc_set_host_ipi(hcpu);
 	smp_mb();
 	kvmhv_rm_send_ipi(hcpu);
 }
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -223,7 +223,7 @@ static void pnv_smp_cpu_kill_self(void)
 	 * for coming online, which are handled via
 	 * generic_check_cpu_restart() calls.
 	 */
-	kvmppc_set_host_ipi(cpu, 0);
+	kvmppc_clear_host_ipi(cpu);
 
 	srr1 = pnv_cpu_offline(cpu);
 
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars);
  * @vtermno: The vtermno or unit_address of the adapter from which the data
  *	originated.
  * @buf: The character buffer that contains the character data to send to
- *	firmware.
+ *	firmware. Must be at least 16 bytes, even if count is less than 16.
  * @count: Send this number of characters.
  */
 int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -145,7 +145,7 @@ static unsigned int icp_native_get_irq(void)
 
 static void icp_native_cause_ipi(int cpu)
 {
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -184,7 +184,7 @@ void icp_native_flush_interrupt(void)
 	if (vec == XICS_IPI) {
 		/* Clear pending IPI */
 		int cpu = smp_processor_id();
-		kvmppc_set_host_ipi(cpu, 0);
+		kvmppc_clear_host_ipi(cpu);
 		icp_native_set_qirr(cpu, 0xff);
 	} else {
 		pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
@@ -205,7 +205,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
-	kvmppc_set_host_ipi(cpu, 0);
+	kvmppc_clear_host_ipi(cpu);
 	icp_native_set_qirr(cpu, 0xff);
 
 	return smp_ipi_demux();
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -130,7 +130,7 @@ static void icp_opal_cause_ipi(int cpu)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
-	kvmppc_set_host_ipi(cpu, 1);
+	kvmppc_set_host_ipi(cpu);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
@@ -138,7 +138,7 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
-	kvmppc_set_host_ipi(cpu, 0);
+	kvmppc_clear_host_ipi(cpu);
 	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
@@ -161,7 +161,7 @@ void icp_opal_flush_interrupt(void)
 	if (vec == XICS_IPI) {
 		/* Clear pending IPI */
 		int cpu = smp_processor_id();
-		kvmppc_set_host_ipi(cpu, 0);
+		kvmppc_clear_host_ipi(cpu);
 		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 	} else {
 		pr_err("XICS: hw interrupt 0x%x to offline cpu, "
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 */
 	old = *parent;
 
-	if (function_graph_enter(old, self_addr, frame_pointer, parent))
+	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
 		*parent = return_hooker;
 }
 
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1261,18 +1261,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 		 */
 		if (flush_all && done)
 			break;
-
-		/* If an event overflow happened, discard samples by
-		 * processing any remaining sample-data-blocks.
-		 */
-		if (event_overflow)
-			flush_all = 1;
 	}
 
 	/* Account sample overflows in the event hardware structure */
 	if (sampl_overflow)
 		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
 						 sampl_overflow, 1 + num_sdb);
+
+	/* Perf_event_overflow() and perf_event_account_interrupt() limit
+	 * the interrupt rate to an upper limit. Roughly 1000 samples per
+	 * task tick.
+	 * Hitting this limit results in a large number
+	 * of throttled REF_REPORT_THROTTLE entries and the samples
+	 * are dropped.
+	 * Slightly increase the interval to avoid hitting this limit.
+	 */
+	if (event_overflow) {
+		SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+		debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+				    __func__,
+				    DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+	}
 
 	if (sampl_overflow || event_overflow)
 		debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
 				    "overflow stats: sample=%llu event=%llu\n",
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -715,39 +715,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
 
 static int smp_add_present_cpu(int cpu);
 
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+			bool configured, bool early)
 {
 	struct pcpu *pcpu;
-	cpumask_t avail;
-	int cpu, nr, i, j;
+	int cpu, nr, i;
 	u16 address;
 
 	nr = 0;
-	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
-	cpu = cpumask_first(&avail);
-	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
-		if (sclp.has_core_type && info->core[i].type != boot_core_type)
-			continue;
-		address = info->core[i].core_id << smp_cpu_mt_shift;
-		for (j = 0; j <= smp_cpu_mtid; j++) {
-			if (pcpu_find_address(cpu_present_mask, address + j))
-				continue;
-			pcpu = pcpu_devices + cpu;
-			pcpu->address = address + j;
-			pcpu->state =
-				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
-				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
-			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-			set_cpu_present(cpu, true);
-			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
-				set_cpu_present(cpu, false);
-			else
-				nr++;
-			cpu = cpumask_next(cpu, &avail);
-			if (cpu >= nr_cpu_ids)
-				break;
-		}
+	if (sclp.has_core_type && core->type != boot_core_type)
+		return nr;
+	cpu = cpumask_first(avail);
+	address = core->core_id << smp_cpu_mt_shift;
+	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+		if (pcpu_find_address(cpu_present_mask, address + i))
+			continue;
+		pcpu = pcpu_devices + cpu;
+		pcpu->address = address + i;
+		if (configured)
+			pcpu->state = CPU_STATE_CONFIGURED;
+		else
+			pcpu->state = CPU_STATE_STANDBY;
+		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+		set_cpu_present(cpu, true);
+		if (!early && smp_add_present_cpu(cpu) != 0)
+			set_cpu_present(cpu, false);
+		else
+			nr++;
+		cpumask_clear_cpu(cpu, avail);
+		cpu = cpumask_next(cpu, avail);
+	}
+	return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+	struct sclp_core_entry *core;
+	cpumask_t avail;
+	bool configured;
+	u16 core_id;
+	int nr, i;
+
+	nr = 0;
+	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+	/*
+	 * Add IPL core first (which got logical CPU number 0) to make sure
+	 * that all SMT threads get subsequent logical CPU numbers.
+	 */
+	if (early) {
+		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+		for (i = 0; i < info->configured; i++) {
+			core = &info->core[i];
+			if (core->core_id == core_id) {
+				nr += smp_add_core(core, &avail, true, early);
+				break;
+			}
+		}
+	}
+	for (i = 0; i < info->combined; i++) {
+		configured = i < info->configured;
+		nr += smp_add_core(&info->core[i], &avail, configured, early);
 	}
 	return nr;
 }
@@ -793,7 +821,7 @@ void __init smp_detect_cpus(void)
 
 	/* Add CPUs present at boot */
 	get_online_cpus();
-	__smp_rescan_cpus(info, 0);
+	__smp_rescan_cpus(info, true);
 	put_online_cpus();
 	memblock_free_early((unsigned long)info, sizeof(*info));
 }
@@ -1145,7 +1173,7 @@ int __ref smp_rescan_cpus(void)
 	smp_get_core_info(info, 0);
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
-	nr = __smp_rescan_cpus(info, 1);
+	nr = __smp_rescan_cpus(info, false);
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
 	kfree(info);
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -71,9 +71,17 @@ struct bts_buffer {
 
 static struct pmu bts_pmu;
 
+static int buf_nr_pages(struct page *page)
+{
+	if (!PagePrivate(page))
+		return 1;
+
+	return 1 << page_private(page);
+}
+
 static size_t buf_size(struct page *page)
 {
-	return 1 << (PAGE_SHIFT + page_private(page));
+	return buf_nr_pages(page) * PAGE_SIZE;
 }
 
 static void *
@@ -91,9 +99,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
 	/* count all the high order buffers */
 	for (pg = 0, nbuf = 0; pg < nr_pages;) {
 		page = virt_to_page(pages[pg]);
-		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
-			return NULL;
-		pg += 1 << page_private(page);
+		pg += buf_nr_pages(page);
 		nbuf++;
 	}
 
@@ -117,7 +123,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
 		unsigned int __nr_pages;
 
 		page = virt_to_page(pages[pg]);
-		__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+		__nr_pages = buf_nr_pages(page);
 		buf->buf[nbuf].page = page;
 		buf->buf[nbuf].offset = offset;
 		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>
@@ -354,6 +355,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	 * but we call blkdev_ioctl, which gets the lock for us
 	 */
 	case BLKRRPART:
+	case BLKREPORTZONE:
+	case BLKRESETZONE:
 		return blkdev_ioctl(bdev, mode, cmd,
 				(unsigned long)compat_ptr(arg));
 	case BLKBSZSET_32:
@@ -401,6 +404,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKTRACETEARDOWN: /* compatible */
 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
 		return ret;
+	case IOC_PR_REGISTER:
+	case IOC_PR_RESERVE:
+	case IOC_PR_RELEASE:
+	case IOC_PR_PREEMPT:
+	case IOC_PR_PREEMPT_ABORT:
+	case IOC_PR_CLEAR:
+		return blkdev_ioctl(bdev, mode, cmd,
+				(unsigned long)compat_ptr(arg));
 	default:
 		if (disk->fops->compat_ioctl)
 			ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,14 @@ static ssize_t counter_set(struct kobject *kobj,
  * interface:
  *    echo unmask > /sys/firmware/acpi/interrupts/gpe00
  */
-#define ACPI_MASKABLE_GPE_MAX	0xFF
+#define ACPI_MASKABLE_GPE_MAX	0x100
 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 
 static int __init acpi_gpe_set_masked_gpes(char *val)
 {
 	u8 gpe;
 
-	if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
+	if (kstrtou8(val, 0, &gpe))
 		return -EINVAL;
 	set_bit(gpe, acpi_masked_gpes_map);
 
@@ -835,7 +835,7 @@ void __init acpi_gpe_apply_masked_gpes(void)
 {
 	acpi_handle handle;
 	acpi_status status;
-	u8 gpe;
+	u16 gpe;
 
 	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 		status = acpi_get_gpe_device(gpe, &handle);
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 #include <linux/string.h>
 
 #include "ahci.h"
@@ -84,8 +85,7 @@ enum brcm_ahci_version {
 };
 
 enum brcm_ahci_quirks {
-	BRCM_AHCI_QUIRK_NO_NCQ		= BIT(0),
-	BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE	= BIT(1),
+	BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE	= BIT(0),
 };
 
 struct brcm_ahci_priv {
@@ -94,6 +94,7 @@ struct brcm_ahci_priv {
 	u32 port_mask;
 	u32 quirks;
 	enum brcm_ahci_version version;
+	struct reset_control *rcdev;
 };
 
 static inline u32 brcm_sata_readreg(void __iomem *addr)
@@ -220,19 +221,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
 		brcm_sata_phy_disable(priv, i);
 }
 
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
 				  struct brcm_ahci_priv *priv)
 {
-	void __iomem *ahci;
-	struct resource *res;
 	u32 impl;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
-	ahci = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(ahci))
-		return 0;
-
-	impl = readl(ahci + HOST_PORTS_IMPL);
+	impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
 
 	if (fls(impl) > SATA_TOP_MAX_PHYS)
 		dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -240,9 +234,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
 	else if (!impl)
 		dev_info(priv->dev, "no ports found\n");
 
-	devm_iounmap(&pdev->dev, ahci);
-	devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
 	return impl;
 }
 
@@ -292,6 +283,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
 	/* Perform the SATA PHY reset sequence */
 	brcm_sata_phy_disable(priv, ap->port_no);
 
+	/* Reset the SATA clock */
+	ahci_platform_disable_clks(hpriv);
+	msleep(10);
+
+	ahci_platform_enable_clks(hpriv);
+	msleep(10);
+
 	/* Bring the PHY back on */
 	brcm_sata_phy_enable(priv, ap->port_no);
 
@@ -354,11 +352,10 @@ static int brcm_ahci_suspend(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host->private_data;
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
-	int ret;
 
-	ret = ahci_platform_suspend(dev);
 	brcm_sata_phys_disable(priv);
-	return ret;
+
+	return ahci_platform_suspend(dev);
 }
 
 static int brcm_ahci_resume(struct device *dev)
@@ -366,11 +363,44 @@ static int brcm_ahci_resume(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host->private_data;
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	int ret;
+
+	/* Make sure clocks are turned on before re-configuration */
+	ret = ahci_platform_enable_clks(hpriv);
+	if (ret)
+		return ret;
 
 	brcm_sata_init(priv);
 	brcm_sata_phys_enable(priv);
 	brcm_sata_alpm_init(hpriv);
-	return ahci_platform_resume(dev);
+
+	/* Since we had to enable clocks earlier on, we cannot use
+	 * ahci_platform_resume() as-is since a second call to
+	 * ahci_platform_enable_resources() would bump up the resources
+	 * (regulators, clocks, PHYs) count artificially so we copy the part
+	 * after ahci_platform_enable_resources().
+	 */
+	ret = ahci_platform_enable_phys(hpriv);
+	if (ret)
+		goto out_disable_phys;
+
+	ret = ahci_platform_resume_host(dev);
+	if (ret)
+		goto out_disable_platform_phys;
+
+	/* We resumed so update PM runtime state */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+out_disable_platform_phys:
+	ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+	brcm_sata_phys_disable(priv);
+	ahci_platform_disable_clks(hpriv);
+	return ret;
 }
 #endif
 
@@ -411,44 +441,76 @@ static int brcm_ahci_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->top_ctrl))
 		return PTR_ERR(priv->top_ctrl);
 
-	if ((priv->version == BRCM_SATA_BCM7425) ||
-	    (priv->version == BRCM_SATA_NSP)) {
-		priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
-		priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
-	}
-
-	brcm_sata_init(priv);
-
-	priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
-	if (!priv->port_mask)
-		return -ENODEV;
-
-	brcm_sata_phys_enable(priv);
+	/* Reset is optional depending on platform */
+	priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
+	if (!IS_ERR_OR_NULL(priv->rcdev))
+		reset_control_deassert(priv->rcdev);
 
 	hpriv = ahci_platform_get_resources(pdev, 0);
-	if (IS_ERR(hpriv))
-		return PTR_ERR(hpriv);
+	if (IS_ERR(hpriv)) {
+		ret = PTR_ERR(hpriv);
+		goto out_reset;
+	}
+
 	hpriv->plat_data = priv;
-	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+	switch (priv->version) {
+	case BRCM_SATA_BCM7425:
+		hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+		/* fall through */
+	case BRCM_SATA_NSP:
+		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+		priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	ret = ahci_platform_enable_clks(hpriv);
+	if (ret)
+		goto out_reset;
+
+	/* Must be first so as to configure endianness including that
+	 * of the standard AHCI register space.
+	 */
+	brcm_sata_init(priv);
+
+	/* Initializes priv->port_mask which is used below */
+	priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+	if (!priv->port_mask) {
+		ret = -ENODEV;
+		goto out_disable_clks;
+	}
+
+	/* Must be done before ahci_platform_enable_phys() */
+	brcm_sata_phys_enable(priv);
 
 	brcm_sata_alpm_init(hpriv);
 
-	ret = ahci_platform_enable_resources(hpriv);
+	ret = ahci_platform_enable_phys(hpriv);
 	if (ret)
-		return ret;
-
-	if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
-		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-	hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+		goto out_disable_phys;
 
 	ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
 				      &ahci_platform_sht);
 	if (ret)
-		return ret;
+		goto out_disable_platform_phys;
 
 	dev_info(dev, "Broadcom AHCI SATA3 registered\n");
 
 	return 0;
+
+out_disable_platform_phys:
+	ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+	brcm_sata_phys_disable(priv);
+out_disable_clks:
+	ahci_platform_disable_clks(hpriv);
+out_reset:
+	if (!IS_ERR_OR_NULL(priv->rcdev))
+		reset_control_assert(priv->rcdev);
+	return ret;
 }
 
 static int brcm_ahci_remove(struct platform_device *pdev)
@@ -458,12 +520,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
 	int ret;
 
-	brcm_sata_phys_disable(priv);
-
 	ret = ata_platform_remove_one(pdev);
 	if (ret)
 		return ret;
 
+	brcm_sata_phys_disable(priv);
+
 	return 0;
 }
 
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -47,7 +47,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
  * RETURNS:
  * 0 on success otherwise a negative error code
  */
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 {
 	int rc, i;
 
@@ -72,6 +72,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 	}
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
 
 /**
  * ahci_platform_disable_phys - Disable PHYs
@@ -79,7 +80,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
  *
  * This function disables all PHYs found in hpriv->phys.
  */
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 {
 	int i;
 
@@ -88,6 +89,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 		phy_exit(hpriv->phys[i]);
 	}
 }
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
 
 /**
  * ahci_platform_enable_clks - Enable platform clocks
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5343,6 +5343,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	}
 }
 
+/**
+ *	ata_qc_get_active - get bitmask of active qcs
+ *	@ap: port in question
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+	u64 qc_active = ap->qc_active;
+
+	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+		qc_active |= (1 << 0);
+		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+	}
+
+	return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
 /**
  *	ata_qc_complete_multiple - Complete multiple qcs successfully
  *	@ap: port in question
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1283,7 +1283,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 				    i, ioread32(hcr_base + CC),
 				    ioread32(hcr_base + CA));
 		}
-		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 		return;
 
 	} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2840,7 +2840,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
 	}
 
 	if (work_done) {
-		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 
 		/* Update the software queue position index in hardware */
 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1000,7 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 					check_commands = 0;
 				check_commands &= ~(1 << pos);
 			}
-			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 		}
 	}
 
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -936,6 +936,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 out_of_memory:
 	pr_alert("%s: out of memory\n", __func__);
 	put_free_pages(ring, pages_to_gnt, segs_to_map);
+	for (i = last_map; i < num; i++)
+		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	return -ENOMEM;
 }
 
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -179,6 +179,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->domid = domid;
 	atomic_set(&blkif->refcnt, 1);
 	init_completion(&blkif->drain_complete);
+
+	/*
+	 * Because freeing back to the cache may be deferred, it is not
+	 * safe to unload the module (and hence destroy the cache) until
+	 * this has completed. To prevent premature unloading, take an
+	 * extra module reference here and release only when the object
+	 * has been freed back to the cache.
+	 */
+	__module_get(THIS_MODULE);
 	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
 
 	return blkif;
@@ -328,6 +337,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
+	module_put(THIS_MODULE);
 }
 
 int __init xen_blkif_interface_init(void)
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1138,7 +1138,7 @@ static int btusb_open(struct hci_dev *hdev)
 	if (data->setup_on_usb) {
 		err = data->setup_on_usb(hdev);
 		if (err < 0)
-			return err;
+			goto setup_fail;
 	}
 
 	data->intf->needs_remote_wakeup = 1;
@@ -1170,6 +1170,7 @@ static int btusb_open(struct hci_dev *hdev)
 
 failed:
 	clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+setup_fail:
 	usb_autopm_put_interface(data->intf);
 	return err;
 }
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -538,26 +538,30 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 				 void *devp)
 {
 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
-	int ret;
+	int err = -EINVAL;
 
 	mutex_lock(&devfreq->lock);
 
 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
-	if (!devfreq->scaling_min_freq) {
-		mutex_unlock(&devfreq->lock);
-		return -EINVAL;
-	}
+	if (!devfreq->scaling_min_freq)
+		goto out;
 
 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
 	if (!devfreq->scaling_max_freq) {
-		mutex_unlock(&devfreq->lock);
-		return -EINVAL;
+		devfreq->scaling_max_freq = ULONG_MAX;
+		goto out;
 	}
 
-	ret = update_devfreq(devfreq);
-	mutex_unlock(&devfreq->lock);
+	err = update_devfreq(devfreq);
 
-	return ret;
+out:
+	mutex_unlock(&devfreq->lock);
+	if (err)
+		dev_err(devfreq->dev.parent,
+			"failed to update frequency from OPP notifier (%d)\n",
+			err);
+
+	return NOTIFY_OK;
 }
 
 /**
@@ -571,11 +575,6 @@ static void devfreq_dev_release(struct device *dev)
 	struct devfreq *devfreq = to_devfreq(dev);
 
 	mutex_lock(&devfreq_list_lock);
-	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
-		mutex_unlock(&devfreq_list_lock);
-		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
-		return;
-	}
 	list_del(&devfreq->node);
 	mutex_unlock(&devfreq_list_lock);
 
@@ -630,6 +629,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 	devfreq->dev.parent = dev;
 	devfreq->dev.class = devfreq_class;
 	devfreq->dev.release = devfreq_dev_release;
+	INIT_LIST_HEAD(&devfreq->node);
 	devfreq->profile = profile;
 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 	devfreq->previous_freq = profile->initial_freq;
@@ -1086,7 +1086,7 @@ static ssize_t available_governors_show(struct device *d,
 	 * The devfreq with immutable governor (e.g., passive) shows
 	 * only own governor.
 	 */
-	if (df->governor->immutable) {
+	if (df->governor && df->governor->immutable) {
 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
 				  "%s ", df->governor_name);
 	/*
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -249,7 +249,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
 	h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
 	h->h_proto = type;
 	memcpy(h->h_dest, neigh->ha, net->addr_len);
-	hh->hh_len = FWNET_HLEN;
+
+	/* Pairs with the READ_ONCE() in neigh_resolve_output(),
+	 * neigh_hh_output() and neigh_update_hhs().
+	 */
+	smp_store_release(&hh->hh_len, FWNET_HLEN);
 
 	return 0;
 }
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -217,6 +217,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
 	chip = gpiod_to_chip(desc);
 	offset = gpio_chip_hwgpio(desc);
 
+	/*
+	 * Open drain emulation using input mode may incorrectly report
+	 * input here, fix that up.
+	 */
+	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+	    test_bit(FLAG_IS_OUT, &desc->flags))
+		return 0;
+
 	if (!chip->get_direction)
 		return status;
 
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -75,24 +75,30 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
 	u32 tmp;
 
-	/* Put DF on broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, true);
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
-		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
-		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
-	} else {
-		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		tmp |= DF_V3_6_MGCG_DISABLE;
-		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
-	}
-
-	/* Exit broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, false);
+	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
+		/* Put DF on broadcast mode */
+		adev->df_funcs->enable_broadcast_mode(adev, true);
+
+		if (enable) {
+			tmp = RREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater);
+			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+			tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+			WREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+		} else {
+			tmp = RREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater);
+			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+			tmp |= DF_V3_6_MGCG_DISABLE;
+			WREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+		}
+
+		/* Exit broadcast mode */
+		adev->df_funcs->enable_broadcast_mode(adev, false);
+	}
 }
 
 static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
 					  u32 *flags)
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6405,7 +6405,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
 
-	/* EVENT_WRITE_EOP - flush caches, send int */
+	/* Workaround for cache flush problems. First send a dummy EOP
+	 * event down the pipe with seq one below.
+	 */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EOP_TC_WB_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(1) | INT_SEL(0));
+	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+	/* Then send the real EOP event down the pipe:
+	 * EVENT_WRITE_EOP - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
@@ -7154,7 +7170,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 		5 +  /* COND_EXEC */
 		7 +  /* PIPELINE_SYNC */
 		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
-		8 +  /* FENCE for VM_FLUSH */
+		12 +  /* FENCE for VM_FLUSH */
 		20 + /* GDS switch */
 		4 + /* double SWITCH_BUFFER,
 		       the first COND_EXEC jump to the place just
@@ -7166,7 +7182,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 		31 + /* DE_META */
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
-		8 + 8 + /* FENCE x2 */
+		12 + 12 + /* FENCE x2 */
 		2, /* SWITCH_BUFFER */
 	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -348,7 +348,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
 
 	if (GPIO_RESULT_OK != dal_ddc_open(
 		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
-		dal_gpio_destroy_ddc(&ddc);
+		dal_ddc_close(ddc);
 
 		return present;
 	}
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1582,8 +1582,12 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
 	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
 
-	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+	if (txmsg->seqno != -1) {
+		WARN_ON((unsigned int)txmsg->seqno >
+			ARRAY_SIZE(txmsg->dst->tx_slots));
+		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+	}
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_sideband_msg_tx *txmsg)
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -556,7 +556,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
 	struct drm_property_blob *blob;
 	int ret;
 
-	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+	if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
 		return ERR_PTR(-EINVAL);
 
 	blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -25,6 +25,7 @@
 #include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/devcoredump.h>
+#include <linux/sched/task.h>
 
 /*
  * Power Management:
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -29,6 +29,7 @@
 
 #include <nvif/notify.h>
 
+#include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
@@ -37,6 +38,60 @@
 
 struct nvkm_i2c_port;
 
+#define nouveau_conn_atom(p) \
+	container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+	struct drm_connector_state state;
+
+	struct {
+		/* The enum values specifically defined here match nv50/gf119
+		 * hw values, and the code relies on this.
+		 */
+		enum {
+			DITHERING_MODE_OFF = 0x00,
+			DITHERING_MODE_ON = 0x01,
+			DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+			DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+			DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+			DITHERING_MODE_AUTO
+		} mode;
+		enum {
+			DITHERING_DEPTH_6BPC = 0x00,
+			DITHERING_DEPTH_8BPC = 0x02,
+			DITHERING_DEPTH_AUTO
+		} depth;
+	} dither;
+
+	struct {
+		int mode;	/* DRM_MODE_SCALE_* */
+		struct {
+			enum {
+				UNDERSCAN_OFF,
+				UNDERSCAN_ON,
+				UNDERSCAN_AUTO,
+			} mode;
+			u32 hborder;
+			u32 vborder;
+		} underscan;
+		bool full;
+	} scaler;
+
+	struct {
+		int color_vibrance;
+		int vibrant_hue;
+	} procamp;
+
+	union {
+		struct {
+			bool dither:1;
+			bool scaler:1;
+			bool procamp:1;
+		};
+		u8 mask;
+	} set;
+};
+
 struct nouveau_connector {
 	struct drm_connector base;
 	enum dcb_connector_type type;
@@ -111,61 +166,6 @@ extern int nouveau_ignorelid;
 extern int nouveau_duallink;
 extern int nouveau_hdmimhz;
 
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p) \
-	container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
-	struct drm_connector_state state;
-
-	struct {
-		/* The enum values specifically defined here match nv50/gf119
-		 * hw values, and the code relies on this.
-		 */
-		enum {
-			DITHERING_MODE_OFF = 0x00,
-			DITHERING_MODE_ON = 0x01,
-			DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
-			DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
-			DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
-			DITHERING_MODE_AUTO
-		} mode;
-		enum {
-			DITHERING_DEPTH_6BPC = 0x00,
-			DITHERING_DEPTH_8BPC = 0x02,
-			DITHERING_DEPTH_AUTO
-		} depth;
-	} dither;
-
-	struct {
-		int mode;	/* DRM_MODE_SCALE_* */
-		struct {
-			enum {
-				UNDERSCAN_OFF,
-				UNDERSCAN_ON,
-				UNDERSCAN_AUTO,
-			} mode;
-			u32 hborder;
-			u32 vborder;
-		} underscan;
-		bool full;
-	} scaler;
-
-	struct {
-		int color_vibrance;
-		int vibrant_hue;
-	} procamp;
-
-	union {
-		struct {
-			bool dither:1;
-			bool scaler:1;
-			bool procamp:1;
-		};
-		u8 mask;
-	} set;
-};
-
 void nouveau_conn_attach_properties(struct drm_connector *);
 void nouveau_conn_reset(struct drm_connector *);
 struct drm_connector_state *
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -651,8 +651,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
 	struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
 
 	cec_unregister_adapter(hdmi->cec_adap);
-	drm_connector_cleanup(&hdmi->connector);
-	drm_encoder_cleanup(&hdmi->encoder);
 	i2c_del_adapter(hdmi->i2c);
 	clk_disable_unprepare(hdmi->mod_clk);
 	clk_disable_unprepare(hdmi->bus_clk);
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -51,6 +51,7 @@
 #define I2C_HID_QUIRK_NO_RUNTIME_PM		BIT(2)
 #define I2C_HID_QUIRK_DELAY_AFTER_SLEEP		BIT(3)
 #define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
+#define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
 
 /* flags */
 #define I2C_HID_STARTED		0
@@ -182,6 +183,8 @@ static const struct i2c_hid_quirks {
 		I2C_HID_QUIRK_NO_RUNTIME_PM },
 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
 		 I2C_HID_QUIRK_BOGUS_IRQ },
+	{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
+		 I2C_HID_QUIRK_RESET_ON_RESUME },
 	{ 0, 0 }
 };
 
@@ -1290,8 +1293,15 @@ static int i2c_hid_resume(struct device *dev)
 	 * solves "incomplete reports" on Raydium devices 2386:3118 and
 	 * 2386:4B33 and fixes various SIS touchscreens no longer sending
 	 * data after a suspend/resume.
+	 *
+	 * However some ALPS touchpads generate IRQ storm without reset, so
+	 * let's still reset them here.
 	 */
-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+	if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
+		ret = i2c_hid_hwreset(client);
+	else
+		ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
 	if (ret)
 		return ret;
 
@ -92,6 +92,12 @@
|
|||
#define MAX9611_TEMP_SCALE_NUM 1000000
|
||||
#define MAX9611_TEMP_SCALE_DIV 2083
|
||||
|
||||
/*
|
||||
* Conversion time is 2 ms (typically) at Ta=25 degreeC
|
||||
* No maximum value is known, so play it safe.
|
||||
*/
|
||||
#define MAX9611_CONV_TIME_US_RANGE 3000, 3300
|
||||
|
||||
struct max9611_dev {
|
||||
struct device *dev;
|
||||
struct i2c_client *i2c_client;
|
||||
|
@ -239,11 +245,9 @@ static int max9611_read_single(struct max9611_dev *max9611,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* need a delay here to make register configuration
|
||||
* stabilize. 1 msec at least, from empirical testing.
|
||||
*/
|
||||
usleep_range(1000, 2000);
|
||||
/* need a delay here to make register configuration stabilize. */
|
||||
|
||||
usleep_range(MAX9611_CONV_TIME_US_RANGE);
|
||||
|
||||
ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
|
||||
if (ret < 0) {
|
||||
|
@ -510,7 +514,7 @@ static int max9611_init(struct max9611_dev *max9611)
|
|||
MAX9611_REG_CTRL2, 0);
|
||||
return ret;
|
||||
}
|
||||
usleep_range(1000, 2000);
|
||||
usleep_range(MAX9611_CONV_TIME_US_RANGE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
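MAX9611_CONV_TIME_US_RANGE above intentionally expands to two comma-separated values, so the one macro supplies both the minimum and the maximum argument of usleep_range() at every call site. A minimal sketch of the same trick, with a hypothetical macro and function name:

    #include <linux/delay.h>

    /* The comma in the macro body makes one expansion fill both
     * parameters of usleep_range(min_us, max_us). */
    #define MY_CONV_TIME_US_RANGE 3000, 3300

    static void my_wait_for_conversion(void)
    {
            usleep_range(MY_CONV_TIME_US_RANGE); /* == usleep_range(3000, 3300) */
    }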
@@ -4658,6 +4658,7 @@ static int __init cma_init(void)
err:
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
destroy_workqueue(cma_wq);
return ret;

@@ -3069,16 +3069,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->ib_active = false;
flush_workqueue(wq);

mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}

mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
mlx4_ib_diag_cleanup(ibdev);

mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
kfree(ibdev->ib_uc_qpns_bitmap);

@@ -3286,10 +3286,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}

INIT_LIST_HEAD(&handler->list);
if (dst) {
memcpy(&dest_arr[0], dst, sizeof(*dst));
dest_num++;
}

for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
err = parse_flow_attr(dev->mdev, spec->match_criteria,

@@ -3303,6 +3299,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}

if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
memcpy(&dest_arr[0], dst, sizeof(*dst));
dest_num++;
}

if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);

@@ -3340,10 +3341,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}

if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
if (!dest_num)
rule_dst = NULL;
dest_num = 0;
}
} else {
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;

@@ -391,7 +391,7 @@ void rxe_rcv(struct sk_buff *skb)

calc_icrc = rxe_icrc_hdr(pkt, skb);
calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
payload_size(pkt));
payload_size(pkt) + bth_pad(pkt));
calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))

@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (err)
return err;
}
if (bth_pad(pkt)) {
u8 *pad = payload_addr(pkt) + paylen;

memset(pad, 0, bth_pad(pkt));
crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
}
}
p = payload_addr(pkt) + paylen + bth_pad(pkt);

@@ -737,6 +737,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (err)
pr_err("Failed copying memory\n");

if (bth_pad(&ack_pkt)) {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u8 *pad = payload_addr(&ack_pkt) + payload;

memset(pad, 0, bth_pad(&ack_pkt));
icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
}
p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
*p = ~icrc;
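The three rxe hunks above all make the same correction: when the payload is not a multiple of four bytes, the pad bytes must be zeroed and folded into the ICRC on both the send and receive paths. A stand-alone sketch of that rule; the helper name is hypothetical and crc32_le() stands in for the driver's own rxe_crc32() wrapper:

    #include <linux/crc32.h>
    #include <linux/string.h>

    /* Zero the pad, then extend the running CRC over payload plus pad,
     * so sender and receiver checksum the same bytes. */
    static u32 icrc_cover_pad(u32 crc, u8 *payload, size_t len, size_t pad)
    {
            memset(payload + len, 0, pad);
            return crc32_le(crc, payload, len + pad);
    }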
@@ -2756,7 +2756,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
write_targets++;
}
}
if (bio->bi_end_io) {
if (rdev && bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio_set_dev(bio, rdev->bdev);

@@ -365,6 +365,7 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
} else {
list_del_init(&data->list);
if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
if (!WARN_ON(!data->adap->transmit_queue_sz))
data->adap->transmit_queue_sz--;
}

@@ -417,6 +418,14 @@ static void cec_flush(struct cec_adapter *adap)
* need to do anything special in that case.
*/
}
/*
* If something went wrong and this counter isn't what it should
* be, then this will reset it back to 0. Warn if it is not 0,
* since it indicates a bug, either in this framework or in a
* CEC driver.
*/
if (WARN_ON(adap->transmit_queue_sz))
adap->transmit_queue_sz = 0;
}

/*

@@ -441,7 +450,7 @@ int cec_thread_func(void *_adap)
bool timeout = false;
u8 attempts;

if (adap->transmitting) {
if (adap->transmit_in_progress) {
int err;

/*

@@ -476,7 +485,7 @@ int cec_thread_func(void *_adap)
goto unlock;
}

if (adap->transmitting && timeout) {
if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
* not happen and it is an indication of a faulty CEC

@@ -485,14 +494,18 @@ int cec_thread_func(void *_adap)
* so much traffic on the bus that the adapter was
* unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
*/
if (adap->transmitting) {
pr_warn("cec-%s: message %*ph timed out\n", adap->name,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
adap->transmit_in_progress = false;
adap->tx_timeouts++;
/* Just give up on this. */
cec_data_cancel(adap->transmitting,
CEC_TX_STATUS_TIMEOUT);
} else {
pr_warn("cec-%s: transmit timed out\n", adap->name);
}
adap->transmit_in_progress = false;
adap->tx_timeouts++;
goto unlock;
}

@@ -507,6 +520,7 @@ int cec_thread_func(void *_adap)
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
list_del_init(&data->list);
if (!WARN_ON(!data->adap->transmit_queue_sz))
adap->transmit_queue_sz--;

/* Make this the current transmitting message */

@@ -1038,11 +1052,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
valid_la = false;
else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
valid_la = false;
else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
valid_la = false;
else if (cec_msg_is_broadcast(msg) &&
adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
!(dir_fl & BCAST2_0))
adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
!(dir_fl & BCAST1_4))
valid_la = false;
}
if (valid_la && min_len) {
@@ -294,7 +294,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,

mutex_unlock(&fc_usb->data_mutex);

return 0;
return ret;
}

/* actual bus specific access functions,

@@ -985,7 +985,8 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
return -EIO;
ret = -EIO;
if (!ret)
deb_info("Identify state cold = %d\n", *cold);

err:

@@ -121,6 +121,7 @@ struct pulse8 {
unsigned int vers;
struct completion cmd_done;
struct work_struct work;
u8 work_result;
struct delayed_work ping_eeprom_work;
struct cec_msg rx_msg;
u8 data[DATA_SIZE];

@@ -142,8 +143,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
container_of(work, struct pulse8, work);
u8 result = pulse8->work_result;

switch (pulse8->data[0] & 0x3f) {
pulse8->work_result = 0;
switch (result & 0x3f) {
case MSGCODE_FRAME_DATA:
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
break;

@@ -177,12 +180,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
pulse8->escape = false;
} else if (data == MSGEND) {
struct cec_msg *msg = &pulse8->rx_msg;
u8 msgcode = pulse8->buf[0];

if (debug)
dev_info(pulse8->dev, "received: %*ph\n",
pulse8->idx, pulse8->buf);
pulse8->data[0] = pulse8->buf[0];
switch (pulse8->buf[0] & 0x3f) {
switch (msgcode & 0x3f) {
case MSGCODE_FRAME_START:
msg->len = 1;
msg->msg[0] = pulse8->buf[1];

@@ -191,14 +194,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
if (msg->len == CEC_MAX_MSG_SIZE)
break;
msg->msg[msg->len++] = pulse8->buf[1];
if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
if (msgcode & MSGCODE_FRAME_EOM) {
WARN_ON(pulse8->work_result);
pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
break;
}
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_ACK:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
WARN_ON(pulse8->work_result);
pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
break;
case MSGCODE_HIGH_ERROR:

@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
__be16 rs_datalen;
bool is_phyerr;

if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",

@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,

rxstatus = (struct ath_htc_rx_status *)skb->data;

if (be16_to_cpu(rxstatus->rs_datalen) -
(skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
if (unlikely(rs_datalen -
(skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
rxstatus->rs_datalen, skb->len);
rs_datalen, skb->len);
goto rx_next;
}

is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
/*
* Discard zero-length packets and packets smaller than an ACK
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
ath_warn(common,
"Short RX data len, dropping (dlen: %d)\n",
rs_datalen);
goto rx_next;
}

@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
!template->max_dif_sgl_segments || !template->dma_boundary) {
!template->max_dif_sgl_segments || !template->dma_boundary ||
!template->module) {
ret = -EINVAL;
goto out_reghost_failed;
}

@@ -1986,6 +1987,7 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;

if (ctrl->ctrl.tagset) {

@@ -2011,6 +2013,7 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
module_put(lport->ops->module);
}

static void

@@ -2891,10 +2894,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
/*
* if state is connecting - the error occurred as part of a
* reconnect attempt. The create_association error paths will
* clean up any outstanding io.
*
* if it's a different state - ensure all pending io is
* terminated. Given this can delay while waiting for the
* aborted io to return, we recheck adapter state below
* before changing state.
*/
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
nvme_stop_keep_alive(&ctrl->ctrl);

/* will block will waiting for io to terminate */
nvme_fc_delete_association(ctrl);
}

if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))

@@ -3040,10 +3055,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}

if (!try_module_get(lport->ops->module)) {
ret = -EUNATCH;
goto out_free_ctrl;
}

idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_free_ctrl;
goto out_mod_put;
}

ctrl->ctrl.opts = opts;

@@ -3185,6 +3205,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_mod_put:
module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
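The nvme-fc hunks above pin the LLDD's module for as long as a controller holds a pointer into its ops table, so the module's code cannot be unloaded out from under an in-flight reference. A minimal sketch of the same idiom, with hypothetical provider/consumer types standing in for the lport/ctrl relationship:

    #include <linux/module.h>

    struct provider_ops { struct module *module; /* owning module */ };
    struct provider { const struct provider_ops *ops; };

    static int consumer_attach(struct provider *p)
    {
            /* Fails if the provider module is already on its way out. */
            if (!try_module_get(p->ops->module))
                    return -EUNATCH;
            return 0;
    }

    static void consumer_release(struct provider *p)
    {
            module_put(p->ops->module); /* balance the get in the final release path */
    }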
@@ -825,6 +825,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
.module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,

@@ -452,6 +452,14 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
},
},
{
.ident = "CONNECT X300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
},
},

{ /*sentinel*/ }
};

@@ -956,23 +956,6 @@ static struct ab8500_regulator_info
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
[AB8505_LDO_USB] = {
.desc = {
.name = "LDO-USB",
.ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_USB,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_3300000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
},
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",

@@ -97,12 +97,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
} else {
} else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
} else {
/* If the oob mode is OOB_NOT_CONNECTED, the port is
* disconnected due to race with PHY down. We cannot
* continue to discover this port
*/
sas_put_device(dev);
pr_warn("Port %016llx is disconnected when discovering\n",
SAS_ADDR(port->attached_sas_addr));
return -ENODEV;
}

sas_init_dev(dev);

@@ -4419,12 +4419,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;

dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
goto job_error;
}

pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,

@@ -4461,6 +4455,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);

dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
rc = -ENOMEM;
goto job_error;
}

/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {

@@ -4509,6 +4510,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
return SLI_CONFIG_HANDLED;

job_error:
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);

@@ -1903,6 +1903,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,

/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
.module = THIS_MODULE,

/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,

@@ -23,8 +23,6 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
int rc = 0;
uint32_t did, sid;
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
struct fcoe_wqe *sqe;
unsigned long flags;
u16 sqe_idx;

@@ -59,19 +57,13 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
goto els_err;
}

retry_els:
els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
if (!els_req) {
current_time = jiffies / HZ;
if ((current_time - start_time) > 10) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"els: Failed els 0x%x\n", op);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
"Failed to alloc ELS request 0x%x\n", op);
rc = -ENOMEM;
goto els_err;
}
mdelay(20 * USEC_PER_MSEC);
goto retry_els;
}

QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
"0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
@@ -4815,14 +4815,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);

} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
if (qla_tgt_mode_enabled(vha)) {
/* allow the other side to start the login */
clear_bit(LOCAL_LOOP_UPDATE, &flags);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
} else if (ha->current_topology == ISP_CFG_NL) {
} else if (ha->current_topology == ISP_CFG_NL ||
ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (!vha->flags.online ||

@@ -2537,7 +2537,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
(uint8_t *)els_iocb, 0x70);
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
els_iocb->tx_address[0] =

@@ -2703,7 +2704,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,

ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));

rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {

@@ -1049,8 +1049,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);

qlt_async_event(mb[0], vha, mb);
break;
}

@@ -1067,8 +1065,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);

qlt_async_event(mb[0], vha, mb);
break;

case MBA_RSCN_UPDATE: /* State Change Registration */

@@ -3871,6 +3871,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
vha->d_id.b24 = 0;
vha->d_id.b.al_pa = 1;
ha->flags.n2n_bigger = 1;
ha->flags.n2n_ae = 0;

id.b.al_pa = 2;
ql_dbg(ql_dbg_async, vha, 0x5075,

@@ -3881,6 +3882,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: Remote login - Waiting for WWPN %8phC.\n",
rptid_entry->u.f1.port_name);
ha->flags.n2n_bigger = 0;
ha->flags.n2n_ae = 1;
}
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,

@@ -3892,7 +3894,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
/* if our portname is higher then initiate N2N login */

set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
ha->flags.n2n_ae = 1;
return;
break;
case TOPO_FL:

@@ -560,6 +560,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
.module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,

@@ -1261,7 +1261,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);

INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

@@ -4780,6 +4779,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,

switch (sess->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_PEND:
qlt_plogi_ack_unref(vha, pla);
break;

@@ -4280,7 +4280,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_SUCCESS;

mem_alloc_error_exit:
qla4xxx_mem_free(ha);
return QLA_ERROR;
}

@@ -107,6 +107,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
return got;
}

/**
* hvterm_raw_put_chars: send characters to firmware for given vterm adapter
* @vtermno: The virtual terminal number.
* @buf: The characters to send. Because of the underlying hypercall in
* hvc_put_chars(), this buffer must be at least 16 bytes long, even if
* you are sending fewer chars.
* @count: number of chars to send.
*/
static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];

@@ -219,6 +227,7 @@ static const struct hv_ops hvterm_hvsi_ops = {
static void udbg_hvc_putc(char c)
{
int count = -1;
unsigned char bounce_buffer[16];

if (!hvterm_privs[0])
return;

@@ -229,7 +238,12 @@ static void udbg_hvc_putc(char c)
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
count = hvterm_raw_put_chars(0, &c, 1);
/*
* hvterm_raw_put_chars requires at least a 16-byte
* buffer, so go via the bounce buffer
*/
bounce_buffer[0] = c;
count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
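The hvc hunks above encode a contract: the hypercall behind hvc_put_chars() always reads 16 bytes, so a caller may never pass the address of a single char object. A minimal sketch of the safe calling convention (caller name hypothetical, hvterm_raw_put_chars() taken from the hunk above):

    /* Back even a one-byte write with a full 16-byte buffer, so the
     * hypercall's fixed-size read stays within bounds. */
    static void debug_putc(char c)
    {
            unsigned char bounce_buffer[16] = { 0 };

            bounce_buffer[0] = c;
            hvterm_raw_put_chars(0, bounce_buffer, 1);
    }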
@@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
int num_newlines = 0;
bool replaced = false;
void __iomem *tf;
int locked = 1;

if (is_uartdm)
tf = port->membase + UARTDM_TF;

@@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;

if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&port->lock);
else
spin_lock(&port->lock);

if (is_uartdm)
msm_reset_dm_count(port, count);

@@ -1628,6 +1635,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
iowrite32_rep(tf, buf, 1);
i += num_chars;
}

if (locked)
spin_unlock(&port->lock);
}

@@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f)

DBG(cdev, "ecm deactivated\n");

if (ecm->port.in_ep->enabled)
if (ecm->port.in_ep->enabled) {
gether_disconnect(&ecm->port);
} else {
ecm->port.in_ep->desc = NULL;
ecm->port.out_ep->desc = NULL;
}

usb_ep_disable(ecm->notify);
ecm->notify->desc = NULL;

@@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f)
gether_disconnect(&rndis->port);

usb_ep_disable(rndis->notify);
rndis->notify->desc = NULL;
}

/*-------------------------------------------------------------------------*/

@@ -395,7 +395,8 @@ static struct notifier_block xen_memory_nb = {
#else
static enum bp_state reserve_additional_memory(void)
{
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.target_pages = balloon_stats.current_pages +
balloon_stats.target_unpopulated;
return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

@@ -145,6 +145,9 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr

ASSERTCMP(d_inode(dentry), ==, NULL);

if (flags & LOOKUP_CREATE)
return ERR_PTR(-EOPNOTSUPP);

if (dentry->d_name.len >= AFSNAMEMAX) {
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);

@@ -34,18 +34,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net)
struct afs_server *afs_find_server(struct afs_net *net,
const struct sockaddr_rxrpc *srx)
{
const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
const struct afs_addr_list *alist;
struct afs_server *server = NULL;
unsigned int i;
bool ipv6 = true;
int seq = 0, diff;

if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
ipv6 = false;

rcu_read_lock();

do {

@@ -54,7 +47,8 @@ struct afs_server *afs_find_server(struct afs_net *net,
server = NULL;
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);

if (ipv6) {
if (srx->transport.family == AF_INET6) {
const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
alist = rcu_dereference(server->addresses);
for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {

@@ -70,15 +64,16 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
}
} else {
const struct sockaddr_in *a = &srx->transport.sin, *b;
hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
alist = rcu_dereference(server->addresses);
for (i = 0; i < alist->nr_ipv4; i++) {
b = &alist->addrs[i].transport.sin6;
diff = ((u16 __force)a->sin6_port -
(u16 __force)b->sin6_port);
b = &alist->addrs[i].transport.sin;
diff = ((u16 __force)a->sin_port -
(u16 __force)b->sin_port);
if (diff == 0)
diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
(u32 __force)b->sin6_addr.s6_addr32[3]);
diff = ((u32 __force)a->sin_addr.s_addr -
(u32 __force)b->sin_addr.s_addr);
if (diff == 0)
goto found;
}

@@ -404,7 +404,6 @@ static int afs_fill_super(struct super_block *sb,
/* allocate the root inode and dentry */
if (as->dyn_root) {
inode = afs_iget_pseudo_dir(sb, true);
sb->s_flags |= SB_RDONLY;
} else {
sprintf(sb->s_id, "%u", as->volume->vid);
afs_activate_volume(as->volume);
@@ -1328,10 +1328,6 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
"resized disk %s\n",
bdev->bd_disk ? bdev->bd_disk->disk_name : "");
}

if (!bdev->bd_disk)
return;
if (disk_part_scan_enabled(bdev->bd_disk))
bdev->bd_invalidated = 1;
}

@@ -1430,6 +1426,19 @@ EXPORT_SYMBOL(bd_set_size);

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

static void bdev_disk_changed(struct block_device *bdev, bool invalidate)
{
if (disk_part_scan_enabled(bdev->bd_disk)) {
if (invalidate)
invalidate_partitions(bdev->bd_disk, bdev);
else
rescan_partitions(bdev->bd_disk, bdev);
} else {
check_disk_size_change(bdev->bd_disk, bdev, !invalidate);
bdev->bd_invalidated = 0;
}
}

/*
* bd_mutex locking:
*

@@ -1512,12 +1521,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
if (bdev->bd_invalidated) {
if (!ret)
rescan_partitions(disk, bdev);
else if (ret == -ENOMEDIUM)
invalidate_partitions(disk, bdev);
}
if (bdev->bd_invalidated &&
(!ret || ret == -ENOMEDIUM))
bdev_disk_changed(bdev, ret == -ENOMEDIUM);

if (ret)
goto out_clear;

@@ -1550,12 +1556,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
if (bdev->bd_invalidated) {
if (!ret)
rescan_partitions(bdev->bd_disk, bdev);
else if (ret == -ENOMEDIUM)
invalidate_partitions(bdev->bd_disk, bdev);
}
if (bdev->bd_invalidated &&
(!ret || ret == -ENOMEDIUM))
bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_unlock_bdev;
}

@@ -1401,10 +1401,11 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
#endif

case FICLONE:
goto do_ioctl;
case FICLONERANGE:
case FIDEDUPERANGE:
case FS_IOC_FIEMAP:
goto do_ioctl;
goto found_handler;

case FIBMAP:
case FIGETBSZ:

@@ -2678,7 +2678,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
if (inode) {
/* userspace relies on this representation of dev_t */
seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
} else {

@@ -3072,12 +3072,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
(bool)seq->cachethis)
return false;
/*
* If there's an error than the reply can have fewer ops than
* the call. But if we cached a reply with *more* ops than the
* call you're sending us now, then this new call is clearly not
* really a replay of the old one:
* If there's an error then the reply can have fewer ops than
* the call.
*/
if (slot->sl_opcnt < argp->opcnt)
if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
return false;
/*
* But if we cached a reply with *more* ops than the call you're
* sending us now, then this new call is clearly not really a
* replay of the old one:
*/
if (slot->sl_opcnt > argp->opcnt)
return false;
/* This is the only check explicitly called by spec: */
if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))

@@ -437,6 +437,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)

prz = cxt->dprzs[cxt->dump_write_cnt];

/*
* Since this is a new crash dump, we need to reset the buffer in
* case it still has an old dump present. Without this, the new dump
* will get appended, which would seriously confuse anything trying
* to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
* expects to find a dump header in the beginning of buffer data, so
* we must to reset the buffer values, in order to ensure that the
* header will be written to the beginning of the buffer.
*/
persistent_ram_zap(prz);

/* Build header and append record contents. */
hlen = ramoops_write_kmsg_hdr(prz, record);
size = record->size;
@@ -219,7 +219,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
/**
* layout_leb_in_gaps - layout index nodes using in-the-gaps method.
* @c: UBIFS file-system description object
* @p: return LEB number here
* @p: return LEB number in @c->gap_lebs[p]
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.

@@ -228,7 +228,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
* This function returns the number of index nodes written into the gaps, or a
* negative error code on failure.
*/
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;

@@ -243,7 +243,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
* filled, however we do not check there at present.
*/
return lnum; /* Error code */
*p = lnum;
c->gap_lebs[p] = lnum;
dbg_gc("LEB %d", lnum);
/*
* Scan the index LEB. We use the generic scan for this even though

@@ -362,7 +362,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
*/
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
int err, leb_needed_cnt, written, *p;
int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;

dbg_gc("%d znodes to write", cnt);

@@ -371,9 +371,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
if (!c->gap_lebs)
return -ENOMEM;

p = c->gap_lebs;
old_idx_lebs = c->lst.idx_lebs;
do {
ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
ubifs_assert(c, p < c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;

@@ -399,9 +399,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
leb_needed_cnt = get_leb_cnt(c, cnt);
dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
leb_needed_cnt, c->ileb_cnt);
/*
* Dynamically change the size of @c->gap_lebs to prevent
* oob, because @c->lst.idx_lebs could be increased by
* function @get_idx_gc_leb (called by layout_leb_in_gaps->
* ubifs_find_dirty_idx_leb) during loop. Only enlarge
* @c->gap_lebs when needed.
*
*/
if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
old_idx_lebs < c->lst.idx_lebs) {
old_idx_lebs = c->lst.idx_lebs;
gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
(old_idx_lebs + 1), GFP_NOFS);
if (!gap_lebs) {
kfree(c->gap_lebs);
c->gap_lebs = NULL;
return -ENOMEM;
}
c->gap_lebs = gap_lebs;
}
} while (leb_needed_cnt > c->ileb_cnt);

*p = -1;
c->gap_lebs[p] = -1;
return 0;
}

@@ -5239,7 +5239,7 @@ __xfs_bunmapi(
* Make sure we don't touch multiple AGF headers out of order
* in a single transaction, as that could cause AB-BA deadlocks.
*/
if (!wasdel) {
if (!wasdel && !isrt) {
agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
if (prev_agno != NULLAGNUMBER && prev_agno > agno)
break;

@@ -16,6 +16,13 @@ xchk_should_terminate(
struct xfs_scrub *sc,
int *error)
{
/*
* If preemption is disabled, we need to yield to the scheduler every
* few seconds so that we don't run afoul of the soft lockup watchdog
* or RCU stall detector.
*/
cond_resched();

if (fatal_signal_pending(current)) {
if (*error == 0)
*error = -EAGAIN;
@@ -1578,6 +1578,8 @@ xlog_alloc_log(
if (iclog->ic_bp)
xfs_buf_free(iclog->ic_bp);
kmem_free(iclog);
if (prev_iclog == log->l_iclog)
break;
}
spinlock_destroy(&log->l_icloglock);
xfs_buf_free(log->l_xbuf);

@@ -23,6 +23,8 @@ struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;

int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);

@@ -1373,8 +1373,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
int ret;

dma_get_slave_caps(tx->chan, &caps);
ret = dma_get_slave_caps(tx->chan, &caps);
if (ret)
return ret;

if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;

@@ -1190,6 +1190,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,

@@ -1619,6 +1619,8 @@ enum netdev_priv_flags {
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
* @upper_level: Maximum depth level of upper devices.
* @lower_level: Maximum depth level of lower devices.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address

@@ -1853,6 +1855,8 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;

@@ -282,6 +282,8 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
* @module: The LLDD module using the interface
*
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the

@@ -395,6 +397,8 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
struct module *module;

/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);

@@ -38,7 +38,6 @@ enum ab8505_regulator_id {
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
AB8505_LDO_USB,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,

@@ -459,7 +459,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb

do {
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
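The neigh_hh_output hunk above is a data-race annotation: hh->hh_len is updated concurrently, so the lockless read must go through READ_ONCE() (and the corresponding writer through WRITE_ONCE()) to rule out torn or compiler-refetched loads. A minimal sketch of the paired accessors, with illustrative variable names:

    /* writer side, under the seqlock that guards hh */
    WRITE_ONCE(hh->hh_len, new_len);

    /* reader side, inside the read_seqbegin() retry loop */
    unsigned int hh_len = READ_ONCE(hh->hh_len);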
@@ -220,7 +220,7 @@ struct cred *cred_alloc_blank(void)
new->magic = CRED_MAGIC;
#endif

if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;

return new;

@@ -279,7 +279,7 @@ struct cred *prepare_creds(void)
new->security = NULL;
#endif

if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
validate_creds(new);
return new;

@@ -654,7 +654,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;

put_cred(old);

@@ -578,10 +578,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
}

write_unlock_irq(&tasklist_lock);
if (unlikely(pid_ns == &init_pid_ns)) {
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?: father->exit_code);
}

list_for_each_entry_safe(p, n, dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);

@@ -845,6 +841,14 @@ void __noreturn do_exit(long code)
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
/*
* If the last thread of global init has exited, panic
* immediately to get a useable coredump.
*/
if (unlikely(is_global_init(tsk)))
panic("Attempted to kill init! exitcode=0x%08x\n",
tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);

@@ -736,8 +736,15 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
* We have found the zone. Now walk the radix tree to find the leaf node
* for our PFN.
*/

/*
* If the zone we wish to scan is the the current zone and the
* pfn falls into the current node then we do not need to walk
* the tree.
*/
node = bm->cur.node;
if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
if (zone == bm->cur.zone &&
((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;

node = zone->rtree;

@@ -564,25 +564,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
struct taskstats *stats;
struct taskstats *stats_new, *stats;

if (sig->stats || thread_group_empty(tsk))
goto ret;
/* Pairs with smp_store_release() below. */
stats = smp_load_acquire(&sig->stats);
if (stats || thread_group_empty(tsk))
return stats;

/* No problem if kmem_cache_zalloc() fails */
stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

spin_lock_irq(&tsk->sighand->siglock);
if (!sig->stats) {
sig->stats = stats;
stats = NULL;
stats = sig->stats;
if (!stats) {
/*
* Pairs with smp_store_release() above and order the
* kmem_cache_zalloc().
*/
smp_store_release(&sig->stats, stats_new);
stats = stats_new;
stats_new = NULL;
}
spin_unlock_irq(&tsk->sighand->siglock);

if (stats)
kmem_cache_free(taskstats_cache, stats);
ret:
return sig->stats;
if (stats_new)
kmem_cache_free(taskstats_cache, stats_new);

return stats;
}

/* Send pid data out on exit */
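The taskstats hunk above replaces a racy lazy allocation with the acquire/release publication idiom: readers do a lockless smp_load_acquire(), and the thread that wins the allocation publishes the zeroed object with smp_store_release() under the lock, so no reader can observe the pointer before the object's contents. A stand-alone sketch under hypothetical holder/obj types:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj { int data; };
    struct holder { struct obj *ptr; spinlock_t lock; };

    static struct obj *get_or_alloc(struct holder *h)
    {
            struct obj *o = smp_load_acquire(&h->ptr); /* pairs with release */
            struct obj *o_new;

            if (o)
                    return o;

            o_new = kzalloc(sizeof(*o_new), GFP_KERNEL);

            spin_lock(&h->lock);
            o = h->ptr;
            if (!o && o_new) {
                    smp_store_release(&h->ptr, o_new); /* publish after init */
                    o = o_new;
                    o_new = NULL;
            }
            spin_unlock(&h->lock);

            kfree(o_new); /* free the losing allocation, if any */
            return o;
    }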
@@ -555,8 +555,7 @@ static int function_stat_show(struct seq_file *m, void *v)
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
avg = rec->time;
do_div(avg, rec->counter);
avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
goto out;
#endif

@@ -582,7 +581,8 @@ static int function_stat_show(struct seq_file *m, void *v)
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
stddev = div64_ul(stddev,
rec->counter * (rec->counter - 1) * 1000);
}

trace_seq_init(&s);
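Both hunks above swap do_div() for div64_ul(). do_div() is a macro that modifies its first argument in place and expects an unsigned 64-bit lvalue, which makes it easy to misuse; div64_ul() takes the dividend by value and returns the quotient, and does the right thing on 32-bit builds too. A minimal sketch (function name hypothetical):

    #include <linux/math64.h>

    /* Average a 64-bit nanosecond total over an unsigned long count,
     * safe on both 32-bit and 64-bit kernels. */
    static u64 avg_ns(u64 total_ns, unsigned long count)
    {
            return div64_ul(total_ns, count);
    }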
@@ -4370,6 +4370,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD))
lockdep_assert_held(&event_mutex);

/* do nothing if flag is already set */
if (!!(tr->trace_flags & mask) == !!enabled)
return 0;

@@ -4435,6 +4439,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
cmp += 2;
}

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);

ret = match_string(trace_options, -1, cmp);

@@ -4445,6 +4450,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
ret = set_tracer_flag(tr, 1 << ret, !neg);

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);

/*
* If the first trailing whitespace is replaced with '\0' by strstrip,

@@ -7457,9 +7463,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
return -EINVAL;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
ret = set_tracer_flag(tr, 1 << index, val);
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);

if (ret < 0)
return ret;