Merge remote-tracking branch 'origin/tmp-7876320' into msm-kona

* origin/tmp-7876320:
  Linux 4.19-rc4
  Code of Conduct: Let's revamp it.
  x86/APM: Fix build warning when PROC_FS is not enabled
  NFS: Don't open code clearing of delegation state
  NFSv4.1 fix infinite loop on I/O.
  NFSv4: Fix a tracepoint Oops in initiate_file_draining()
  pNFS: Ensure we return the error if someone kills a waiting layoutget
  NFSv4: Fix a tracepoint Oops in initiate_file_draining()
  Revert "x86/mm/legacy: Populate the user page-table with user pgd's"
  xen/gntdev: fix up blockable calls to mn_invl_range_start
  xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage
  xen: avoid crash in disable_hotplug_cpu
  xen/balloon: add runtime control for scrubbing ballooned out pages
  xen/manage: don't complain about an empty value in control/sysrq node
  asm-generic: io: Fix ioport_map() for !CONFIG_GENERIC_IOMAP && CONFIG_INDIRECT_PIO
  mm: get rid of vmacache_flush_all() entirely
  MAINTAINERS: Make Dennis the percpu tree maintainer
  pstore: Fix incorrect persistent ram buffer mapping
  drm/nouveau/devinit: fix warning when PMU/PRE_OS is missing
  null_blk: fix zoned support for non-rq based operation
  cifs: read overflow in is_valid_oplock_break()
  nfp: flower: reject tunnel encap with ipv6 outer headers for offloading
  nfp: flower: fix vlan match by checking both vlan id and vlan pcp
  tipc: check return value of __tipc_dump_start()
  s390/qeth: don't dump past end of unknown HW header
  s390/qeth: use vzalloc for QUERY OAT buffer
  s390/qeth: switch on SG by default for IQD devices
  s390/qeth: indicate error when netdev allocation fails
  x86/efi: Load fixmap GDT in efi_call_phys_epilog() before setting %cr3
  x86/xen: Disable CPU0 hotplug for Xen PV
  tracing/Makefile: Fix handling redefinition of CC_FLAGS_FTRACE
  cifs: integer overflow in in SMB2_ioctl()
  CIFS: fix wrapping bugs in num_entries()
  cifs: prevent integer overflow in nxt_dir_entry()
  s390/zcrypt: remove VLA usage from the AP bus
  firmware: Fix security issue with request_firmware_into_buf()
  vmbus: don't return values for uninitalized channels
  fpga: dfl: fme: fix return value check in in pr_mgmt_init()
  misc: hmc6352: fix potential Spectre v1
  Tools: hv: Fix a bug in the key delete code
  misc: ibmvsm: Fix wrong assignment of return code
  android: binder: fix the race mmap and alloc_new_buf_locked
  mei: bus: need to unlink client before freeing
  mei: bus: fix hw module get/put balance
  mei: fix use-after-free in mei_cl_write
  mei: ignore not found client in the enumeration
  rds: fix two RCU related problems
  r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED
  erspan: fix error handling for erspan tunnel
  erspan: return PACKET_REJECT when the appropriate tunnel is not found
  tcp: rate limit synflood warnings further
  MIPS: lantiq: dma: add dev pointer
  xtensa: enable SG chaining in Kconfig
  xtensa: remove unnecessary KBUILD_SRC ifeq conditional
  PCI: Fix enabling of PASID on RC integrated endpoints
  IB/hfi1,PCI: Allow bus reset while probing
  PCI: Fix faulty logic in pci_reset_bus()
  x86/EISA: Don't probe EISA bus for Xen PV guests
  drm/amdgpu: fix error handling in amdgpu_cs_user_fence_chunk
  perf tools: Fix maps__find_symbol_by_name()
  tools headers uapi: Update tools's copy of linux/if_link.h
  blk-cgroup: increase number of supported policies
  staging: vboxvideo: Change address of scanout buffer on page-flip
  staging: vboxvideo: Fix IRQs no longer working
  of: fix phandle cache creation for DTs with no phandles
  tools headers uapi: Update tools's copy of linux/vhost.h
  tools headers uapi: Update tools's copies of kvm headers
  drm/i915/overlay: Allocate physical registers from stolen
  tools headers uapi: Update tools's copy of drm/drm.h
  tools headers uapi: Update tools's copy of asm-generic/unistd.h
  tools headers uapi: Update tools's copy of linux/perf_event.h
  PCI: pciehp: Fix hot-add vs powerfault detection order
  switchtec: Fix Spectre v1 vulnerability
  Revert "PCI: Add ACS quirk for Intel 300 series"
  MAINTAINERS: Add Gustavo Pimentel as DesignWare PCI maintainer
  MAINTAINERS: Add entries for PPC64 RPA PCI hotplug drivers
  arm64: kernel: arch_crash_save_vmcoreinfo() should depend on CONFIG_CRASH_CORE
  arm64: jump_label.h: use asm_volatile_goto macro instead of "asm goto"
  Revert "printk: make sure to print log on console."
  drm/amdgpu: move PSP init prior to IH in gpu reset
  drm/amdgpu: Fix SDMA hang in prt mode v2
  drm/amdgpu: fix amdgpu_mn_unlock() in the CS error path
  hexagon: modify ffs() and fls() to return int
  arch/hexagon: fix kernel/dma.c build warning
  netfilter: xt_hashlimit: use s->file instead of s->private
  netfilter: nfnetlink_queue: Solve the NFQUEUE/conntrack clash for NF_REPEAT
  netfilter: cttimeout: ctnl_timeout_find_get() returns incorrect pointer to type
  netfilter: conntrack: timeout interface depend on CONFIG_NF_CONNTRACK_TIMEOUT
  netfilter: conntrack: reset tcp maxwin on re-register
  dm thin metadata: try to avoid ever aborting transactions
  Revert "cdc-acm: implement put_char() and flush_chars()"
  usb: Change usb_of_get_companion_dev() place to usb/common
  usb: xhci: fix interrupt transfer error happened on MTK platforms
  qmi_wwan: Support dynamic config on Quectel EP06
  drm/i915/bdw: Increase IPS disable timeout to 100ms
  ethernet: renesas: convert to SPDX identifiers
  staging: gasket: TODO: re-implement using UIO
  tty: hvc: hvc_write() fix break condition
  tty: hvc: hvc_poll() fix read loop batching
  tty: hvc: hvc_poll() fix read loop hang
  x86/doc: Fix Documentation/x86/earlyprintk.txt
  perf/core: Force USER_DS when recording user stack data
  locking/ww_mutex: Fix spelling mistake "cylic" -> "cyclic"
  locking/lockdep: Delete unnecessary #include
  tools/lib/lockdep: Add dummy task_struct state member
  tools/lib/lockdep: Add empty nmi.h
  tools/lib/lockdep: Update Sasha Levin email to MSFT
  ovl: fix oopses in ovl_fill_super() failure paths
  staging/fbtft: Update TODO and mailing lists
  sched/fair: Fix kernel-doc notation warning
  jump_label: Fix typo in warning message
  sched/fair: Fix load_balance redo for !imbalance
  sched/fair: Fix scale_rt_capacity() for SMT
  sched/fair: Fix vruntime_normalized() for remote non-migration wakeup
  sched/pelt: Fix update_blocked_averages() for RT and DL classes
  sched/topology: Set correct NUMA topology type
  sched/debug: Fix potential deadlock when writing to sched_features
  staging: erofs: rename superblock flags (MS_xyz -> SB_xyz)
  locking/mutex: Fix mutex debug call and ww_mutex documentation
  perf/UAPI: Clearly mark __PERF_SAMPLE_CALLCHAIN_EARLY as internal use
  perf/x86/intel: Add support/quirk for the MISPREDICT bit on Knights Landing CPUs
  ip: frags: fix crash in ip_do_fragment()
  net/tls: Set count of SG entries if sk_alloc_sg returns -ENOSPC
  net: ena: fix incorrect usage of memory barriers
  net: ena: fix missing calls to READ_ONCE
  net: ena: fix missing lock during device destruction
  net: ena: fix potential double ena_destroy_device()
  net: ena: fix device destruction to gracefully free resources
  net: ena: fix driver when PAGE_SIZE == 64kB
  net: ena: fix surprise unplug NULL dereference kernel crash
  fs/cifs: require sha512
  fs/cifs: suppress a string overflow warning
  tcp: really ignore MSG_ZEROCOPY if no SO_ZEROCOPY
  net_sched: properly cancel netlink dump on failure
  xen/netfront: fix waiting for xenbus state change
  r8169: set TxConfig register after TX / RX is enabled, just like RxConfig
  tipc: call start and done ops directly in __tipc_nl_compat_dumpit()
  dm raid: bump target version, update comments and documentation
  dm raid: fix RAID leg rebuild errors
  dm raid: fix rebuild of specific devices by updating superblock
  dm raid: fix stripe adding reshape deadlock
  drm/nouveau/disp/gm200-: enforce identity-mapped SOR assignment for LVDS/eDP panels
  drm/nouveau/disp: fix DP disable race
  drm/nouveau/disp: move eDP panel power handling
  drm/nouveau/disp: remove unused struct member
  drm/nouveau/TBDdevinit: don't fail when PMU/PRE_OS is missing from VBIOS
  drm/nouveau/mmu: don't attempt to dereference vmm without valid instance pointer
  drm/nouveau: fix oops in client init failure path
  drm/nouveau: Fix nouveau_connector_ddc_detect()
  drm/nouveau/drm/nouveau: Don't forget to cancel hpd_work on suspend/unload
  drm/nouveau/drm/nouveau: Prevent handling ACPI HPD events too early
  drm/nouveau: Reset MST branching unit before enabling
  drm/nouveau: Only write DP_MSTM_CTRL when needed
  drm/nouveau: Remove useless poll_enable() call in drm_load()
  drm/nouveau: Remove useless poll_disable() call in switcheroo_set_state()
  drm/nouveau: Remove useless poll_enable() call in switcheroo_set_state()
  drm/nouveau: Fix deadlocks in nouveau_connector_detect()
  drm/nouveau/drm/nouveau: Use pm_runtime_get_noresume() in connector_detect()
  drm/nouveau/drm/nouveau: Fix deadlock with fb_helper with async RPM requests
  drm/nouveau: Remove duplicate poll_enable() in pmops_runtime_suspend()
  drm/nouveau/drm/nouveau: Fix bogus drm_kms_helper_poll_enable() placement
  RDMA/mlx4: Ensure that maximal send/receive SGE less than supported by HW
  RDMA/cma: Protect cma dev list with lock
  xtensa: ISS: don't allocate memory in platform_setup
  dm raid: fix reshape race on small devices
  dm: disable CRYPTO_TFM_REQ_MAY_SLEEP to fix a GFP_KERNEL recursion deadlock
  HID: i2c-hid: Don't reset device upon system resume
  net/iucv: declare iucv_path_table_empty() as static
  net/af_iucv: fix skb handling on HiperTransport xmit error
  net/af_iucv: drop inbound packets with invalid flags
  net/sched: fix memory leak in act_tunnel_key_init()
  tipc: orphan sock in tipc_release()
  drm/i915/gvt: Fix the incorrect length of child_device_config issue
  net/mlx5: Fix possible deadlock from lockdep when adding fte to fg
  net/mlx5e: Ethtool steering, fix udp source port value
  net/mlx5: Check for error in mlx5_attach_interface
  net/mlx5: Consider PCI domain in search for next dev
  net/mlx5: Fix not releasing read lock when adding flow rules
  net/mlx5: E-Switch, Fix memory leak when creating switchdev mode FDB tables
  net/mlx5: Use u16 for Work Queue buffer strides offset
  net/mlx5: Use u16 for Work Queue buffer fragment size
  net/mlx5: Fix debugfs cleanup in the device init/remove flow
  net/mlx5: Fix use-after-free in self-healing flow
  RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one()
  bnxt_re: Fix couple of memory leaks that could lead to IOMMU call traces
  IB/ipoib: Avoid a race condition between start_xmit and cm_rep_handler
  nvmet-rdma: fix possible bogus dereference under heavy load
  net: qca_spi: Fix race condition in spi transfers
  be2net: Fix memory leak in be_cmd_get_profile_config()
  mlxsw: spectrum_buffers: Set up a dedicated pool for BUM traffic
  usb: cdc-wdm: Fix a sleep-in-atomic-context bug in service_outstanding_interrupt()
  usb: misc: uss720: Fix two sleep-in-atomic-context bugs
  usb: host: u132-hcd: Fix a sleep-in-atomic-context bug in u132_get_frame()
  usb: Avoid use-after-free by flushing endpoints early in usb_set_interface()
  linux/mod_devicetable.h: fix kernel-doc missing notation for typec_device_id
  usb/typec: fix kernel-doc notation warning for typec_match_altmode
  usb: Don't die twice if PCI xhci host is not responding in resume
  usb: mtu3: fix error of xhci port id when enable U3 dual role
  usb: uas: add support for more quirk flags
  USB: Add quirk to support DJI CineSSD
  usb: typec: fix kernel-doc parameter warning
  usb/dwc3/gadget: fix kernel-doc parameter warning
  USB: yurex: Check for truncation in yurex_read()
  USB: yurex: Fix buffer over-read in yurex_write()
  usb: host: xhci-plat: Iterate over parent nodes for finding quirks
  xhci: Fix use after free for URB cancellation on a reallocated endpoint
  USB: add quirk for WORLDE Controller KS49 or Prodipe MIDI 49C USB controller
  usb: dwc2: Fix call location of dwc2_check_core_endianness
  HID: sensor-hub: Restore fixup for Lenovo ThinkPad Helix 2 sensor hub report
  HID: core: fix NULL pointer dereference
  mmc: meson-mx-sdio: fix OF child-node lookup
  riscv: Do not overwrite initrd_start and initrd_end
  iw_cxgb4: only allow 1 flush on user qps
  IB/core: Release object lock if destroy failed
  RDMA/ucma: check fd type in ucma_migrate_id()
  HID: core: fix grouping by application
  HID: multitouch: fix Elan panels with 2 input modes declaration
  dm verity: fix crash on bufio buffer that was allocated with vmalloc
  mmc: omap_hsmmc: fix wakeirq handling on removal
  s390/crypto: Fix return code checking in cbc_paes_crypt()
  drm/i915/gvt: Fix life cycle reference on KVM mm
  ovl: add ovl_fadvise()
  iio: imu: st_lsm6dsx: take into account ts samples in wm configuration
  Revert "iio: temperature: maxim_thermocouple: add MAX31856 part"
  ipmi: Fix NULL pointer dereference in ssif_probe
  netfilter: nf_tables: release chain in flushing set
  netfilter: kconfig: nat related expression depend on nftables core
  ipmi: Fix I2C client removal in the SSIF driver
  ipmi: Move BT capabilities detection to the detect call
  ipmi: Rework SMI registration failure
  ipmi: kcs_bmc: don't change device name
  perf annotate: Fix parsing aarch64 branch instructions after objdump update
  perf probe powerpc: Ignore SyS symbols irrespective of endianness
  vfs: implement readahead(2) using POSIX_FADV_WILLNEED
  perf event-parse: Use fixed size string for comms
  perf util: Fix bad memory access in trace info.
  perf tools: Streamline bpf examples and headers installation
  perf evsel: Fix potential null pointer dereference in perf_evsel__new_idx()
  perf arm64: Fix include path for asm-generic/unistd.h
  perf/hw_breakpoint: Simplify breakpoint enable in perf_event_modify_breakpoint
  perf/hw_breakpoint: Enable breakpoint in modify_user_hw_breakpoint
  perf/hw_breakpoint: Remove superfluous bp->attr.disabled = 0
  perf/hw_breakpoint: Modify breakpoint even if the new attr has disabled set
  perf tests: Add breakpoint modify tests
  perf annotate: Properly interpret indirect call
  vfs: add the fadvise() file operation
  Documentation/filesystems: update documentation of file_operations
  ovl: fix GPF in swapfile_activate of file from overlayfs over xfs
  ovl: respect FIEMAP_FLAG_SYNC flag
  scsi: qedi: Add the CRC size within iSCSI NVM image
  scsi: iscsi: target: Fix conn_ops double free
  scsi: iscsi: target: Set conn->sess to NULL when iscsi_login_set_conn_values fails
  HID: hid-saitek: Add device ID for RAT 7 Contagion
  pinctrl: madera: Fix possible NULL pointer with pdata config
  pinctrl: ingenic: Fix group & function error checking
  netfilter: nf_tables: rework ct timeout set support
  netfilter: conntrack: place 'new' timeout in first location too
  pinctrl: msm: Really mask level interrupts to prevent latching
  usb: dwc3: pci: Fix return value check in dwc3_byt_enable_ulpi_refclock()
  usb: gadget: udc: renesas_usb3: fix maxpacket size of ep0
  usb: gadget: fotg210-udc: Fix memory leak of fotg210->ep[i]
  USB: net2280: Fix erroneous synchronization change
  usb: dwc3: of-simple: avoid unused function warnings
  Revert "staging: erofs: disable compiling temporarile"
  HID: core: fix memory leak on probe
  HID: input: fix leaking custom input node name
  HID: add support for Apple Magic Keyboards
  HID: i2c-hid: Fix flooded incomplete report after S3 on Rayd touchscreen
  HID: intel-ish-hid: Enable Sunrise Point-H ish driver
  MAINTAINERS: Switch a maintainer for drivers/staging/gasket
  staging: wilc1000: revert "fix TODO to compile spi and sdio components in single module"
  USB: serial: ti_usb_3410_5052: fix array underflow in completion handler
  USB: serial: io_ti: fix array underflow in completion handler
  dmaengine: mic_x100_dma: use devm_kzalloc to fix an issue
  netfilter: xt_checksum: ignore gso skbs
  netfilter: xt_cluster: add dependency on conntrack module
  netfilter: conntrack: remove duplicated include from nf_conntrack_proto_udp.c

Change-Id: I9fdae855388077fd5a44e66153c360a7ed1c7cc5
[rishabhb@codeaurora.org: Resolved minor merge conflicts]
Signed-off-by: Rishabh Bhatnagar <rishabhb@codeaurora.org>
Author: Rishabh Bhatnagar <rishabhb@codeaurora.org>
Date:   2018-09-17 14:54:47 -07:00
commit af3f58fa5f
299 files changed, 2975 insertions(+), 1733 deletions(-)

@@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Description:
Amount (in KiB) of low (or normal) memory in the
balloon.
What: /sys/devices/system/xen_memory/xen_memory0/scrub_pages
Date: September 2018
KernelVersion: 4.20
Contact: xen-devel@lists.xenproject.org
Description:
Control scrubbing pages before returning them to Xen for others domains
use. Can be set with xen_scrub_pages cmdline
parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
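
For readers skimming the diff: the feature is just a conditional wipe of each
page before it is handed back to the hypervisor. A minimal sketch in kernel C
of the idea (the helper and variable names are illustrative, not the in-tree
code; clear_highpage() and IS_ENABLED() are real kernel APIs):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Knob set at boot via xen_scrub_pages= or at runtime via the sysfs
 * file documented above (illustrative variable, not the real symbol). */
static bool scrub_pages_enabled = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);

static void example_scrub_page(struct page *page)
{
	if (scrub_pages_enabled)
		clear_highpage(page);	/* zero data other domains could otherwise read */
}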

@@ -5007,6 +5007,12 @@
Disables the PV optimizations forcing the HVM guest to
run as generic HVM guest with no PV drivers.
xen_scrub_pages= [XEN]
Boolean option to control scrubbing pages before giving them back
to Xen, for use by other domains. Can be also changed at runtime
with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

@@ -348,3 +348,7 @@ Version History
1.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size an
state races.
1.13.2 Fix raid redundancy validation and avoid keeping raid set frozen
1.14.0 Fix reshape race on small devices. Fix stripe adding reshape
deadlock/potential data corruption. Update superblock when
specific devices are requested via rebuild. Fix RAID leg
rebuild errors.

@@ -848,7 +848,7 @@ struct file_operations
----------------------
This describes how the VFS can manipulate an open file. As of kernel
4.1, the following members are defined:
4.18, the following members are defined:
struct file_operations {
struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
#ifndef CONFIG_MMU
unsigned (*mmap_capabilities)(struct file *);
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
int (*fadvise)(struct file *, loff_t, loff_t, int);
};
Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
iterate: called when the VFS needs to read the directory contents
iterate_shared: called when the VFS needs to read the directory contents
when filesystem supports concurrent dir iterators
poll: called by the VFS when a process wants to check if there is
activity on this file and (optionally) go to sleep until there
is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
fallocate: called by the VFS to preallocate blocks or punch a hole.
copy_file_range: called by the copy_file_range(2) system call.
clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
FICLONE commands.
dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
command.
fadvise: possibly called by the fadvise64() system call.
Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special
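
For orientation, the new ->fadvise() hook takes (file, offset, len, advice),
mirroring fadvise64(2). A hedged sketch of how a stacking filesystem might
forward it, in the spirit of the ovl_fadvise() change in the shortlog
(get_backing_file() is a hypothetical helper; vfs_fadvise() is the wrapper
this series introduces):

#include <linux/fs.h>

static int example_fadvise(struct file *file, loff_t offset, loff_t len,
			   int advice)
{
	struct file *backing = get_backing_file(file);	/* hypothetical */

	/* Pass the advice through to the file we stack upon. */
	return vfs_fadvise(backing, offset, len, advice);
}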

@@ -0,0 +1,81 @@
Contributor Covenant Code of Conduct
++++++++++++++++++++++++++++++++++++
Our Pledge
==========
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and
expression, level of experience, education, socio-economic status, nationality,
personal appearance, race, religion, or sexual identity and orientation.
Our Standards
=============
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
Our Responsibilities
====================
Maintainers are responsible for clarifying the standards of acceptable behavior
and are expected to take appropriate and fair corrective action in response to
any instances of unacceptable behavior.
Maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, or to ban temporarily or permanently any
contributor for other behaviors that they deem inappropriate, threatening,
offensive, or harmful.
Scope
=====
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
Enforcement
===========
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the Technical Advisory Board (TAB) at
<tab@lists.linux-foundation.org>. All complaints will be reviewed and
investigated and will result in a response that is deemed necessary and
appropriate to the circumstances. The TAB is obligated to maintain
confidentiality with regard to the reporter of an incident. Further details of
specific enforcement policies may be posted separately.
Maintainers who do not follow or enforce the Code of Conduct in good faith may
face temporary or permanent repercussions as determined by other members of the
project's leadership.
Attribution
===========
This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

@@ -1,28 +0,0 @@
Code of Conflict
----------------
The Linux kernel development effort is a very personal process compared
to "traditional" ways of developing software. Your code and ideas
behind it will be carefully reviewed, often resulting in critique and
criticism. The review will almost always require improvements to the
code before it can be included in the kernel. Know that this happens
because everyone involved wants to see the best possible solution for
the overall success of Linux. This development process has been proven
to create the most robust operating system kernel ever, and we do not
want to do anything to cause the quality of submission and eventual
result to ever decrease.
If however, anyone feels personally abused, threatened, or otherwise
uncomfortable due to this process, that is not acceptable. If so,
please contact the Linux Foundation's Technical Advisory Board at
<tab@lists.linux-foundation.org>, or the individual members, and they
will work to resolve the issue to the best of their ability. For more
information on who is on the Technical Advisory Board and what their
role is, please see:
- http://www.linuxfoundation.org/projects/linux/tab
As a reviewer of code, please strive to keep things civil and focused on
the technical issues involved. We are all humans, and frustrations can
be high on both sides of the process. Try to keep in mind the immortal
words of Bill and Ted, "Be excellent to each other."

@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
:maxdepth: 1
howto
code-of-conflict
code-of-conduct
development-process
submitting-patches
coding-style

@@ -35,25 +35,25 @@ and two USB cables, connected like this:
( If your system does not list a debug port capability then you probably
won't be able to use the USB debug key. )
b.) You also need a Netchip USB debug cable/key:
b.) You also need a NetChip USB debug cable/key:
http://www.plxtech.com/products/NET2000/NET20DC/default.asp
This is a small blue plastic connector with two USB connections,
This is a small blue plastic connector with two USB connections;
it draws power from its USB connections.
c.) You need a second client/console system with a high speed USB 2.0
port.
d.) The Netchip device must be plugged directly into the physical
d.) The NetChip device must be plugged directly into the physical
debug port on the "host/target" system. You cannot use a USB hub in
between the physical debug port and the "host/target" system.
The EHCI debug controller is bound to a specific physical USB
port and the Netchip device will only work as an early printk
port and the NetChip device will only work as an early printk
device in this port. The EHCI host controllers are electrically
wired such that the EHCI debug controller is hooked up to the
first physical and there is no way to change this via software.
first physical port and there is no way to change this via software.
You can find the physical port through experimentation by trying
each physical port on the system and rebooting. Or you can try
and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
to the hardware vendor, because there is no reason not to wire
this port into one of the physically accessible ports.
e.) It is also important to note, that many versions of the Netchip
e.) It is also important to note, that many versions of the NetChip
device require the "client/console" system to be plugged into the
right and side of the device (with the product logo facing up and
right hand side of the device (with the product logo facing up and
readable left to right). The reason being is that the 5 volt
power supply is taken from only one side of the device and it
must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
CONFIG_EARLY_PRINTK_DBGP=y
And you need to add the boot command line: "earlyprintk=dbgp".
(If you are using Grub, append it to the 'kernel' line in
/etc/grub.conf)
/etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
using Grub2 on an EFI firmware system, append it to the 'linux'
or 'linuxefi' line in /boot/grub2/grub.cfg or
/boot/efi/EFI/<distro>/grub.cfg.)
On systems with more than one EHCI debug controller you must
specify the correct EHCI debug controller number. The ordering
comes from the PCI bus enumeration of the EHCI controllers. The
default with no number argument is "0" the first EHCI debug
default with no number argument is "0" or the first EHCI debug
controller. To use the second EHCI debug controller, you would
use the command line: "earlyprintk=dbgp1"
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
see the raw output.
c.) On Nvidia Southbridge based systems: the kernel will try to probe
and find out which port has debug device connected.
and find out which port has a debug device connected.
3. Testing that it works fine:

@@ -5625,6 +5625,8 @@ F: lib/fault-inject.c
FBTFT Framebuffer drivers
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/staging/fbtft/
@@ -6060,7 +6062,7 @@ F: Documentation/gcc-plugins.txt
GASKET DRIVER FRAMEWORK
M: Rob Springer <rspringer@google.com>
M: John Joseph <jnjoseph@google.com>
M: Todd Poynor <toddpoynor@google.com>
M: Ben Chan <benchan@chromium.org>
S: Maintained
F: drivers/staging/gasket/
@@ -7016,6 +7018,20 @@ F: drivers/crypto/vmx/aes*
F: drivers/crypto/vmx/ghash*
F: drivers/crypto/vmx/ppc-xlate.pl
IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: drivers/pci/hotplug/rpaphp*
IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: drivers/pci/hotplug/rpadlpar*
IBM ServeRAID RAID DRIVER
S: Orphan
F: drivers/scsi/ips.*
@@ -8300,7 +8316,7 @@ F: include/linux/libata.h
F: Documentation/devicetree/bindings/ata/
LIBLOCKDEP
M: Sasha Levin <alexander.levin@verizon.com>
M: Sasha Levin <alexander.levin@microsoft.com>
S: Maintained
F: tools/lib/lockdep/
@@ -11154,7 +11170,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
M: Joao Pinto <Joao.Pinto@synopsys.com>
M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11346,10 +11362,10 @@ S: Maintained
F: drivers/platform/x86/peaq-wmi.c
PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
M: Dennis Zhou <dennisszhou@gmail.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
S: Maintained
F: include/linux/percpu*.h
F: mm/percpu*.c

@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -621,6 +621,11 @@ CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
$(call cc-disable-warning,maybe-uninitialized,)
export CFLAGS_GCOV
# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
@@ -778,9 +783,6 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
endif
ifdef CONFIG_FUNCTION_TRACER
ifndef CC_FLAGS_FTRACE
CC_FLAGS_FTRACE := -pg
endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
# gcc 5 supports generating the mcount tables directly
ifeq ($(call cc-option-yn,-mrecord-mcount),y)

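A note before the arm64 jump_label.h hunk below: asm_volatile_goto() is, to
the best of my recollection, defined in include/linux/compiler-gcc.h roughly
as follows, as a workaround for a GCC asm-goto miscompilation (quoted from
memory; check that header for the authoritative definition):

/* The empty asm() after the asm goto discourages GCC from moving
 * code across the jump (see GCC PR58670). */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)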
@@ -28,7 +28,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm goto("1: nop\n\t"
asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm goto("1: b %l[l_yes]\n\t"
asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"

@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o

@@ -0,0 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Linaro.
* Copyright (C) Huawei Futurewei Technologies.
*/
#include <linux/crash_core.h>
#include <asm/memory.h>
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_NUMBER(VA_BITS);
/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
kimage_voffset);
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
PHYS_OFFSET);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}

@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
}
}
#endif /* CONFIG_HIBERNATION */
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_NUMBER(VA_BITS);
/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
kimage_voffset);
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
PHYS_OFFSET);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}

@@ -211,7 +211,7 @@ static inline long ffz(int x)
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline long fls(int x)
static inline int fls(int x)
{
int r;
@@ -232,7 +232,7 @@ static inline long fls(int x)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline long ffs(int x)
static inline int ffs(int x)
{
int r;
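
The hexagon change only narrows the return type from long to int; the
semantics stay exactly as the comments state. A generic-C reference for those
semantics (a sketch for illustration, not the hexagon implementation):

/* fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32;
 * ffs(0) == 0, ffs(1) == 1 (1-based index of the lowest set bit). */
static int ref_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int ref_ffs(unsigned int x)
{
	return x ? ref_fls(x & -x) : 0;	/* x & -x isolates the lowest set bit */
}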

@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
pfn_to_virt(max_low_pfn),
(unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}

@@ -40,6 +40,7 @@ struct ltq_dma_channel {
int desc; /* the current descriptor */
struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
struct device *dev;
};
enum {

@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
ch->desc_base = dma_zalloc_coherent(NULL,
ch->desc_base = dma_zalloc_coherent(ch->dev,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
if (!ch->desc_base)
return;
ltq_dma_close(ch);
dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);

@@ -85,15 +85,8 @@ atomic_t hart_lottery;
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
unsigned long size;
if (__initramfs_size > 0) {
initrd_start = (unsigned long)(&__initramfs_start);
initrd_end = initrd_start + __initramfs_size;
}
if (initrd_start >= initrd_end) {
printk(KERN_INFO "initrd not found or empty");
goto disable;

@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (n < k) {
if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);

@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = snb_lbr_sel_map;
/* Knights Landing does have MISPREDICT bit */
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}

@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
#endif
*pmdp = pmd;
}
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
#endif
return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
#endif
return __pud(xchg((pudval_t *)xp, 0));
}
#else

@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
return 0;
}
#ifdef CONFIG_PROC_FS
static int proc_apm_show(struct seq_file *m, void *v)
{
unsigned short bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
units);
return 0;
}
#endif
static int apm(void *unused)
{

@@ -7,11 +7,17 @@
#include <linux/eisa.h>
#include <linux/io.h>
#include <xen/xen.h>
static __init int eisa_bus_probe(void)
{
void __iomem *p = ioremap(0x0FFFD9, 4);
void __iomem *p;
if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
if (xen_pv_domain() && !xen_initial_domain())
return 0;
p = ioremap(0x0FFFD9, 4);
if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
EISA_bus = 1;
iounmap(p);
return 0;

@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
/*
* Currently CPU0 is only hotpluggable on Intel platforms. Other
* vendors can add hotplug support later.
* Xen PV guests don't support CPU0 hotplug at all.
*/
if (c->x86_vendor != X86_VENDOR_INTEL)
if (c->x86_vendor != X86_VENDOR_INTEL ||
boot_cpu_has(X86_FEATURE_XENPV))
cpu0_hotpluggable = 0;
/*

@@ -85,10 +85,9 @@ pgd_t * __init efi_call_phys_prolog(void)
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
load_fixmap_gdt(0);
load_cr3(save_pgd);
__flush_tlb_all();
load_fixmap_gdt(0);
}
void __init efi_runtime_update_mappings(void)

@@ -4,6 +4,7 @@ config ZONE_DMA
config XTENSA
def_bool y
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU

@@ -64,11 +64,7 @@ endif
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
ifeq ($(KBUILD_SRC),)
KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
else
KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
endif
KBUILD_DEFCONFIG := iss_defconfig

@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
void __init platform_setup(char **p_cmdline)
{
static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
static char cmdline[COMMAND_LINE_SIZE] __initdata;
int argc = simc_argc();
int argv_size = simc_argv_size();
if (argc > 1) {
void **argv = alloc_bootmem(argv_size);
char *cmdline = alloc_bootmem(argv_size);
int i;
if (argv_size > sizeof(argv)) {
pr_err("%s: command line too long: argv_size = %d\n",
__func__, argv_size);
} else {
int i;
cmdline[0] = 0;
simc_argv((void *)argv);
cmdline[0] = 0;
simc_argv((void *)argv);
for (i = 1; i < argc; ++i) {
if (i > 1)
strcat(cmdline, " ");
strcat(cmdline, argv[i]);
for (i = 1; i < argc; ++i) {
if (i > 1)
strcat(cmdline, " ");
strcat(cmdline, argv[i]);
}
*p_cmdline = cmdline;
}
*p_cmdline = cmdline;
}
atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);

@@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (!blkcg_policy[i])
break;
if (i >= BLKCG_MAX_POLS)
if (i >= BLKCG_MAX_POLS) {
pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
goto err_unlock;
}
/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||

@@ -332,6 +332,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
if (vma)
alloc->vma_vm_mm = vma->vm_mm;
/*
* If we see alloc->vma is not NULL, buffer data structures set up
* completely. Look at smp_rmb side binder_alloc_get_vma.
* We also want to guarantee new alloc->vma_vm_mm is always visible
* if alloc->vma is set.
*/
smp_wmb();
alloc->vma = vma;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
struct vm_area_struct *vma = NULL;
if (alloc->vma) {
/* Look at description in binder_alloc_set_vma */
smp_rmb();
vma = alloc->vma;
}
return vma;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
if (alloc->vma == NULL) {
if (!binder_alloc_get_vma(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);
return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int buffers, page_count;
struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
binder_alloc_set_vma(alloc, NULL);
}
/**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
vma = alloc->vma;
vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
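
The pattern the binder fix codifies is a classic publish/consume pairing:
every buffer structure must be visible before alloc->vma is, and a reader
must observe the pointer before trusting that setup. A userspace analogue
using C11 atomics (a sketch of the idea only, not binder code):

#include <stdatomic.h>
#include <stddef.h>

struct alloc_ctx {
	int buffers_ready;		/* stands in for the buffer data structures */
	_Atomic(void *) vma;		/* published last */
};

static void example_set_vma(struct alloc_ctx *a, void *vma)
{
	a->buffers_ready = 1;
	/* Release store: the setup above is ordered before the publish,
	 * mirroring the smp_wmb() in binder_alloc_set_vma(). */
	atomic_store_explicit(&a->vma, vma, memory_order_release);
}

static void *example_get_vma(struct alloc_ctx *a)
{
	/* Acquire load: pairs with the release store, mirroring the
	 * smp_rmb() in binder_alloc_get_vma(). */
	return atomic_load_explicit(&a->vma, memory_order_acquire);
}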

@@ -87,10 +87,10 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
blk_status_t null_zone_report(struct nullb *nullb,
struct nullb_cmd *cmd);
void null_zone_write(struct nullb_cmd *cmd);
void null_zone_reset(struct nullb_cmd *cmd);
blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors);
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
}
static inline void null_zone_exit(struct nullb_device *dev) {}
static inline blk_status_t null_zone_report(struct nullb *nullb,
struct nullb_cmd *cmd)
struct bio *bio)
{
return BLK_STS_NOTSUPP;
}
static inline void null_zone_write(struct nullb_cmd *cmd) {}
static inline void null_zone_reset(struct nullb_cmd *cmd) {}
static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
{
}
static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */

@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
}
}
static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
if (dev->queue_mode == NULL_Q_BIO) {
if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
cmd->error = null_zone_report(nullb, cmd->bio);
return true;
}
} else {
if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
cmd->error = null_zone_report(nullb, cmd->rq->bio);
return true;
}
}
return false;
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
int err = 0;
if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
cmd->error = null_zone_report(nullb, cmd);
if (cmd_report_zone(nullb, cmd))
goto out;
}
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
cmd->error = errno_to_blk_status(err);
if (!cmd->error && dev->zoned) {
if (req_op(cmd->rq) == REQ_OP_WRITE)
null_zone_write(cmd);
else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
null_zone_reset(cmd);
sector_t sector;
unsigned int nr_sectors;
int op;
if (dev->queue_mode == NULL_Q_BIO) {
op = bio_op(cmd->bio);
sector = cmd->bio->bi_iter.bi_sector;
nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
} else {
op = req_op(cmd->rq);
sector = blk_rq_pos(cmd->rq);
nr_sectors = blk_rq_sectors(cmd->rq);
}
if (op == REQ_OP_WRITE)
null_zone_write(cmd, sector, nr_sectors);
else if (op == REQ_OP_ZONE_RESET)
null_zone_reset(cmd, sector);
}
out:
/* Complete IO by inline, softirq or timer */

@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
unsigned int zno, unsigned int nr_zones)
static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
unsigned int zno, unsigned int nr_zones)
{
struct blk_zone_report_hdr *hdr = NULL;
struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
void *addr;
unsigned int zones_to_cpy;
bio_for_each_segment(bvec, rq->bio, iter) {
bio_for_each_segment(bvec, bio, iter) {
addr = kmap_atomic(bvec.bv_page);
zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
}
}
blk_status_t null_zone_report(struct nullb *nullb,
struct nullb_cmd *cmd)
blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
{
struct nullb_device *dev = nullb->dev;
struct request *rq = cmd->rq;
unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
unsigned int nr_zones = dev->nr_zones - zno;
unsigned int max_zones = (blk_rq_bytes(rq) /
sizeof(struct blk_zone)) - 1;
unsigned int max_zones;
max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
nr_zones = min_t(unsigned int, nr_zones, max_zones);
null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
return BLK_STS_OK;
}
void null_zone_write(struct nullb_cmd *cmd)
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
struct request *rq = cmd->rq;
sector_t sector = blk_rq_pos(rq);
unsigned int rq_sectors = blk_rq_sectors(rq);
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
/* Writes must be at the write pointer position */
if (blk_rq_pos(rq) != zone->wp) {
if (sector != zone->wp) {
cmd->error = BLK_STS_IOERR;
break;
}
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += rq_sectors;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->len)
zone->cond = BLK_ZONE_COND_FULL;
break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
}
}
void null_zone_reset(struct nullb_cmd *cmd)
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
struct request *rq = cmd->rq;
unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
zone->cond = BLK_ZONE_COND_EMPTY;

@@ -59,8 +59,6 @@ enum bt_states {
BT_STATE_RESET3,
BT_STATE_RESTART,
BT_STATE_PRINTME,
BT_STATE_CAPABILITIES_BEGIN,
BT_STATE_CAPABILITIES_END,
BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
};
@@ -86,7 +84,6 @@ struct si_sm_data {
int error_retries; /* end of "common" fields */
int nonzero_status; /* hung BMCs stay all 0 */
enum bt_states complete; /* to divert the state machine */
int BT_CAP_outreqs;
long BT_CAP_req2rsp;
int BT_CAP_retries; /* Recommended retries */
};
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
case BT_STATE_RESET3: return("RESET3");
case BT_STATE_RESTART: return("RESTART");
case BT_STATE_LONG_BUSY: return("LONG_BUSY");
case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
case BT_STATE_CAPABILITIES_END: return("CAP_END");
}
return("BAD STATE");
}
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
bt->complete = BT_STATE_IDLE; /* end here */
bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
}
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
unsigned char status, BT_CAP[8];
unsigned char status;
static enum bt_states last_printed = BT_STATE_PRINTME;
int i;
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
bt->timeout = bt->BT_CAP_req2rsp;
/* Read BT capabilities if it hasn't been done yet */
if (!bt->BT_CAP_outreqs)
BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
SI_SM_CALL_WITHOUT_DELAY);
BT_SI_SM_RETURN(SI_SM_IDLE);
case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_STATE_CHANGE(BT_STATE_XACTION_START,
SI_SM_CALL_WITH_DELAY);
/*
* Get BT Capabilities, using timing of upper level state machine.
* Set outreqs to prevent infinite loop on timeout.
*/
case BT_STATE_CAPABILITIES_BEGIN:
bt->BT_CAP_outreqs = 1;
{
unsigned char GetBT_CAP[] = { 0x18, 0x36 };
bt->state = BT_STATE_IDLE;
bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
}
bt->complete = BT_STATE_CAPABILITIES_END;
BT_STATE_CHANGE(BT_STATE_XACTION_START,
SI_SM_CALL_WITH_DELAY);
case BT_STATE_CAPABILITIES_END:
i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
bt_init_data(bt, bt->io);
if ((i == 8) && !BT_CAP[2]) {
bt->BT_CAP_outreqs = BT_CAP[3];
bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
bt->BT_CAP_retries = BT_CAP[7];
} else
printk(KERN_WARNING "IPMI BT: using default values\n");
if (!bt->BT_CAP_outreqs)
bt->BT_CAP_outreqs = 1;
printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
bt->timeout = bt->BT_CAP_req2rsp;
return SI_SM_CALL_WITHOUT_DELAY;
default: /* should never occur */
return error_recovery(bt,
status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
static int bt_detect(struct si_sm_data *bt)
{
unsigned char GetBT_CAP[] = { 0x18, 0x36 };
unsigned char BT_CAP[8];
enum si_sm_result smi_result;
int rv;
/*
* It's impossible for the BT status and interrupt registers to be
* all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
return 1;
reset_flags(bt);
/*
* Try getting the BT capabilities here.
*/
rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
if (rv) {
dev_warn(bt->io->dev,
"Can't start capabilities transaction: %d\n", rv);
goto out_no_bt_cap;
}
smi_result = SI_SM_CALL_WITHOUT_DELAY;
for (;;) {
if (smi_result == SI_SM_CALL_WITH_DELAY ||
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = bt_event(bt, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = bt_event(bt, 0);
} else
break;
}
rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
bt_init_data(bt, bt->io);
if (rv < 8) {
dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
goto out_no_bt_cap;
}
if (BT_CAP[2]) {
dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
out_no_bt_cap:
dev_warn(bt->io->dev, "using default values\n");
} else {
bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
bt->BT_CAP_retries = BT_CAP[7];
}
dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
return 0;
}

@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
rv = handlers->start_processing(send_info, intf);
if (rv)
goto out;
goto out_err;
rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
if (rv) {
dev_err(si_dev, "Unable to get the device id: %d\n", rv);
goto out;
goto out_err_started;
}
mutex_lock(&intf->bmc_reg_mutex);
rv = __scan_channels(intf, &id);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
out:
if (rv) {
ipmi_bmc_unregister(intf);
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_srcu(&ipmi_interfaces_srcu);
cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
} else {
/*
* Keep memory order straight for RCU readers. Make
* sure everything else is committed to memory before
* setting intf_num to mark the interface valid.
*/
smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/*
* Keep memory order straight for RCU readers. Make
* sure everything else is committed to memory before
* setting intf_num to mark the interface valid.
*/
smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
}
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
return 0;
out_err_bmc_reg:
ipmi_bmc_unregister(intf);
out_err_started:
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_srcu(&ipmi_interfaces_srcu);
cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
return rv;
}
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
}
srcu_read_unlock(&intf->users_srcu, index);
intf->handlers->shutdown(intf->send_info);
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
cleanup_smi_msgs(intf);
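
The ipmi_register_smi() rework above is the standard kernel unwind idiom:
one goto label per teardown step, taken in reverse order of setup.
Schematically (the helpers below are stubs, not the ipmi functions):

static int start_processing(void) { return 0; }	/* step 1 (stub) */
static int get_device_id(void) { return 0; }	/* step 2 (stub) */
static int scan_channels(void) { return 0; }	/* step 3 (stub) */
static void stop_processing(void) { }		/* undoes step 1 (stub) */

static int example_register(void)
{
	int rv;

	rv = start_processing();
	if (rv)
		goto out_err;
	rv = get_device_id();
	if (rv)
		goto out_err_started;
	rv = scan_channels();
	if (rv)
		goto out_err_started;
	return 0;

out_err_started:
	stop_processing();
out_err:
	return rv;
}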

@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
si_to_str[new_smi->io.si_type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
out_err:
kfree(init_name);
return 0;
out_err:
if (new_smi->intf) {
ipmi_unregister_smi(new_smi->intf);
new_smi->intf = NULL;
}
kfree(init_name);
return rv;
}
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
kfree(smi_info->si_sm);
smi_info->si_sm = NULL;
smi_info->intf = NULL;
}
/*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
list_del(&smi_info->link);
if (smi_info->intf) {
if (smi_info->intf)
ipmi_unregister_smi(smi_info->intf);
smi_info->intf = NULL;
}
if (smi_info->pdev) {
if (smi_info->pdev_registered)

@@ -181,6 +181,8 @@ struct ssif_addr_info {
struct device *dev;
struct i2c_client *client;
struct i2c_client *added_client;
struct mutex clients_mutex;
struct list_head clients;
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
complete(&ssif_info->wake_thread);
kthread_stop(ssif_info->thread);
}
/*
* No message can be outstanding now, we have removed the
* upper layer and it permitted us to do so.
*/
kfree(ssif_info);
}
static int ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
struct ipmi_smi *intf;
struct ssif_addr_info *addr_info;
if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
* After this point, we won't deliver anything asychronously
* to the message handler. We can unregister ourself.
*/
intf = ssif_info->intf;
ssif_info->intf = NULL;
ipmi_unregister_smi(intf);
ipmi_unregister_smi(ssif_info->intf);
list_for_each_entry(addr_info, &ssif_infos, link) {
if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
}
}
kfree(ssif_info);
return 0;
}
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
out:
if (rv) {
if (addr_info)
addr_info->client = NULL;
dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
kfree(ssif_info);
}
@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
if (adev->type != &i2c_adapter_type)
return 0;
addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
&addr_info->binfo);
if (!addr_info->adapter_name)
return 1; /* Only try the first I2C adapter by default. */
@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
i2c_unregister_device(addr_info->added_client);
list_del(&addr_info->link);
kfree(addr_info);
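The new added_client field separates "the client we attached to" from "the client we created", so ssif_platform_remove() only unregisters devices this driver instantiated itself. A minimal sketch of the ownership rule — my_info and the helpers are hypothetical, and i2c_new_device() is the 4.19-era API, which returns NULL on failure:

#include <linux/i2c.h>
#include <linux/errno.h>

struct my_info {
	struct i2c_board_info binfo;
	struct i2c_client *added_client;	/* set only if we created it */
};

static int my_attach(struct my_info *info, struct i2c_adapter *adap)
{
	/* Remember the client we instantiate ourselves... */
	info->added_client = i2c_new_device(adap, &info->binfo);
	return info->added_client ? 0 : -ENODEV;
}

static void my_detach(struct my_info *info)
{
	/* ...so teardown never unregisters a client owned by someone else. */
	if (info->added_client)
		i2c_unregister_device(info->added_client);
	info->added_client = NULL;
}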


@ -16,6 +16,8 @@
#include "kcs_bmc.h"
#define DEVICE_NAME "ipmi-kcs"
#define KCS_MSG_BUFSIZ 1000
#define KCS_ZERO_DATA 0
@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
if (!kcs_bmc)
return NULL;
spin_lock_init(&kcs_bmc->lock);
kcs_bmc->channel = channel;
@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
return NULL;
kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
DEVICE_NAME, channel);
kcs_bmc->miscdev.fops = &kcs_bmc_fops;
return kcs_bmc;
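With the dev_set_name() call dropped, the misc device now gets its own devm-managed name instead of renaming the parent device. The idiom, reduced to a sketch (my_register_misc and its parameters are hypothetical):

#include <linux/device.h>
#include <linux/miscdevice.h>

static int my_register_misc(struct device *dev, struct miscdevice *misc,
			    const char *base, u32 chan)
{
	/* The string lives as long as dev; no manual kfree() needed. */
	misc->name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", base, chan);
	if (!misc->name)
		return -ENOMEM;

	misc->minor = MISC_DYNAMIC_MINOR;
	misc->parent = dev;
	return misc_register(misc);
}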


@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
int ret;
struct device *dev = &mbdev->dev;
mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
if (!mic_dma_dev) {
ret = -ENOMEM;
goto alloc_error;
@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
reg_error:
mic_dma_uninit(mic_dma_dev);
init_error:
mic_dma_dev = NULL;
alloc_error:
dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@ -674,7 +673,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
mic_dma_uninit(mic_dma_dev);
}
/* DEBUGFS CODE */


@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
/* Create region for each port */
fme_region = dfl_fme_create_region(pdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
goto destroy_region;
}
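dfl_fme_create_region() returns an error pointer, never NULL, so the old !fme_region test could not fire. The IS_ERR()/PTR_ERR() convention in isolation (make_thing() is a hypothetical ERR_PTR-returning allocator):

#include <linux/err.h>

struct thing;
struct thing *make_thing(void);		/* returns ERR_PTR(-E...) on failure */

static long use_thing(void)
{
	struct thing *t = make_thing();

	if (IS_ERR(t))			/* a NULL check would test false here */
		return PTR_ERR(t);	/* recover the errno encoded in the pointer */

	/* ... use t ... */
	return 0;
}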


@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
size = amdgpu_bo_size(p->uf_entry.robj);
if (size != PAGE_SIZE || (data->offset + 8) > size) {
r = -EINVAL;
goto error_unref;
}
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
r = -EINVAL;
goto error_unref;
}
*offset = data->offset;
return 0;
error_unref:
amdgpu_bo_unref(&p->uf_entry.robj);
return r;
}
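The rework funnels every failure after the lookup through a single error_unref label, so the buffer-object reference is dropped exactly once on all paths. The shape of that pattern, reduced to a compilable sketch (all names hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

struct ref_obj { int refcount; };

static void obj_get(struct ref_obj *o) { o->refcount++; }
static void obj_put(struct ref_obj *o) { o->refcount--; }

static int demo_acquire(struct ref_obj *o, bool size_ok, bool owner_ok)
{
	int r;

	obj_get(o);			/* reference taken up front */

	if (!size_ok) {
		r = -EINVAL;
		goto error_unref;
	}
	if (!owner_ok) {
		r = -EINVAL;
		goto error_unref;
	}
	return 0;			/* success: caller keeps the reference */

error_unref:
	obj_put(o);			/* every failure path drops it exactly once */
	return r;
}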
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@ -1262,10 +1269,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
error_abort:
dma_fence_put(&job->base.s_fence->finished);
job->base.s_fence = NULL;
amdgpu_mn_unlock(p->mn);
error_unlock:
amdgpu_job_free(job);
return r;
}


@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,


@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_4_2[] =


@ -32,6 +32,7 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
if (kthread) {
if (!mmget_not_zero(kvm->mm))
return -EFAULT;
use_mm(kvm->mm);
}
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
if (kthread) {
unuse_mm(kvm->mm);
mmput(kvm->mm);
}
return ret;
}
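A kthread has no user mm of its own, so before touching guest memory it must both pin and adopt the owner's mm; mmget_not_zero() fails once the owning process has exited, which is exactly the window this fix closes. The pairing in isolation (4.19-era API; later kernels replace use_mm()/unuse_mm() with kthread_use_mm()):

#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <linux/errno.h>

static int borrow_mm_and_work(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))	/* owner already gone: bail out */
		return -EFAULT;

	use_mm(mm);			/* make mm current for this kthread */
	/* ... safely copy to/from the borrowed address space here ... */
	unuse_mm(mm);

	mmput(mm);			/* drop the reference taken above */
	return 0;
}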


@ -42,8 +42,6 @@
#define DEVICE_TYPE_EFP3 0x20
#define DEVICE_TYPE_EFP4 0x10
struct opregion_header {
u8 signature[16];
u32 size;
@ -63,6 +61,10 @@ struct bdb_data_header {
u16 size; /* data size */
} __packed;
/* To support a Windows guest with opregion, hardcode the emulated
* bdb header version as '186'; the corresponding child_device_config
* length should then be '33', not '38'.
*/
struct efp_child_device_config {
u16 handle;
u16 device_type;
@ -109,12 +111,6 @@ struct efp_child_device_config {
u8 mipi_bridge_type; /* 171 */
u16 device_class_ext;
u8 dvo_function;
} __packed;
struct vbt {
@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
v->bdb_header.version = 186; /* child_dev_size = 33 */
v->bdb_header.header_size = sizeof(v->bdb_header);
v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
/* child device */
num_child = 4; /* each port has one child */
v->general_definitions.child_dev_size =
sizeof(struct efp_child_device_config);
v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
/* size will include child devices */
v->general_definitions_header.size =
sizeof(struct bdb_general_definitions) +
num_child * v->general_definitions.child_dev_size;
/* portA */
v->child0.handle = DEVICE_TYPE_EFP1;


@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->pcu_lock);
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
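intel_wait_for_register() is i915's bounded register poll; the fix only moves the deadline from the BSpec's 42ms to 100ms. The generic shape of such a poll, as a sketch (not the i915 helper itself):

#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_for_bits_clear(void __iomem *reg, u32 mask,
			       unsigned int timeout_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_ms);

	for (;;) {
		if (!(readl(reg) & mask))
			return 0;		/* hardware reached the state */
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT;	/* e.g. IPS still enabled */
		usleep_range(100, 200);		/* don't hammer the bus */
	}
}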


@ -181,8 +181,9 @@ struct intel_overlay {
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
/* register access */
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
struct i915_gem_active last_flip;
};
@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
static void intel_overlay_submit_request(struct intel_overlay *overlay,
struct i915_request *rq,
i915_gem_retire_fn retire)
@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
struct overlay_registers __iomem *regs = overlay->regs;
struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (!overlay->active) {
u32 oconfig;
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
iowrite32(overlay_cmd_reg(params), &regs->OCMD);
ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
@ -901,7 +866,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
iowrite32(0, &overlay->regs->OCMD);
return intel_overlay_off(overlay);
}
@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
int ret;
overlay = dev_priv->overlay;
@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay->contrast = attrs->contrast;
overlay->saturation = attrs->saturation;
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
if (IS_GEN2(dev_priv))
@ -1386,12 +1339,47 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
return ret;
}
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
if (obj == NULL)
obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_put_bo;
}
if (use_phys)
overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
else
overlay->flip_addr = i915_ggtt_offset(vma);
overlay->regs = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(overlay->regs)) {
err = PTR_ERR(overlay->regs);
goto err_put_bo;
}
overlay->reg_bo = obj;
return 0;
err_put_bo:
i915_gem_object_put(obj);
return err;
}
void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
int ret;
if (!HAS_OVERLAY(dev_priv))
@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
if (!overlay)
return;
overlay->i915 = dev_priv;
/* init all values */
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
init_request_active(&overlay->last_flip, NULL);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
goto out_free;
ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
if (ret)
goto out_reg_bo;
mutex_unlock(&dev_priv->drm.struct_mutex);
memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
DRM_INFO("Initialized overlay support.\n");
return;
out_reg_bo:
i915_gem_object_put(overlay->reg_bo);
out_free:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
return;
}
void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
overlay = fetch_and_zero(&dev_priv->overlay);
if (!overlay)
return;
/*
* The bo's should be free'd by the generic code already.
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
WARN_ON(overlay->active);
i915_gem_object_put(overlay->reg_bo);
kfree(overlay);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
u32 isr;
};
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
if (!overlay || !overlay->active)
return NULL;
@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
error->isr = I915_READ(ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
return error;
}
void


@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int ret;
if (dpcd >= 0x12) {
/* Even if we're enabling MST, start with disabling the
* branching unit to clear any sink-side MST topology state
* that wasn't set by us
*/
ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
if (ret < 0)
return ret;
if (state) {
/* Now, start initializing */
ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
DP_MST_EN);
if (ret < 0)
return ret;
}
}
return nvif_mthd(disp, 0, &args, sizeof(args));
@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
struct drm_dp_aux *aux;
int ret;
bool old_state, new_state;
u8 mstm_ctrl;
if (!mstm)
return 0;
mutex_lock(&mstm->mgr.lock);
old_state = mstm->mgr.mst_state;
new_state = old_state;
aux = mstm->mgr.aux;
if (old_state) {
/* Just check that the MST hub is still as we expect it */
ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
new_state = false;
}
} else if (dpcd[0] >= 0x12) {
ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
if (ret < 0)
goto probe_error;
if (!(dpcd[1] & DP_MST_CAP))
dpcd[0] = 0x11;
else
new_state = allow;
}
if (new_state == old_state) {
mutex_unlock(&mstm->mgr.lock);
return new_state;
}
ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
if (ret)
goto probe_error;
mutex_unlock(&mstm->mgr.lock);
ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
if (ret)
return nv50_mstm_enable(mstm, dpcd[0], 0);
return new_state;
probe_error:
mutex_unlock(&mstm->mgr.lock);
return ret;
}
static void
@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,


@ -409,59 +409,45 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int i, ret;
bool switcheroo_ddc = false;
drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
ret = nouveau_dp_detect(nv_encoder);
if (ret == NOUVEAU_DP_MST)
return NULL;
else if (ret == NOUVEAU_DP_SST)
found = nv_encoder;
break;
case DCB_OUTPUT_LVDS:
switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC);
/* fall-through */
default:
if (!nv_encoder->i2c)
break;
if (switcheroo_ddc)
vga_switcheroo_lock_ddc(dev->pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
found = nv_encoder;
if (switcheroo_ddc)
vga_switcheroo_unlock_ddc(dev->pdev);
break;
}
if (found)
break;
}
return found;
}
static struct nouveau_encoder *
@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
/* Outputs are only polled while runtime active, so resuming the
* device here is unnecessary (and would deadlock upon runtime suspend
* because it waits for polling to finish). We do, however, want to
* prevent the autosuspend timer from elapsing during this operation
* if possible.
*/
if (drm_kms_helper_is_poll_worker()) {
pm_runtime_get_noresume(dev->dev);
} else {
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
@ -638,10 +628,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
/* We can't block here if there's a pending PM request
* running, as we'll deadlock nouveau_display_fini() when it
* calls nvif_put() on our nvif_notify struct. So, simply
* defer the hotplug event until the device finishes resuming.
*/
NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
name);
schedule_work(&drm->hpd_work);
pm_runtime_put_noidle(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
} else if (ret != 1 && ret != -EACCES) {
NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
name, ret);
return NVIF_NOTIFY_DROP;
}
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
drm_helper_hpd_irq_event(connector->dev);
}
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
}
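pm_runtime_get() is the asynchronous variant: it returns 1 if the device was already active, 0 if it only queued a resume, and a negative errno (notably -EACCES when runtime PM is disabled) on failure. The handler above branches on all three so it never blocks inside the notifier. The decision table as a sketch (do_handle_event and the work item are hypothetical):

#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

static void do_handle_event(struct device *dev)
{
	/* hypothetical: the actual event processing */
}

static int handle_event_nonblocking(struct device *dev,
				    struct work_struct *deferred)
{
	int ret = pm_runtime_get(dev);	/* async: never waits for resume */

	if (ret == 0) {
		/* Resume only queued: defer the event, and drop the usage
		 * count without triggering an idle check. */
		schedule_work(deferred);
		pm_runtime_put_noidle(dev);
		return 0;
	}
	if (ret < 0 && ret != -EACCES)
		return ret;		/* real RPM failure: drop the event */

	/* ret == 1 (already active) or -EACCES (RPM disabled): safe now. */
	do_handle_event(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}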


@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
{
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
struct acpi_bus_event *info = data;
int ret;
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
ret = pm_runtime_get(drm->dev->dev);
if (ret == 1 || ret == -EACCES) {
/* If the GPU is already awake, or in a state
* where we can't wake it up, it can handle
* its own hotplug events.
*/
pm_runtime_put_autosuspend(drm->dev->dev);
} else if (ret == 0) {
/* This may be the only indication we receive
* of a connector hotplug on a runtime
* suspended GPU, schedule hpd_work to check.
*/
NV_DEBUG(drm, "ACPI requested connector reprobe\n");
schedule_work(&drm->hpd_work);
pm_runtime_put_noidle(drm->dev->dev);
} else {
NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
ret);
}
/* acpi-video should not generate keypresses for this */
return NOTIFY_BAD;
@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
/* enable connector detection and polling for connectors without HPD
* support
*/
drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
}
void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
}
drm_connector_list_iter_end(&conn_iter);
if (!runtime)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
}
}
nouveau_display_fini(dev, true, runtime);
return 0;
}
nouveau_display_fini(dev, true, runtime);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;


@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);


@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
mutex_unlock(&drm->master.lock);
}
if (ret) {
NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->device.object, mmus);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported MMU class\n");
goto done;
}
ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, vmms);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported VMM class\n");
goto done;
}
ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
if (ret) {
NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, mems);
if (ret < 0) {
NV_PRINTK(err, cli, "No supported MEM class\n");
goto done;
}
@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
}
return 0;
fail_dispinit:
@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
nouveau_display_fini(dev, false, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);


@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
console_unlock();
if (state == FBINFO_STATE_RUNNING) {
nouveau_fbcon_hotplug_resume(drm->fbcon);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
}
@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
schedule_work(&drm->fbcon_work);
}
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_fbdev *fbcon = drm->fbcon;
int ret;
if (!fbcon)
return;
mutex_lock(&fbcon->hotplug_lock);
ret = pm_runtime_get(dev->dev);
if (ret == 1 || ret == -EACCES) {
drm_fb_helper_hotplug_event(&fbcon->helper);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
} else if (ret == 0) {
/* If the GPU was already in the process of suspending before
* this event happened, then we can't block here as we'll
* deadlock the runtime pmops since they wait for us to
* finish. So, just defer this event for when we runtime
* resume again. It will be handled by fbcon_work.
*/
NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
fbcon->hotplug_waiting = true;
pm_runtime_put_noidle(drm->dev->dev);
} else {
DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
ret);
}
mutex_unlock(&fbcon->hotplug_lock);
}
void
nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
{
struct nouveau_drm *drm;
if (!fbcon)
return;
drm = nouveau_drm(fbcon->helper.dev);
mutex_lock(&fbcon->hotplug_lock);
if (fbcon->hotplug_waiting) {
fbcon->hotplug_waiting = false;
NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
drm_fb_helper_hotplug_event(&fbcon->helper);
}
mutex_unlock(&fbcon->hotplug_lock);
}
int
nouveau_fbcon_init(struct drm_device *dev)
{
@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm->fbcon = fbcon;
INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
mutex_init(&fbcon->hotplug_lock);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);


@ -41,6 +41,9 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
struct mutex hotplug_lock;
bool hotplug_waiting;
};
void nouveau_fbcon_restore(void);
@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
void nouveau_fbcon_accel_restore(struct drm_device *dev);
void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
extern int nouveau_nofbaccel;
#endif /* __NV50_FBCON_H__ */


@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
pr_err("VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pmops_resume(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_switcheroo_optimus_dsm();
nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;


@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
struct nvkm_outp *outp, *outt, *pair;
struct nvkm_conn *conn;
struct nvkm_head *head;
struct nvkm_ior *ior;
struct nvbios_connE connE;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
return ret;
}
/* Enforce identity-mapped SOR assignment for panels, which have
* certain bits (i.e. backlight controls) wired to a specific SOR.
*/
list_for_each_entry(outp, &disp->outp, head) {
if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
outp->conn->info.type == DCB_CONNECTOR_eDP) {
ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
if (!WARN_ON(!ior))
ior->identity = true;
outp->identity = true;
}
}
i = 0;
list_for_each_entry(head, &disp->head, head)
i = max(i, head->id + 1);


@ -28,6 +28,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <nvif/event.h>
@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
}
static void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
/* Execute DisableLT script from DP Info Table. */
nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
init.outp = &dp->outp.info;
@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
);
}
static void
nvkm_dp_release(struct nvkm_outp *outp)
{
struct nvkm_dp *dp = nvkm_dp(outp);
/* Prevent link from being retrained if sink sends an IRQ. */
atomic_set(&dp->lt.done, 0);
dp->outp.ior->dp.nr = 0;
}
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
@ -491,7 +498,7 @@ nvkm_dp_acquire(struct nvkm_outp *outp)
return ret;
}
static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
struct nvkm_i2c_aux *aux = dp->aux;
@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
sizeof(dp->dpcd)))
return true;
}
if (dp->present) {
@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
}
atomic_set(&dp->lt.done, 0);
return false;
}
static int
@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
struct nvkm_dp *dp = nvkm_dp(outp);
nvkm_notify_put(&dp->outp.conn->hpd);
/* eDP panels need powering on by us (if the VBIOS doesn't default it
* to on) before doing any AUX channel transactions. LVDS panel power
* is handled by the SOR itself, and not required for LVDS DDC.
*/
if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
if (power == 0)
nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
/* We delay here unconditionally, even if already powered,
* because some laptop panels have a significant resume
* delay before the panel begins responding.
*
* This is likely a bit of a hack, but no better idea for
* handling this at the moment.
*/
msleep(300);
/* If the eDP panel can't be detected, we need to restore
* the panel power GPIO to avoid breaking another output.
*/
if (!nvkm_dp_enable(dp, true) && power == 0)
nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
} else {
nvkm_dp_enable(dp, true);
}
nvkm_notify_get(&dp->hpd);
}
@ -576,6 +613,7 @@ nvkm_dp_func = {
.fini = nvkm_dp_fini,
.acquire = nvkm_dp_acquire,
.release = nvkm_dp_release,
.disable = nvkm_dp_disable,
};
static int


@ -16,6 +16,7 @@ struct nvkm_ior {
char name[8];
struct list_head head;
bool identity;
struct nvkm_ior_state {
struct nvkm_outp *outp;


@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
nv50_disp_super_ied_off(head, ior, 2);
/* If we're shutting down the OR's only active head, execute
* the output path's disable function.
*/
if (ior->arm.head == (1 << head->id)) {
if ((outp = ior->arm.outp) && outp->func->disable)
outp->func->disable(outp, ior);
}
}


@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
if (ior) {
outp->acquired &= ~user;
if (!outp->acquired) {
if (outp->func->release && outp->ior)
outp->func->release(outp);
outp->ior->asy.outp = NULL;
outp->ior = NULL;
}
@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
if (proto == UNKNOWN)
return -ENOSYS;
/* Deal with panels requiring identity-mapped SOR assignment. */
if (outp->identity) {
ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
if (WARN_ON(!ior))
return -ENOSPC;
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity &&
!ior->asy.outp && ior->type == type && !ior->arm.outp &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
* but will be released during the next modeset.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->type == type &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
outp->index = index;
outp->info = *dcbE;
outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",


@ -13,10 +13,10 @@ struct nvkm_outp {
struct dcb_output info;
struct nvkm_i2c_bus *i2c;
struct list_head head;
struct nvkm_conn *conn;
bool identity;
/* Assembly state. */
#define NVKM_OUTP_PRIV 1
@ -41,7 +41,8 @@ struct nvkm_outp_func {
void (*init)(struct nvkm_outp *);
void (*fini)(struct nvkm_outp *);
int (*acquire)(struct nvkm_outp *);
void (*release)(struct nvkm_outp *);
void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
};
#define OUTP_MSG(o,l,f,a...) do { \


@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pmuR pmu;
if (!nvbios_pmuRm(bios, type, &pmu))
return -EINVAL;
if (!post)
return 0;
@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
/* Upload DEVINIT application from VBIOS onto PMU. */
ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret) {
nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
return ret;
}
/* Upload tables required by opcodes in boot scripts. */
if (post) {
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
pmu_data(init, pmu, img, len);
}
/* Upload boot scripts. */
if (post) {
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
pmu_data(init, pmu, img, len);
}
/* Execute DEVINIT. */
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -ETIMEDOUT;
}
/* Optional: Execute PRE_OS application on PMU, which should at
* least take care of fans until a full PMU has been loaded.
*/
pmu_load(init, 0x01, post, NULL, NULL);
return 0;
}
static const struct nvkm_devinit_func


@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
if (inst && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);


@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),


@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
goto alloc_err;
}
parser->device = device;
@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
kfree(parser->collection_stack);
vfree(parser);
device->status |= HID_STAT_PARSED;
return 0;
@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
kfree(parser->collection_stack);
alloc_err:
vfree(parser);
hid_close_report(device);
return ret;


@ -88,6 +88,7 @@
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
#define USB_VENDOR_ID_APPLE 0x05ac
#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
@ -157,6 +158,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@ -528,9 +530,6 @@
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@ -950,6 +949,7 @@
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0


@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
input_dev->dev.parent = &hid->dev;
hidinput->input = input_dev;
hidinput->application = application;
list_add_tail(&hidinput->list, &hid->inputs);
INIT_LIST_HEAD(&hidinput->reports);
@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hid->inputs, list) {
if (hidinput->application == report->application)
return hidinput;
}
@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
input_unregister_device(hidinput->input);
else
input_free_device(hidinput->input);
kfree(hidinput->name);
kfree(hidinput);
}


@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
struct hid_usage *usage,
enum latency_mode latency,
bool surface_switch,
bool button_switch,
bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
switch (usage->hid) {
case HID_DG_INPUTMODE:
/*
* Some elan panels wrongly declare 2 input mode features,
* and silently ignore when we set the value in the second
* field. Skip the second feature and hope for the best.
*/
if (*inputmode_found)
return false;
if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
report_len = hid_report_len(report);
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
}
field->value[index] = td->inputmode_value;
*inputmode_found = true;
return true;
case HID_DG_CONTACTMAX:
@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
struct hid_usage *usage;
int i, j;
bool update_report;
bool inputmode_found = false;
rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list) {
@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
usage,
latency,
surface_switch,
button_switch,
&inputmode_found))
update_report = true;
}
}
@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);


@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),


@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
/*
* Checks if the report descriptor of Thinkpad Helix 2 has a logical
* minimum for the magnetic flux axes that is greater than the maximum.
*/
if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
*rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
rdesc[921] == 0x07 && rdesc[922] == 0x00) {
/* Sets negative logical minimum for mag x, y and z */
rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
}
return rdesc;
}
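report_fixup() runs before the descriptor is parsed and may patch the buffer in place; the only contract is to return the (possibly modified) descriptor. Reduced to a sketch — the product ID and offsets below are hypothetical, whereas the real code above matches the exact 2558-byte Helix 2 descriptor before touching anything:

#include <linux/hid.h>

#define MY_BROKEN_PRODUCT 0x1234	/* hypothetical device */

static __u8 *my_report_fixup(struct hid_device *hdev, __u8 *rdesc,
			     unsigned int *rsize)
{
	/* Match one exact, known-broken descriptor before rewriting bytes. */
	if (hdev->product == MY_BROKEN_PRODUCT &&
	    *rsize > 10 && rdesc[9] == 0x17)
		rdesc[10] = 0xc0;	/* e.g. make a logical minimum negative */

	return rdesc;
}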
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
.probe = sensor_hub_probe,
.remove = sensor_hub_remove,
.raw_event = sensor_hub_raw_event,
.report_fixup = sensor_hub_report_fixup,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
.resume = sensor_hub_resume,


@ -170,8 +170,6 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_enable(dev);
enable_irq(client->irq);
/* Instead of resetting the device, simply power it on. This
* solves "incomplete reports" on Raydium devices 2386:3118 and
* 2386:4B33
*/
ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
if (ret)
return ret;
/* Some devices need to re-send report descr cmd
* after resume; after this it will be back to normal.
* Otherwise it issues too many incomplete reports.
*/


@ -29,6 +29,7 @@
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
#define CNL_H_DEVICE_ID 0xA37C
#define SPT_H_DEVICE_ID 0xA135
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0


@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);


@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
if (!attribute->show)
return -EIO;
if (chan->state != CHANNEL_OPENED_STATE)
return -EINVAL;
return attribute->show(chan, buf);
}


@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
{
u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
struct st_lsm6dsx_hw *hw = sensor->hw;
struct st_lsm6dsx_sensor *cur_sensor;
int i, err, data;
__le16 wdata;
if (!hw->sip)
return 0;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
cur_sensor = iio_priv(hw->iio_devs[i]);
@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
: cur_sensor->watermark;
fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
}
fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,


@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
static const struct spi_device_id maxim_thermocouple_id[] = {
{"max6675", MAX6675},
{"max31855", MAX31855},
{},
};
MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);


@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid = (union ib_gid *) &addr->sib_addr;
pkey = ntohs(addr->sib_pkey);
mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
goto found;
}
}
}
}
mutex_unlock(&lock);
return -ENODEV;
found:
cma_attach_to_dev(id_priv, cma_dev);
mutex_unlock(&lock);
addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
return 0;
}


@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
if (!uverbs_destroy_uobject(obj, reason))
ret = 0;
else
atomic_set(&obj->usecnt, 0);
}
return ret;
}


@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
static const struct file_operations ucma_fops;
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
if (f.file->f_op != &ucma_fops) {
ret = -EINVAL;
goto file_put;
}
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
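
The added f_op comparison is the standard way to prove that an arbitrary fd was created by this driver before dereferencing its private_data. A minimal userspace analogue of that provenance check (types and names are invented for the example):

#include <stdio.h>

struct ops { const char *name; };
struct handle { const struct ops *ops; void *private_data; };

static const struct ops my_ops = { "ucma-like" };
static const struct ops other_ops = { "something-else" };

static int use_handle(struct handle *h)
{
	if (h->ops != &my_ops) /* handle was not created by this driver */
		return -1;     /* reject rather than misread private_data */
	return 0;
}

int main(void)
{
	struct handle good = { &my_ops, NULL }, bad = { &other_ops, NULL };
	printf("%d %d\n", use_handle(&good), use_handle(&bad)); /* 0 -1 */
	return 0;
}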

@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
if (ib_uverbs_create_uapi(device, uverbs_dev))
goto err;
goto err_uapi;
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
err_class:
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
err_cdev:
cdev_del(&uverbs_dev->cdev);
err_uapi:
clear_bit(devnum, dev_map);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
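
The new err_uapi label restores the usual kernel unwind discipline: each failure jumps to the label that releases exactly what has already been set up, in reverse order. A generic, self-contained sketch of that ladder (the resources here are plain allocations, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

static int setup(char **pa, char **pb)
{
	*pa = malloc(16);
	if (!*pa)
		goto err;
	*pb = malloc(16);
	if (!*pb)
		goto err_a;  /* undo only what already succeeded */
	return 0;

err_a:
	free(*pa);           /* reverse order of acquisition */
err:
	return -1;
}

int main(void)
{
	char *a, *b;

	if (setup(&a, &b))
		return 1;
	free(b);
	free(a);
	puts("ok");
	return 0;
}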

@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
"Failed to destroy Shadow QP");
return rc;
}
bnxt_qplib_free_qp_res(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp);
mutex_lock(&rdev->qp_lock);
list_del(&rdev->qp1_sqp->list);
atomic_dec(&rdev->qp_count);

@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
struct bnxt_qplib_q *sq = &qp->rq;
struct bnxt_qplib_q *sq = &qp->sq;
int rc = 0;
if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {

@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
schp = to_c4iw_cq(qhp->ibqp.send_cq);
if (qhp->ibqp.uobject) {
/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
if (qhp->wq.flushed)
return;
qhp->wq.flushed = 1;
t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);

@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
}
/*
* A secondary bus reset (SBR) issues a hot reset to our device.
* The following routine does a 1s wait after the reset is dropped
* per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 -
* Conventional Reset, paragraph 3, line 35 also says that a 1s
* delay after a reset is required. Per spec requirements,
* the link is either working or not after that point.
* This is an end around to do an SBR during probe time. A new API needs
* to be implemented to have cleaner interface but this fixes the
* current brokenness
*/
return pci_reset_bus(dev);
return pci_bridge_secondary_bus_reset(dev->bus->self);
}
/*

@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_send_sge = dev->dev->caps.max_sq_sg;
props->max_recv_sge = dev->dev->caps.max_rq_sg;
props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_send_sge =
min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
props->max_recv_sge =
min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;

@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
skb_queue_head_init(&skqueue);
netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;

@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
int err;
desc->tfm = essiv->hash_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc->flags = 0;
err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
int i, r;
desc->tfm = lmk->hash_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc->flags = 0;
r = crypto_shash_init(desc);
if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
* requests if driver request queue is full.
*/
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
* requests if driver request queue is full.
*/
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}

@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
unsigned j, size;
desc->tfm = ic->journal_mac;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc->flags = 0;
r = crypto_shash_init(desc);
if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
int r;
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
complete_journal_encrypt, comp);
if (likely(encrypt))
r = crypto_skcipher_encrypt(req);

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2011 Neil Brown
* Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -29,9 +29,6 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
/* Global list of all raid sets */
static LIST_HEAD(raid_sets);
static bool devices_handle_discard_safely = false;
/*
@@ -227,7 +224,6 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
struct list_head list;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
/* Find any raid_set in active slot for @rs on global list */
static struct raid_set *rs_find_active(struct raid_set *rs)
{
struct raid_set *r;
struct mapped_device *md = dm_table_get_md(rs->ti->table);
list_for_each_entry(r, &raid_sets, list)
if (r != rs && dm_table_get_md(r->ti->table) == md)
return r;
return NULL;
}
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
/* Add @rs to global list. */
list_add(&rs->list, &raid_sets);
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
/* Free all @rs allocations and remove it from global list. */
/* Free all @rs allocations */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
list_del(&rs->list);
kfree(rs);
}
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
return 0;
}
/* HM FIXME: get InSync raid_dev? */
/* HM FIXME: get In_Sync raid_dev? */
rdev = &rs->dev[0].rdev;
if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
/* Rebuild particular devices */
if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_setup_recovery(rs, MaxSector);
}
/* A recovering raid set may be resized */
; /* skip setup rs */
} else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
/* Keep array frozen */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
/* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
/* Return string describing the current sync action of @mddev */
static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
/* Return sync state string for @state */
enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
static const char *sync_str(enum sync_state state)
{
/* Has to be in above sync_state order! */
static const char *sync_strs[] = {
"frozen",
"reshape",
"resync",
"check",
"repair",
"recover",
"idle"
};
return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
};
/* Return enum sync_state for @mddev derived from @recovery flags */
static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return "frozen";
return st_frozen;
/* The MD sync thread can be done with io but still be running */
/* The MD sync thread can be done with io or be interrupted but still be running */
if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
(test_bit(MD_RECOVERY_RUNNING, &recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return "reshape";
return st_reshape;
if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
return "resync";
else if (test_bit(MD_RECOVERY_CHECK, &recovery))
return "check";
return "repair";
return st_resync;
if (test_bit(MD_RECOVERY_CHECK, &recovery))
return st_check;
return st_repair;
}
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return "recover";
return st_recover;
if (mddev->reshape_position != MaxSector)
return st_reshape;
}
return "idle";
return st_idle;
}
/*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
sector_t resync_max_sectors)
{
sector_t r;
enum sync_state state;
struct mddev *mddev = &rs->md;
clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
!test_bit(MD_RECOVERY_INTR, &recovery) &&
(test_bit(MD_RECOVERY_NEEDED, &recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
test_bit(MD_RECOVERY_RUNNING, &recovery)))
r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
state = decipher_sync_action(mddev, recovery);
if (r >= resync_max_sectors &&
(!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
(!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
!test_bit(MD_RECOVERY_NEEDED, &recovery) &&
!test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
r = mddev->recovery_cp;
else
r = mddev->curr_resync_completed;
if (state == st_idle && r >= resync_max_sectors) {
/*
* Sync complete.
*/
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
} else if (state == st_recover)
/*
* In case we are recovering, the array is not in sync
* and health chars should show the recovering legs.
*/
;
} else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
else if (state == st_resync)
/*
* If "resync" is occurring, the raid set
* is or may be out of sync hence the health
* characters shall be 'a'.
*/
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
} else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
else if (state == st_reshape)
/*
* If "reshape" is occurring, the raid set
* is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
} else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
else if (state == st_check || state == st_repair)
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
else {
struct md_rdev *rdev;
/*
* We are idle and recovery is needed, prevent 'A' chars race
* caused by components still set to in-sync by constrcuctor.
* caused by components still set to in-sync by constructor.
*/
if (test_bit(MD_RECOVERY_NEEDED, &recovery))
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
sync_action = decipher_sync_action(&rs->md, recovery);
sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
struct mddev *mddev = &rs->md;
struct md_personality *pers = mddev->pers;
/* Don't allow the sync thread to work until the table gets reloaded. */
set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
r = rs_setup_reshape(rs);
if (r)
return r;
/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
/*
* Check any reshape constraints enforced by the personalility
*
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
}
}
/* Suspend because a resume will happen in raid_resume() */
set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
mddev_suspend(mddev);
/*
* Now reshape got set up, update superblocks to
* reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
struct raid_set *rs_active = rs_find_active(rs);
if (rs_active) {
/*
* In case no rebuilds have been requested
* and an active table slot exists, copy
* current resynchonization completed and
* reshape position pointers across from
* suspended raid set in the active slot.
*
* This resumes the new mapping at current
* offsets to continue recover/reshape without
* necessarily redoing a raid set partially or
* causing data corruption in case of a reshape.
*/
if (rs_active->md.curr_resync_completed != MaxSector)
mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
if (rs_active->md.reshape_position != MaxSector)
mddev->reshape_position = rs_active->md.reshape_position;
}
}
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
.version = {1, 13, 2},
.version = {1, 14, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,

@@ -188,6 +188,12 @@ struct dm_pool_metadata {
unsigned long flags;
sector_t data_block_size;
/*
* We reserve a section of the metadata for commit overhead.
* All reported space does *not* include this.
*/
dm_block_t metadata_reserve;
/*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
return dm_tm_commit(pmd->tm, sblock);
}
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
int r;
dm_block_t total;
dm_block_t max_blocks = 4096; /* 16M */
r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
if (r) {
DMERR("could not get size of metadata device");
pmd->metadata_reserve = max_blocks;
} else {
sector_div(total, 10);
pmd->metadata_reserve = min(max_blocks, total);
}
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
return ERR_PTR(r);
}
__set_metadata_reserve(pmd);
return pmd;
}
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
if (!r) {
if (*result < pmd->metadata_reserve)
*result = 0;
else
*result -= pmd->metadata_reserve;
}
up_read(&pmd->root_lock);
return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
int r = -EINVAL;
down_write(&pmd->root_lock);
if (!pmd->fail_io)
if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
if (!r)
__set_metadata_reserve(pmd);
}
up_write(&pmd->root_lock);
return r;
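
The reserve sizing above amounts to: hold back 10% of the metadata device for commit overhead, capped at 4096 blocks (16M with 4k metadata blocks), and fall back to the cap when the device size cannot be read. A small sketch of just that computation (userspace, illustrative values):

#include <stdio.h>

static unsigned long long metadata_reserve(unsigned long long total_blocks)
{
	unsigned long long max_blocks = 4096; /* 16M with 4k blocks */
	unsigned long long tenth = total_blocks / 10;

	return tenth < max_blocks ? tenth : max_blocks;
}

int main(void)
{
	printf("%llu %llu\n", metadata_reserve(10000),
	       metadata_reserve(1000000)); /* prints: 1000 4096 */
	return 0;
}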

@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
/*
* Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
*/
PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
PM_FAIL, /* all I/O fails */
};
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void requeue_bios(struct pool *pool);
static void check_for_space(struct pool *pool)
static bool is_read_only_pool_mode(enum pool_mode mode)
{
return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
}
static bool is_read_only(struct pool *pool)
{
return is_read_only_pool_mode(get_pool_mode(pool));
}
static void check_for_metadata_space(struct pool *pool)
{
int r;
const char *ooms_reason = NULL;
dm_block_t nr_free;
r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
if (r)
ooms_reason = "Could not get free metadata blocks";
else if (!nr_free)
ooms_reason = "No free metadata blocks";
if (ooms_reason && !is_read_only(pool)) {
DMERR("%s", ooms_reason);
set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
}
}
static void check_for_data_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
{
int r;
if (get_pool_mode(pool) >= PM_READ_ONLY)
if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
else
check_for_space(pool);
else {
check_for_metadata_space(pool);
check_for_data_space(pool);
}
return r;
}
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
}
r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
if (r) {
metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
return r;
}
if (!free_blocks) {
/* Let's commit before we use up the metadata reserve. */
r = commit(pool);
if (r)
return r;
}
return 0;
}
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
error_retry_list(pool);
break;
case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
if (old_mode != new_mode)
if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
set_pool_mode(pool, PM_WRITE);
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
if (get_pool_mode(pool) >= PM_READ_ONLY) {
if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
mode = get_pool_mode(pool);
if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
else if (pool->pf.mode == PM_READ_ONLY)
else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");

@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
{
struct scatterlist sg;
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
return crypto_wait_req(crypto_ahash_update(req), wait);
if (likely(!is_vmalloc_addr(data))) {
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
return crypto_wait_req(crypto_ahash_update(req), wait);
} else {
do {
int r;
size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
flush_kernel_vmap_range((void *)data, this_step);
sg_init_table(&sg, 1);
sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
ahash_request_set_crypt(req, &sg, NULL, this_step);
r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r))
return r;
data += this_step;
len -= this_step;
} while (len);
return 0;
}
}
/*

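The loop added above exists because vmalloc memory is only virtually contiguous, so each scatterlist entry may cover at most the remainder of one page. The step arithmetic, reduced to a self-contained userspace sketch (PAGE_SIZE and the buffer are assumptions of the example):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

static unsigned long count_chunks(const char *data, size_t len)
{
	unsigned long chunks = 0;

	while (len) {
		/* never cross a page boundary in one step */
		size_t step = PAGE_SIZE - offset_in_page(data);

		if (step > len)
			step = len;
		/* the real code hashes [data, data + step) here */
		data += step;
		len -= step;
		chunks++;
	}
	return chunks;
}

int main(void)
{
	char *buf = aligned_alloc(PAGE_SIZE, 3 * PAGE_SIZE);

	if (!buf)
		return 1;
	/* start 100 bytes into a page: one partial + two full chunks */
	printf("%lu\n", count_chunks(buf + 100, 3 * PAGE_SIZE - 100)); /* 3 */
	free(buf);
	return 0;
}
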
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
static DEFINE_MUTEX(compass_mutex);
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
return ret;
if (val >= strlen(map))
return -EINVAL;
val = array_index_nospec(val, strlen(map));
mutex_lock(&compass_mutex);
ret = compass_command(c, map[val]);
mutex_unlock(&compass_mutex);
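
array_index_nospec() keeps val inside the array even if the CPU speculates past the bounds check. Its generic fallback computes a branch-free mask; a userspace rendering of that idea (modeled on, but not identical to, the include/linux/nospec.h fallback, and relying on arithmetic right shift as the kernel does):

#include <stdio.h>

/* all-ones if index < size, all-zeroes otherwise, without a branch */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	const char map[] = "heading";          /* stand-in command map */
	unsigned long size = sizeof(map) - 1;
	unsigned long val = 2;                 /* already range-checked */

	val &= index_mask_nospec(val, size);   /* clamps under speculation too */
	printf("%c\n", map[val]);              /* prints: a */
	return 0;
}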

@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
retrc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
retrc = rc;
rc = retrc;
if (rc == H_RESOURCE)
rc = ibmvmc_reset_crq_queue(adapter);

@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
cl = cldev->cl;
mutex_lock(&bus->device_lock);
if (cl->state == MEI_FILE_UNINITIALIZED) {
mutex_lock(&bus->device_lock);
ret = mei_cl_link(cl);
mutex_unlock(&bus->device_lock);
if (ret)
return ret;
goto out;
/* update pointers */
cl->cldev = cldev;
}
mutex_lock(&bus->device_lock);
if (mei_cl_is_connected(cl)) {
ret = 0;
goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
out:
mei_cl_bus_module_put(cldev);
out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
mei_cl_unlink(cldev->cl);
kfree(cldev->cl);
kfree(cldev);
}
static const struct device_type mei_cl_device_type = {
.release = mei_cl_bus_dev_release,
.release = mei_cl_bus_dev_release,
};
/**

@@ -1767,7 +1767,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
}
}
rets = buf->size;
rets = len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);

@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
props_res = (struct hbm_props_response *)mei_msg;
if (props_res->status) {
if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
props_res->me_addr);
} else if (props_res->status) {
dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
props_res->status,
mei_hbm_status_str(props_res->status));
return -EPROTO;
} else {
mei_hbm_me_cl_add(dev, props_res);
}
mei_hbm_me_cl_add(dev, props_res);
/* request property for the next client */
if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
return -EIO;

@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
{
struct device_node *slot_node;
struct platform_device *pdev;
/*
* TODO: the MMC core framework currently does not support
* controllers with multiple slots properly. So we only register
* the first slot for now
*/
slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
if (!slot_node) {
dev_warn(parent, "no 'mmc-slot' sub-node found\n");
return ERR_PTR(-ENOENT);
}
return of_platform_device_create(slot_node, NULL, parent);
pdev = of_platform_device_create(slot_node, NULL, parent);
of_node_put(slot_node);
return pdev;
}
static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)

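of_get_compatible_child() returns the child node with its refcount raised, which is why the fix stores the created platform device first and drops the node reference before returning. A toy get/put model of why the put must happen once the node is no longer needed (not the OF API itself, just the refcount shape):

#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n) { n->refcount--; }

int main(void)
{
	struct node slot = { 1 };
	struct node *c = node_get(&slot);  /* like of_get_compatible_child() */
	/* ... create the platform device from c ... */
	node_put(c);                       /* balance the reference */
	printf("refcount back to %d\n", slot.refcount); /* prints: 1 */
	return 0;
}
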
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
dev_pm_clear_wake_irq(host->dev);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);

Some files were not shown because too many files have changed in this diff.