kernel-fxtec-pro1x/kernel/workqueue.c
Srinivasarao P 90264576e2 Merge android-4.19-stable.125 (a483478) into msm-4.19
* refs/heads/tmp-a483478:
  UPSTREAM: arm64: vdso: Build vDSO with -ffixed-x18
  Revert "drm/dsi: Fix byte order of DCS set/get brightness"
  Reverting the following patches from android-4.19-stable.125
  Linux 4.19.125
  rxrpc: Fix ack discard
  rxrpc: Trace discarded ACKs
  iio: adc: stm32-dfsdm: fix device used to request dma
  iio: adc: stm32-dfsdm: Use dma_request_chan() instead of dma_request_slave_channel()
  iio: adc: stm32-adc: fix device used to request dma
  iio: adc: stm32-adc: Use dma_request_chan() instead of dma_request_slave_channel()
  x86/unwind/orc: Fix unwind_get_return_address_ptr() for inactive tasks
  rxrpc: Fix a memory leak in rxkad_verify_response()
  rapidio: fix an error in get_user_pages_fast() error handling
  ipack: tpci200: fix error return code in tpci200_register()
  mei: release me_cl object reference
  misc: rtsx: Add short delay after exit from ASPM
  iio: dac: vf610: Fix an error handling path in 'vf610_dac_probe()'
  iio: sca3000: Remove an erroneous 'get_device()'
  staging: greybus: Fix uninitialized scalar variable
  staging: iio: ad2s1210: Fix SPI reading
  Revert "gfs2: Don't demote a glock until its revokes are written"
  brcmfmac: abort and release host after error
  tty: serial: qcom_geni_serial: Fix wrap around of TX buffer
  cxgb4/cxgb4vf: Fix mac_hlist initialization and free
  cxgb4: free mac_hlist properly
  net: bcmgenet: abort suspend on error
  net: bcmgenet: code movement
  Revert "net/ibmvnic: Fix EOI when running in XIVE mode"
  media: fdp1: Fix R-Car M3-N naming in debug message
  thunderbolt: Drop duplicated get_switch_at_route()
  staging: most: core: replace strcpy() by strscpy()
  libnvdimm/btt: Fix LBA masking during 'free list' population
  libnvdimm/btt: Remove unnecessary code in btt_freelist_init
  nfit: Add Hyper-V NVDIMM DSM command set to white list
  powerpc/64s: Disable STRICT_KERNEL_RWX
  powerpc: Remove STRICT_KERNEL_RWX incompatibility with RELOCATABLE
  drm/i915/gvt: Init DPLL/DDI vreg for virtual display instead of inheritance.
  dmaengine: owl: Use correct lock in owl_dma_get_pchan()
  dmaengine: tegra210-adma: Fix an error handling path in 'tegra_adma_probe()'
  apparmor: Fix aa_label refcnt leak in policy_update
  apparmor: fix potential label refcnt leak in aa_change_profile
  apparmor: Fix use-after-free in aa_audit_rule_init
  drm/etnaviv: fix perfmon domain iteration
  ALSA: hda/realtek - Add more fixup entries for Clevo machines
  ALSA: hda/realtek - Fix silent output on Gigabyte X570 Aorus Xtreme
  ALSA: pcm: fix incorrect hw_base increase
  ALSA: iec1712: Initialize STDSP24 properly when using the model=staudio option
  padata: purge get_cpu and reorder_via_wq from padata_do_serial
  padata: initialize pd->cpu with effective cpumask
  padata: Replace delayed timer with immediate workqueue in padata_reorder
  ARM: futex: Address build warning
  platform/x86: asus-nb-wmi: Do not load on Asus T100TA and T200TA
  USB: core: Fix misleading driver bug report
  stmmac: fix pointer check after utilization in stmmac_interrupt
  ceph: fix double unlock in handle_cap_export()
  HID: quirks: Add HID_QUIRK_NO_INIT_REPORTS quirk for Dell K12A keyboard-dock
  gtp: set NLM_F_MULTI flag in gtp_genl_dump_pdp()
  x86/apic: Move TSC deadline timer debug printk
  HID: i2c-hid: reset Synaptics SYNA2393 on resume
  scsi: ibmvscsi: Fix WARN_ON during event pool release
  component: Silence bind error on -EPROBE_DEFER
  aquantia: Fix the media type of AQC100 ethernet controller in the driver
  vhost/vsock: fix packet delivery order to monitoring devices
  configfs: fix config_item refcnt leak in configfs_rmdir()
  scsi: qla2xxx: Delete all sessions before unregister local nvme port
  scsi: qla2xxx: Fix hang when issuing nvme disconnect-all in NPIV
  HID: alps: ALPS_1657 is too specific; use U1_UNICORN_LEGACY instead
  HID: alps: Add AUI1657 device ID
  HID: multitouch: add eGalaxTouch P80H84 support
  gcc-common.h: Update for GCC 10
  ubi: Fix seq_file usage in detailed_erase_block_info debugfs file
  i2c: mux: demux-pinctrl: Fix an error handling path in 'i2c_demux_pinctrl_probe()'
  iommu/amd: Fix over-read of ACPI UID from IVRS table
  ubifs: remove broken lazytime support
  fix multiplication overflow in copy_fdtable()
  mtd: spinand: Propagate ECC information to the MTD structure
  ima: Fix return value of ima_write_policy()
  evm: Check also if *tfm is an error pointer in init_desc()
  ima: Set file->f_mode instead of file->f_flags in ima_calc_file_hash()
  riscv: set max_pfn to the PFN of the last page
  KVM: SVM: Fix potential memory leak in svm_cpu_init()
  i2c: dev: Fix the race between the release of i2c_dev and cdev
  ubsan: build ubsan.c more conservatively
  x86/uaccess, ubsan: Fix UBSAN vs. SMAP
  ANDROID: scsi: ufs: Handle clocks when lrbp fails
  ANDROID: fscrypt: handle direct I/O with IV_INO_LBLK_32
  BACKPORT: FROMLIST: fscrypt: add support for IV_INO_LBLK_32 policies
  ANDROID: Update the ABI xml and qcom whitelist
  ANDROID: Fix build.config.gki-debug
  Linux 4.19.124
  Makefile: disallow data races on gcc-10 as well
  KVM: x86: Fix off-by-one error in kvm_vcpu_ioctl_x86_setup_mce
  ARM: dts: r8a7740: Add missing extal2 to CPG node
  arm64: dts: renesas: r8a77980: Fix IPMMU VIP[01] nodes
  ARM: dts: r8a73a4: Add missing CMT1 interrupts
  arm64: dts: rockchip: Rename dwc3 device nodes on rk3399 to make dtc happy
  arm64: dts: rockchip: Replace RK805 PMIC node name with "pmic" on rk3328 boards
  clk: Unlink clock if failed to prepare or enable
  Revert "ALSA: hda/realtek: Fix pop noise on ALC225"
  usb: gadget: legacy: fix error return code in cdc_bind()
  usb: gadget: legacy: fix error return code in gncm_bind()
  usb: gadget: audio: Fix a missing error return value in audio_bind()
  usb: gadget: net2272: Fix a memory leak in an error handling path in 'net2272_plat_probe()'
  dwc3: Remove check for HWO flag in dwc3_gadget_ep_reclaim_trb_sg()
  clk: rockchip: fix incorrect configuration of rk3228 aclk_gpu* clocks
  exec: Move would_dump into flush_old_exec
  x86/unwind/orc: Fix error handling in __unwind_start()
  x86: Fix early boot crash on gcc-10, third try
  cifs: fix leaked reference on requeued write
  ARM: dts: imx27-phytec-phycard-s-rdk: Fix the I2C1 pinctrl entries
  ARM: dts: dra7: Fix bus_dma_limit for PCIe
  usb: xhci: Fix NULL pointer dereference when enqueuing trbs from urb sg list
  USB: gadget: fix illegal array access in binding with UDC
  usb: host: xhci-plat: keep runtime active when removing host
  usb: core: hub: limit HUB_QUIRK_DISABLE_AUTOSUSPEND to USB5534B
  ALSA: usb-audio: Add control message quirk delay for Kingston HyperX headset
  ALSA: rawmidi: Fix racy buffer resize under concurrent accesses
  ALSA: hda/realtek - Limit int mic boost for Thinkpad T530
  gcc-10: avoid shadowing standard library 'free()' in crypto
  gcc-10: disable 'restrict' warning for now
  gcc-10: disable 'stringop-overflow' warning for now
  gcc-10: disable 'array-bounds' warning for now
  gcc-10: disable 'zero-length-bounds' warning for now
  Stop the ad-hoc games with -Wno-maybe-initialized
  kbuild: compute false-positive -Wmaybe-uninitialized cases in Kconfig
  gcc-10 warnings: fix low-hanging fruit
  pnp: Use list_for_each_entry() instead of open coding
  hwmon: (da9052) Synchronize access with mfd
  IB/mlx4: Test return value of calls to ib_get_cached_pkey
  netfilter: nft_set_rbtree: Introduce and use nft_rbtree_interval_start()
  arm64: fix the flush_icache_range arguments in machine_kexec
  netfilter: conntrack: avoid gcc-10 zero-length-bounds warning
  NFSv4: Fix fscache cookie aux_data to ensure change_attr is included
  nfs: fscache: use timespec64 in inode auxdata
  NFS: Fix fscache super_cookie index_key from changing after umount
  mmc: block: Fix request completion in the CQE timeout path
  mmc: core: Check request type before completing the request
  i40iw: Fix error handling in i40iw_manage_arp_cache()
  pinctrl: cherryview: Add missing spinlock usage in chv_gpio_irq_handler
  pinctrl: baytrail: Enable pin configuration setting for GPIO chip
  gfs2: Another gfs2_walk_metadata fix
  ALSA: hda/realtek - Fix S3 pop noise on Dell Wyse
  ipc/util.c: sysvipc_find_ipc() incorrectly updates position index
  drm/qxl: lost qxl_bo_kunmap_atomic_page in qxl_image_init_helper()
  ALSA: hda/hdmi: fix race in monitor detection during probe
  cpufreq: intel_pstate: Only mention the BIOS disabling turbo mode once
  dmaengine: mmp_tdma: Reset channel error on release
  dmaengine: pch_dma.c: Avoid data race between probe and irq handler
  riscv: fix vdso build with lld
  tcp: fix SO_RCVLOWAT hangs with fat skbs
  net: tcp: fix rx timestamp behavior for tcp_recvmsg
  netprio_cgroup: Fix unlimited memory leak of v2 cgroups
  net: ipv4: really enforce backoff for redirects
  net: dsa: loop: Add module soft dependency
  hinic: fix a bug of ndo_stop
  virtio_net: fix lockdep warning on 32 bit
  tcp: fix error recovery in tcp_zerocopy_receive()
  Revert "ipv6: add mtu lock check in __ip6_rt_update_pmtu"
  pppoe: only process PADT targeted at local interfaces
  net: phy: fix aneg restart in phy_ethtool_set_eee
  netlabel: cope with NULL catmap
  net: fix a potential recursive NETDEV_FEAT_CHANGE
  mmc: sdhci-acpi: Add SDHCI_QUIRK2_BROKEN_64_BIT_DMA for AMDI0040
  scsi: sg: add sg_remove_request in sg_write
  virtio-blk: handle block_device_operations callbacks after hot unplug
  drop_monitor: work around gcc-10 stringop-overflow warning
  net: moxa: Fix a potential double 'free_irq()'
  net/sonic: Fix a resource leak in an error handling path in 'jazz_sonic_probe()'
  shmem: fix possible deadlocks on shmlock_user_lock
  net: dsa: Do not make user port errors fatal
  ANDROID: rtc: class: call hctosys in resource managed registration
  ANDROID: GKI: Update the ABI xml and whitelist
  ANDROID: power_supply: Add RTX power-supply property
  f2fs: flush dirty meta pages when flushing them
  f2fs: fix checkpoint=disable:%u%%
  f2fs: rework filename handling
  f2fs: split f2fs_d_compare() from f2fs_match_name()
  f2fs: don't leak filename in f2fs_try_convert_inline_dir()
  ANDROID: clang: update to 11.0.1
  FROMLIST: x86_64: fix jiffies ODR violation
  ANDROID: arm64: vdso: Fix removing SCS flags
  ANDROID: GKI: Update the ABI xml and whitelist
  ANDROID: Incremental fs: wake up log pollers less often
  ANDROID: Incremental fs: Fix scheduling while atomic error
  ANDROID: Incremental fs: Avoid continually recalculating hashes
  ANDROID: export: Disable symbol trimming on modules
  ANDROID: GKI: Update the ABI xml and whitelist
  ANDROID: fscrypt: set dun_bytes more precisely
  ANDROID: dm-default-key: set dun_bytes more precisely
  ANDROID: block: backport the ability to specify max_dun_bytes
  ANDROID: Revert "ANDROID: GKI: gki_defconfig: CONFIG_DM_DEFAULT_KEY=m"
  Linux 4.19.123
  ipc/mqueue.c: change __do_notify() to bypass check_kill_permission()
  scripts/decodecode: fix trapping instruction formatting
  objtool: Fix stack offset tracking for indirect CFAs
  netfilter: nf_osf: avoid passing pointer to local var
  netfilter: nat: never update the UDP checksum when it's 0
  x86/unwind/orc: Fix premature unwind stoppage due to IRET frames
  x86/unwind/orc: Fix error path for bad ORC entry type
  x86/unwind/orc: Prevent unwinding before ORC initialization
  x86/unwind/orc: Don't skip the first frame for inactive tasks
  x86/entry/64: Fix unwind hints in rewind_stack_do_exit()
  x86/entry/64: Fix unwind hints in kernel exit path
  x86/entry/64: Fix unwind hints in register clearing code
  batman-adv: Fix refcnt leak in batadv_v_ogm_process
  batman-adv: Fix refcnt leak in batadv_store_throughput_override
  batman-adv: Fix refcnt leak in batadv_show_throughput_override
  batman-adv: fix batadv_nc_random_weight_tq
  KVM: VMX: Mark RCX, RDX and RSI as clobbered in vmx_vcpu_run()'s asm blob
  KVM: VMX: Explicitly reference RCX as the vmx_vcpu pointer in asm blobs
  coredump: fix crash when umh is disabled
  staging: gasket: Check the return value of gasket_get_bar_index()
  mm/page_alloc: fix watchdog soft lockups during set_zone_contiguous()
  arm64: hugetlb: avoid potential NULL dereference
  KVM: arm64: Fix 32bit PC wrap-around
  KVM: arm: vgic: Fix limit condition when writing to GICD_I[CS]ACTIVER
  tracing: Add a vmalloc_sync_mappings() for safe measure
  USB: serial: garmin_gps: add sanity checking for data length
  USB: uas: add quirk for LaCie 2Big Quadra
  HID: usbhid: Fix race between usbhid_close() and usbhid_stop()
  sctp: Fix bundling of SHUTDOWN with COOKIE-ACK
  HID: wacom: Read HID_DG_CONTACTMAX directly for non-generic devices
  net: stricter validation of untrusted gso packets
  bnxt_en: Fix VF anti-spoof filter setup.
  bnxt_en: Improve AER slot reset.
  net/mlx5: Fix command entry leak in Internal Error State
  net/mlx5: Fix forced completion access non initialized command entry
  bnxt_en: Fix VLAN acceleration handling in bnxt_fix_features().
  tipc: fix partial topology connection closure
  sch_sfq: validate silly quantum values
  sch_choke: avoid potential panic in choke_reset()
  net: usb: qmi_wwan: add support for DW5816e
  net_sched: sch_skbprio: add message validation to skbprio_change()
  net/mlx4_core: Fix use of ENOSPC around mlx4_counter_alloc()
  net: macsec: preserve ingress frame ordering
  fq_codel: fix TCA_FQ_CODEL_DROP_BATCH_SIZE sanity checks
  dp83640: reverse arguments to list_add_tail
  vt: fix unicode console freeing with a common interface
  tracing/kprobes: Fix a double initialization typo
  USB: serial: qcserial: Add DW5816e support
  ANDROID: usb: gadget: Add missing inline qualifier to stub functions
  ANDROID: Drop ABI monitoring from KASAN build config
  ANDROID: Rename build.config.gki.arch_kasan
  ANDROID: GKI: Enable CONFIG_STATIC_USERMODEHELPER
  ANDROID: dm-default-key: Update key size for wrapped keys
  ANDROID: gki_defconfig: enable CONFIG_MMC_CRYPTO
  ANDROID: mmc: MMC crypto API
  ANDROID: GKI: Update the ABI xml and whitelist
  ANDROID: GKI: add missing exports for cam_smmu_api.ko
  Linux 4.19.122
  drm/atomic: Take the atomic toys away from X
  cgroup, netclassid: remove double cond_resched
  mac80211: add ieee80211_is_any_nullfunc()
  platform/x86: GPD pocket fan: Fix error message when temp-limits are out of range
  ALSA: hda: Match both PCI ID and SSID for driver blacklist
  hexagon: define ioremap_uc
  hexagon: clean up ioremap
  mfd: intel-lpss: Use devm_ioremap_uc for MMIO
  lib: devres: add a helper function for ioremap_uc
  drm/amdgpu: Fix oops when pp_funcs is unset in ACPI event
  sctp: Fix SHUTDOWN CTSN Ack in the peer restart case
  net: systemport: suppress warnings on failed Rx SKB allocations
  net: bcmgenet: suppress warnings on failed Rx SKB allocations
  lib/mpi: Fix building for powerpc with clang
  scripts/config: allow colons in option strings for sed
  s390/ftrace: fix potential crashes when switching tracers
  cifs: protect updating server->dstaddr with a spinlock
  ASoC: rsnd: Fix "status check failed" spam for multi-SSI
  ASoC: rsnd: Don't treat master SSI in multi SSI setup as parent
  net: stmmac: Fix sub-second increment
  net: stmmac: fix enabling socfpga's ptp_ref_clock
  wimax/i2400m: Fix potential urb refcnt leak
  drm/amdgpu: Correctly initialize thermal controller for GPUs with Powerplay table v0 (e.g Hawaii)
  ASoC: codecs: hdac_hdmi: Fix incorrect use of list_for_each_entry
  ASoC: rsnd: Fix HDMI channel mapping for multi-SSI mode
  ASoC: rsnd: Fix parent SSI start/stop in multi-SSI mode
  usb: dwc3: gadget: Properly set maxpacket limit
  ASoC: sgtl5000: Fix VAG power-on handling
  selftests/ipc: Fix test failure seen after initial test run
  ASoC: topology: Check return value of pcm_new_ver
  powerpc/pci/of: Parse unassigned resources
  vhost: vsock: kick send_pkt worker once device is started
  ANDROID: GKI: fix build warning on 32bits due to ASoC msm change
  ANDROID: GKI: fix build error on 32bits due to ASoC msm change
  ANDROID: GKI: update abi definition due to FAIR_GROUP_SCHED removal
  ANDROID: GKI: Remove FAIR_GROUP_SCHED
  ANDROID: GKI: BULK update ABI XML representation and qcom whitelist
  ANDROID: build.config.gki.aarch64: Enable WHITELIST_STRICT_MODE
  ANDROID: GKI: Update the ABI xml and qcom whitelist
  ANDROID: remove unused variable
  ANDROID: Drop ABI monitoring from KASAN build config
  Linux 4.19.121
  mmc: meson-mx-sdio: remove the broken ->card_busy() op
  mmc: meson-mx-sdio: Set MMC_CAP_WAIT_WHILE_BUSY
  mmc: sdhci-msm: Enable host capabilities pertains to R1b response
  mmc: sdhci-pci: Fix eMMC driver strength for BYT-based controllers
  mmc: sdhci-xenon: fix annoying 1.8V regulator warning
  mmc: cqhci: Avoid false "cqhci: CQE stuck on" by not open-coding timeout loop
  btrfs: transaction: Avoid deadlock due to bad initialization timing of fs_info::journal_info
  btrfs: fix partial loss of prealloc extent past i_size after fsync
  selinux: properly handle multiple messages in selinux_netlink_send()
  dmaengine: dmatest: Fix iteration non-stop logic
  nfs: Fix potential posix_acl refcnt leak in nfs3_set_acl
  ALSA: opti9xx: shut up gcc-10 range warning
  iommu/amd: Fix legacy interrupt remapping for x2APIC-enabled system
  scsi: target/iblock: fix WRITE SAME zeroing
  iommu/qcom: Fix local_base status check
  vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()
  vfio: avoid possible overflow in vfio_iommu_type1_pin_pages
  RDMA/core: Fix race between destroy and release FD object
  RDMA/core: Prevent mixed use of FDs between shared ufiles
  RDMA/mlx4: Initialize ib_spec on the stack
  RDMA/mlx5: Set GRH fields in query QP on RoCE
  scsi: qla2xxx: check UNLOADING before posting async work
  scsi: qla2xxx: set UNLOADING before waiting for session deletion
  dm multipath: use updated MPATHF_QUEUE_IO on mapping for bio-based mpath
  dm writecache: fix data corruption when reloading the target
  dm verity fec: fix hash block number in verity_fec_decode
  PM: hibernate: Freeze kernel threads in software_resume()
  PM: ACPI: Output correct message on target power state
  ALSA: pcm: oss: Place the plugin buffer overflow checks correctly
  ALSA: hda/hdmi: fix missing unlock before return
  ALSA: usb-audio: Correct a typo of NuPrime DAC-10 USB ID
  ALSA: hda/realtek - Two front mics on a Lenovo ThinkCenter
  btrfs: fix block group leak when removing fails
  drm/qxl: qxl_release use after free
  drm/qxl: qxl_release leak in qxl_hw_surface_alloc()
  drm/qxl: qxl_release leak in qxl_draw_dirty_fb()
  drm/edid: Fix off-by-one in DispID DTD pixel clock
  ANDROID: GKI: Bulk update ABI XML representation
  ANDROID: GKI: Enable net testing options
  ANDROID: gki_defconfig: Enable CONFIG_REMOTEPROC
  ANDROID: Rename build.config.gki.arch_kasan
  ANDROID: GKI: Update ABI for IOMMU
  ANDROID: Incremental fs: Fix issues with very large files
  ANDROID: Correct build.config branch name
  ANDROID: GKI: Bulk update ABI XML representation and whitelist.
  UPSTREAM: vdso: Fix clocksource.h macro detection
  ANDROID: GKI: update abi definition due to added padding
  ANDROID: GKI: networking: add Android ABI padding to a lot of networking structures
  ANDROID: GKI: dma-mapping.h: add Android ABI padding to a structure
  ANDROID: GKI: ioport.h: add Android ABI padding to a structure
  ANDROID: GKI: iomap.h: add Android ABI padding to a structure
  ANDROID: GKI: genhd.h: add Android ABI padding to some structures
  ANDROID: GKI: hrtimer.h: add Android ABI padding to a structure
  ANDROID: GKI: ethtool.h: add Android ABI padding to a structure
  ANDROID: GKI: sched: add Android ABI padding to some structures
  ANDROID: GKI: kernfs.h: add Android ABI padding to some structures
  ANDROID: GKI: kobject.h: add Android ABI padding to some structures
  ANDROID: GKI: mm.h: add Android ABI padding to a structure
  ANDROID: GKI: mmu_notifier.h: add Android ABI padding to some structures
  ANDROID: GKI: pci: add Android ABI padding to some structures
  ANDROID: GKI: irqdomain.h: add Android ABI padding to a structure
  ANDROID: GKI: blk_types.h: add Android ABI padding to a structure
  ANDROID: GKI: scsi.h: add Android ABI padding to a structure
  ANDROID: GKI: quota.h: add Android ABI padding to some structures
  ANDROID: GKI: timer.h: add Android ABI padding to a structure
  ANDROID: GKI: user_namespace.h: add Android ABI padding to a structure
  FROMGIT: f2fs: fix missing check for f2fs_unlock_op
  Linux 4.19.120
  propagate_one(): mnt_set_mountpoint() needs mount_lock
  ext4: check for non-zero journal inum in ext4_calculate_overhead
  qed: Fix use after free in qed_chain_free
  bpf, x86_32: Fix clobbering of dst for BPF_JSET
  hwmon: (jc42) Fix name to have no illegal characters
  ext4: convert BUG_ON's to WARN_ON's in mballoc.c
  ext4: increase wait time needed before reuse of deleted inode numbers
  ext4: use matching invalidatepage in ext4_writepage
  arm64: Delete the space separator in __emit_inst
  ALSA: hda: call runtime_allow() for all hda controllers
  xen/xenbus: ensure xenbus_map_ring_valloc() returns proper grant status
  objtool: Support Clang non-section symbols in ORC dump
  objtool: Fix CONFIG_UBSAN_TRAP unreachable warnings
  scsi: target: tcmu: reset_ring should reset TCMU_DEV_BIT_BROKEN
  scsi: target: fix PR IN / READ FULL STATUS for FC
  ALSA: hda: Explicitly permit using autosuspend if runtime PM is supported
  ALSA: hda: Keep the controller initialization even if no codecs found
  xfs: fix partially uninitialized structure in xfs_reflink_remap_extent
  x86: hyperv: report value of misc_features
  net: fec: set GPR bit on suspend by DT configuration.
  bpf, x86: Fix encoding for lower 8-bit registers in BPF_STX BPF_B
  xfs: clear PF_MEMALLOC before exiting xfsaild thread
  mm: shmem: disable interrupt when acquiring info->lock in userfaultfd_copy path
  bpf, x86_32: Fix incorrect encoding in BPF_LDX zero-extension
  perf/core: fix parent pid/tid in task exit events
  net/mlx5: Fix failing fw tracer allocation on s390
  cpumap: Avoid warning when CONFIG_DEBUG_PER_CPU_MAPS is enabled
  ARM: dts: bcm283x: Disable dsi0 node
  PCI: Move Apex Edge TPU class quirk to fix BAR assignment
  PCI: Avoid ASMedia XHCI USB PME# from D0 defect
  svcrdma: Fix leak of svc_rdma_recv_ctxt objects
  svcrdma: Fix trace point use-after-free race
  xfs: acquire superblock freeze protection on eofblocks scans
  net/cxgb4: Check the return from t4_query_params properly
  rxrpc: Fix DATA Tx to disable nofrag for UDP on AF_INET6 socket
  i2c: altera: use proper variable to hold errno
  nfsd: memory corruption in nfsd4_lock()
  ASoC: wm8960: Fix wrong clock after suspend & resume
  ASoC: tas571x: disable regulators on failed probe
  ASoC: q6dsp6: q6afe-dai: add missing channels to MI2S DAIs
  iio:ad7797: Use correct attribute_group
  usb: gadget: udc: bdc: Remove unnecessary NULL checks in bdc_req_complete
  usb: dwc3: gadget: Do link recovery for SS and SSP
  binder: take read mode of mmap_sem in binder_alloc_free_page()
  include/uapi/linux/swab.h: fix userspace breakage, use __BITS_PER_LONG for swap
  mtd: cfi: fix deadloop in cfi_cmdset_0002.c do_write_buffer
  remoteproc: Fix wrong rvring index computation
  FROMLIST: PM / devfreq: Restart previous governor if new governor fails to start
  ANDROID: GKI: arm64: Enable GZIP and LZ4 kernel compression modes
  ANDROID: GKI: arm64: gki_defconfig: Set arm_smmu configuration
  ANDROID: GKI: iommu/arm-smmu: Modularize ARM SMMU driver
  ANDROID: GKI: iommu: Snapshot of vendor changes
  ANDROID: GKI: Additions to ARM SMMU register definitions
  ANDROID: GKI: iommu/io-pgtable-arm: LPAE related updates by vendor
  ANDROID: GKI: common: dma-mapping: make dma_common_contiguous_remap more robust
  ANDROID: GKI: dma-coherent: Expose device base address and size
  ANDROID: GKI: arm64: add support for NO_KERNEL_MAPPING and STRONGLY_ORDERED
  ANDROID: GKI: dma-mapping: Add dma_remap functions
  ANDROID: GKI: arm64: Support early fixup for CMA
  ANDROID: GKI: iommu: dma-mapping-fast: Fast ARMv7/v8 Long Descriptor Format
  ANDROID: GKI: arm64: dma-mapping: add support for IOMMU mapper
  ANDROID: GKI: add ARCH_NR_GPIO for ABI match
  ANDROID: GKI: kernel: Export symbol of `cpu_do_idle`
  ANDROID: GKI: kernel: Export symbols needed by msm_minidump.ko and minidump_log.ko (again)
  ANDROID: GKI: add missing exports for __flush_dcache_area
  ANDROID: GKI: arm64: Export caching APIs
  ANDROID: GKI: arm64: provide dma cache routines with same API as 32 bit
  ANDROID: gki_defconfig: add FORTIFY_SOURCE, remove SPMI_MSM_PMIC_ARB
  Revert "ANDROID: GKI: spmi: pmic-arb: don't enable SPMI_MSM_PMIC_ARB by default"
  ANDROID: GKI: update abi definitions after adding padding
  ANDROID: GKI: elevator: add Android ABI padding to some structures
  ANDROID: GKI: dentry: add Android ABI padding to some structures
  ANDROID: GKI: bio: add Android ABI padding to some structures
  ANDROID: GKI: scsi: add Android ABI padding to some structures
  ANDROID: GKI: ufs: add Android ABI padding to some structures
  ANDROID: GKI: workqueue.h: add Android ABI padding to some structures
  ANDROID: GKI: fs.h: add Android ABI padding to some structures
  ANDROID: GKI: USB: add Android ABI padding to some structures
  ANDROID: GKI: mm: add Android ABI padding to some structures
  ANDROID: GKI: mount.h: add Android ABI padding to some structures
  ANDROID: GKI: sched.h: add Android ABI padding to some structures
  ANDROID: GKI: sock.h: add Android ABI padding to some structures
  ANDROID: GKI: module.h: add Android ABI padding to some structures
  ANDROID: GKI: device.h: add Android ABI padding to some structures
  ANDROID: GKI: phy: add Android ABI padding to some structures
  ANDROID: GKI: add android_kabi.h
  ANDROID: ABI: update due to previous changes in the tree
  BACKPORT: sched/core: Fix reset-on-fork from RT with uclamp
  ANDROID: GKI: Add support for missing V4L2 symbols
  ANDROID: GKI: Bulk update ABI XML representation
  ANDROID: GKI: arm64: psci: Support for OS initiated scheme
  ANDROID: GKI: net: add counter for number of frames coalesced in GRO
  ANDROID: GKI: cfg80211: Include length of kek in rekey data
  BACKPORT: loop: change queue block size to match when using DIO
  ANDROID: Incremental fs: Add setattr call
  ANDROID: GKI: enable CONFIG_RTC_SYSTOHC
  ANDROID: GKI: ipv4: add vendor padding to __IPV4_DEVCONF_* enums
  Revert "ANDROID: GKI: ipv4: increase __IPV4_DEVCONF_MAX to 64"
  ANDROID: driver: gpu: drm: fix export symbol types
  ANDROID: SoC: core: fix export symbol type
  ANDROID: ufshcd-crypto: fix export symbol type
  ANDROID: GKI: drivers: mailbox: fix race resulting in multiple message submission
  ANDROID: GKI: arm64: gki_defconfig: Enable a few thermal configs
  Revert "ANDROID: GKI: add base.h include to match MODULE_VERSIONS"
  FROMLIST: thermal: Make cooling device trip point writable from sysfs
  ANDROID: GKI: drivers: thermal: cpu_cooling: Use CPU ID as cooling device ID
  ANDROID: GKI: PM / devfreq: Allow min freq to be 0
  ANDROID: GKI: arm64: gki_defconfig: Enable REGULATOR_PROXY_CONSUMER
  ANDROID: GKI: Bulk Update ABI XML representation
  ANDROID: KASAN support for GKI remove CONFIG_CC_WERROR
  ANDROID: KASAN support for GKI
  ANDROID: virt_wifi: fix export symbol types
  ANDROID: vfs: fix export symbol type
  ANDROID: vfs: fix export symbol types
  ANDROID: fscrypt: fix export symbol type
  ANDROID: cfi: fix export symbol types
  ANDROID: bpf: fix export symbol type
  Linux 4.19.119
  s390/mm: fix page table upgrade vs 2ndary address mode accesses
  xfs: Fix deadlock between AGI and AGF with RENAME_WHITEOUT
  serial: sh-sci: Make sure status register SCxSR is read in correct sequence
  xhci: prevent bus suspend if a roothub port detected a over-current condition
  usb: f_fs: Clear OS Extended descriptor counts to zero in ffs_data_reset()
  usb: dwc3: gadget: Fix request completion check
  UAS: fix deadlock in error handling and PM flushing work
  UAS: no use logging any details in case of ENODEV
  cdc-acm: introduce a cool down
  cdc-acm: close race between suspend() and acm_softint
  staging: vt6656: Power save stop wake_up_count wrap around.
  staging: vt6656: Fix pairwise key entry save.
  staging: vt6656: Fix drivers TBTT timing counter.
  staging: vt6656: Fix calling conditions of vnt_set_bss_mode
  staging: vt6656: Don't set RCR_MULTICAST or RCR_BROADCAST by default.
  vt: don't use kmalloc() for the unicode screen buffer
  vt: don't hardcode the mem allocation upper bound
  staging: comedi: Fix comedi_device refcnt leak in comedi_open
  staging: comedi: dt2815: fix writing hi byte of analog output
  powerpc/setup_64: Set cache-line-size based on cache-block-size
  ARM: imx: provide v7_cpu_resume() only on ARM_CPU_SUSPEND=y
  iwlwifi: mvm: beacon statistics shouldn't go backwards
  iwlwifi: pcie: actually release queue memory in TVQM
  ASoC: dapm: fixup dapm kcontrol widget
  audit: check the length of userspace generated audit records
  usb-storage: Add unusual_devs entry for JMicron JMS566
  tty: rocket, avoid OOB access
  tty: hvc: fix buffer overflow during hvc_alloc().
  KVM: VMX: Enable machine check support for 32bit targets
  KVM: Check validity of resolved slot when searching memslots
  KVM: s390: Return last valid slot if approx index is out-of-bounds
  tpm: ibmvtpm: retry on H_CLOSED in tpm_ibmvtpm_send()
  tpm/tpm_tis: Free IRQ if probing fails
  ALSA: usb-audio: Filter out unsupported sample rates on Focusrite devices
  ALSA: usb-audio: Fix usb audio refcnt leak when getting spdif
  ALSA: hda/realtek - Add new codec supported for ALC245
  ALSA: hda/realtek - Fix unexpected init_amp override
  ALSA: usx2y: Fix potential NULL dereference
  tools/vm: fix cross-compile build
  mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
  mm/hugetlb: fix a addressing exception caused by huge_pte_offset
  vmalloc: fix remap_vmalloc_range() bounds checks
  USB: hub: Fix handling of connect changes during sleep
  USB: core: Fix free-while-in-use bug in the USB SG library
  USB: early: Handle AMD's spec-compliant identifiers, too
  USB: Add USB_QUIRK_DELAY_CTRL_MSG and USB_QUIRK_DELAY_INIT for Corsair K70 RGB RAPIDFIRE
  USB: sisusbvga: Change port variable from signed to unsigned
  fs/namespace.c: fix mountpoint reference counter race
  iio: xilinx-xadc: Make sure not exceed maximum samplerate
  iio: xilinx-xadc: Fix sequencer configuration for aux channels in simultaneous mode
  iio: xilinx-xadc: Fix clearing interrupt when enabling trigger
  iio: xilinx-xadc: Fix ADC-B powerdown
  iio: adc: stm32-adc: fix sleep in atomic context
  iio: st_sensors: rely on odr mask to know if odr can be set
  iio: core: remove extra semi-colon from devm_iio_device_register() macro
  ALSA: usb-audio: Add connector notifier delegation
  ALSA: usb-audio: Add static mapping table for ALC1220-VB-based mobos
  ALSA: hda: Remove ASUS ROG Zenith from the blacklist
  KEYS: Avoid false positive ENOMEM error on key read
  mlxsw: Fix some IS_ERR() vs NULL bugs
  vrf: Check skb for XFRM_TRANSFORMED flag
  xfrm: Always set XFRM_TRANSFORMED in xfrm{4,6}_output_finish
  net: dsa: b53: b53_arl_rw_op() needs to select IVL or SVL
  net: dsa: b53: Rework ARL bin logic
  net: dsa: b53: Fix ARL register definitions
  net: dsa: b53: Lookup VID in ARL searches when VLAN is enabled
  vrf: Fix IPv6 with qdisc and xfrm
  team: fix hang in team_mode_get()
  tcp: cache line align MAX_TCP_HEADER
  sched: etf: do not assume all sockets are full blown
  net/x25: Fix x25_neigh refcnt leak when receiving frame
  net: stmmac: dwmac-meson8b: Add missing boundary to RGMII TX clock array
  net: netrom: Fix potential nr_neigh refcnt leak in nr_add_node
  net: bcmgenet: correct per TX/RX ring statistics
  macvlan: fix null dereference in macvlan_device_event()
  macsec: avoid to set wrong mtu
  ipv6: fix restrict IPV6_ADDRFORM operation
  cxgb4: fix large delays in PTP synchronization
  cxgb4: fix adapter crash due to wrong MC size
  x86/KVM: Clean up host's steal time structure
  x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
  x86/kvm: Cache gfn to pfn translation
  x86/kvm: Introduce kvm_(un)map_gfn()
  KVM: Properly check if "page" is valid in kvm_vcpu_unmap
  kvm: fix compile on s390 part 2
  kvm: fix compilation on s390
  kvm: fix compilation on aarch64
  KVM: Introduce a new guest mapping API
  KVM: nVMX: Always sync GUEST_BNDCFGS when it comes from vmcs01
  KVM: VMX: Zero out *all* general purpose registers after VM-Exit
  f2fs: fix to avoid memory leakage in f2fs_listxattr
  blktrace: fix dereference after null check
  blktrace: Protect q->blk_trace with RCU
  net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
  net: ipv6: add net argument to ip6_dst_lookup_flow
  PCI/ASPM: Allow re-enabling Clock PM
  scsi: smartpqi: fix call trace in device discovery
  virtio-blk: improve virtqueue error to BLK_STS
  tracing/selftests: Turn off timeout setting
  drm/amd/display: Not doing optimize bandwidth if flip pending.
  xhci: Ensure link state is U3 after setting USB_SS_PORT_LS_U3
  ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN MPWIN895CL tablet
  perf/core: Disable page faults when getting phys address
  pwm: bcm2835: Dynamically allocate base
  pwm: renesas-tpu: Fix late Runtime PM enablement
  Revert "powerpc/64: irq_work avoid interrupt when called with hardware irqs enabled"
  loop: Better discard support for block devices
  s390/cio: avoid duplicated 'ADD' uevents
  kconfig: qconf: Fix a few alignment issues
  ipc/util.c: sysvipc_find_ipc() should increase position index
  selftests: kmod: fix handling test numbers above 9
  kernel/gcov/fs.c: gcov_seq_next() should increase position index
  nvme: fix deadlock caused by ANA update wrong locking
  ASoC: Intel: atom: Take the drv->lock mutex before calling sst_send_slot_map()
  scsi: iscsi: Report unbind session event when the target has been removed
  pwm: rcar: Fix late Runtime PM enablement
  ceph: don't skip updating wanted caps when cap is stale
  ceph: return ceph_mdsc_do_request() errors from __get_parent()
  scsi: lpfc: Fix crash in target side cable pulls hitting WAIT_FOR_UNREG
  scsi: lpfc: Fix kasan slab-out-of-bounds error in lpfc_unreg_login
  watchdog: reset last_hw_keepalive time at start
  arm64: Silence clang warning on mismatched value/register sizes
  arm64: compat: Workaround Neoverse-N1 #1542419 for compat user-space
  arm64: Fake the IminLine size on systems affected by Neoverse-N1 #1542419
  arm64: errata: Hide CTR_EL0.DIC on systems affected by Neoverse-N1 #1542419
  arm64: Add part number for Neoverse N1
  vti4: removed duplicate log message.
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  bpftool: Fix printing incorrect pointer in btf_dump_ptr
  drm/msm: Use the correct dma_sync calls harder
  ext4: fix extent_status fragmentation for plain files
  ANDROID: abi_gki_aarch64_cuttlefish_whitelist: remove stale symbols
  ANDROID: GKI: ipv4: increase __IPV4_DEVCONF_MAX to 64
  ANDROID: GKI: power: add missing export for POWER_RESET_QCOM=m
  BACKPORT: cfg80211: Support key configuration for Beacon protection (BIGTK)
  BACKPORT: cfg80211: Enhance the AKM advertisement to support per interface.
  UPSTREAM: sysrq: Use panic() to force a crash
  ANDROID: GKI: kernel: sound: update codec options with block size
  ANDROID: add compat cross compiler
  ANDROID: x86/vdso: disable LTO only for VDSO
  BACKPORT: arm64: vdso32: Enable Clang Compilation
  UPSTREAM: arm64: compat: vdso: Expose BUILD_VDSO32
  BACKPORT: lib/vdso: Enable common headers
  BACKPORT: arm: vdso: Enable arm to use common headers
  BACKPORT: x86/vdso: Enable x86 to use common headers
  BACKPORT: mips: vdso: Enable mips to use common headers
  UPSTREAM: arm64: vdso32: Include common headers in the vdso library
  UPSTREAM: arm64: vdso: Include common headers in the vdso library
  UPSTREAM: arm64: Introduce asm/vdso/processor.h
  BACKPORT: arm64: vdso32: Code clean up
  UPSTREAM: linux/elfnote.h: Replace elf.h with UAPI equivalent
  UPSTREAM: scripts: Fix the inclusion order in modpost
  UPSTREAM: common: Introduce processor.h
  UPSTREAM: linux/ktime.h: Extract common header for vDSO
  UPSTREAM: linux/jiffies.h: Extract common header for vDSO
  UPSTREAM: linux/time64.h: Extract common header for vDSO
  BACKPORT: linux/time32.h: Extract common header for vDSO
  BACKPORT: linux/time.h: Extract common header for vDSO
  UPSTREAM: linux/math64.h: Extract common header for vDSO
  BACKPORT: linux/clocksource.h: Extract common header for vDSO
  BACKPORT: mips: Introduce asm/vdso/clocksource.h
  BACKPORT: arm64: Introduce asm/vdso/clocksource.h
  BACKPORT: arm: Introduce asm/vdso/clocksource.h
  BACKPORT: x86: Introduce asm/vdso/clocksource.h
  UPSTREAM: linux/limits.h: Extract common header for vDSO
  BACKPORT: linux/kernel.h: split *_MAX and *_MIN macros into <linux/limits.h>
  BACKPORT: linux/bits.h: Extract common header for vDSO
  UPSTREAM: linux/const.h: Extract common header for vDSO
  BACKPORT: arm64: vdso: fix flip/flop vdso build bug
  UPSTREAM: lib/vdso: Allow the high resolution parts to be compiled out
  UPSTREAM: lib/vdso: Only read hrtimer_res when needed in __cvdso_clock_getres()
  UPSTREAM: lib/vdso: Mark do_hres() and do_coarse() as __always_inline
  UPSTREAM: lib/vdso: Avoid duplication in __cvdso_clock_getres()
  UPSTREAM: lib/vdso: Let do_coarse() return 0 to simplify the callsite
  UPSTREAM: lib/vdso: Remove checks on return value for 32 bit vDSO
  UPSTREAM: lib/vdso: Build 32 bit specific functions in the right context
  UPSTREAM: lib/vdso: Make __cvdso_clock_getres() static
  UPSTREAM: lib/vdso: Make clock_getres() POSIX compliant again
  UPSTREAM: lib/vdso/32: Provide legacy syscall fallbacks
  UPSTREAM: lib/vdso: Move fallback invocation to the callers
  UPSTREAM: lib/vdso/32: Remove inconsistent NULL pointer checks
  UPSTREAM: lib/vdso: Make delta calculation work correctly
  UPSTREAM: arm64: compat: Fix syscall number of compat_clock_getres
  BACKPORT: arm64: lse: Fix LSE atomics with LLVM
  UPSTREAM: mips: Fix gettimeofday() in the vdso library
  UPSTREAM: mips: vdso: Fix __arch_get_hw_counter()
  BACKPORT: arm64: Kconfig: Make CONFIG_COMPAT_VDSO a proper Kconfig option
  UPSTREAM: arm64: vdso32: Rename COMPATCC to CC_COMPAT
  UPSTREAM: arm64: vdso32: Pass '--target' option to clang via VDSO_CAFLAGS
  UPSTREAM: arm64: vdso32: Don't use KBUILD_CPPFLAGS unconditionally
  UPSTREAM: arm64: vdso32: Move definition of COMPATCC into vdso32/Makefile
  UPSTREAM: arm64: Default to building compat vDSO with clang when CONFIG_CC_IS_CLANG
  UPSTREAM: lib: vdso: Remove CROSS_COMPILE_COMPAT_VDSO
  UPSTREAM: arm64: vdso32: Remove jump label config option in Makefile
  UPSTREAM: arm64: vdso32: Detect binutils support for dmb ishld
  BACKPORT: arm64: vdso: Remove stale files from old assembly implementation
  UPSTREAM: arm64: vdso32: Fix broken compat vDSO build warnings
  UPSTREAM: mips: compat: vdso: Use legacy syscalls as fallback
  BACKPORT: arm64: Relax Documentation/arm64/tagged-pointers.rst
  BACKPORT: arm64: Add tagged-address-abi.rst to index.rst
  UPSTREAM: arm64: vdso: Fix Makefile regression
  UPSTREAM: mips: vdso: Fix flip/flop vdso building bug
  UPSTREAM: mips: vdso: Fix source path
  UPSTREAM: mips: Add clock_gettime64 entry point
  UPSTREAM: mips: Add clock_getres entry point
  BACKPORT: mips: Add support for generic vDSO
  BACKPORT: arm64: vdso: Explicitly add build-id option
  BACKPORT: arm64: vdso: use $(LD) instead of $(CC) to link VDSO
  BACKPORT: arm64: vdso: Cleanup Makefiles
  UPSTREAM: arm64: vdso: Fix population of AT_SYSINFO_EHDR for compat vdso
  UPSTREAM: arm64: vdso: Fix compilation with clang older than 8
  UPSTREAM: arm64: compat: Fix __arch_get_hw_counter() implementation
  UPSTREAM: arm64: Fix __arch_get_hw_counter() implementation
  UPSTREAM: x86/vdso/32: Use 32bit syscall fallback
  UPSTREAM: x86/vdso: Fix flip/flop vdso build bug
  UPSTREAM: x86/vdso: Give the [ph]vclock_page declarations real types
  UPSTREAM: x86/vdso: Add clock_gettime64() entry point
  BACKPORT: x86/vdso: Add clock_getres() entry point
  BACKPORT: x86/vdso: Switch to generic vDSO implementation
  UPSTREAM: x86/segments: Introduce the 'CPUNODE' naming to better document the segment limit CPU/node NR trick
  UPSTREAM: x86/vdso: Initialize the CPU/node NR segment descriptor earlier
  UPSTREAM: x86/vdso: Introduce helper functions for CPU and node number
  UPSTREAM: x86/segments/64: Rename the GDT PER_CPU entry to CPU_NUMBER
  BACKPORT: arm64: vdso: Enable vDSO compat support
  UPSTREAM: arm64: compat: Get sigreturn trampolines from vDSO
  UPSTREAM: arm64: elf: VDSO code page discovery
  UPSTREAM: arm64: compat: VDSO setup for compat layer
  UPSTREAM: arm64: vdso: Refactor vDSO code
  BACKPORT: arm64: compat: Add vDSO
  UPSTREAM: arm64: compat: Generate asm offsets for signals
  UPSTREAM: arm64: compat: Expose signal related structures
  UPSTREAM: arm64: compat: Add missing syscall numbers
  BACKPORT: arm64: vdso: Substitute gettimeofday() with C implementation
  UPSTREAM: timekeeping: Provide a generic update_vsyscall() implementation
  UPSTREAM: lib/vdso: Add compat support
  UPSTREAM: lib/vdso: Provide generic VDSO implementation
  UPSTREAM: vdso: Define standardized vdso_datapage
  UPSTREAM: hrtimer: Split out hrtimer defines into separate header
  UPSTREAM: nds32: Fix vDSO clock_getres()
  UPSTREAM: arm64: compat: Reduce address limit for 64K pages
  BACKPORT: arm64: compat: Add KUSER_HELPERS config option
  UPSTREAM: arm64: compat: Refactor aarch32_alloc_vdso_pages()
  BACKPORT: arm64: compat: Split kuser32
  UPSTREAM: arm64: compat: Alloc separate pages for vectors and sigpage
  ANDROID: GKI: Update ABI XML representation
  ANDROID: GKI: Enable GENERIC_IRQ_CHIP
  ANDROID: GKI: power_supply: Add FG_TYPE power-supply property
  ANDROID: GKI: mm: export mm_trace_rss_stat for modules to report RSS changes
  ANDROID: GKI: gki_defconfig: Enable CONFIG_LEDS_TRIGGER_TRANSIENT
  ANDROID: GKI: gki_defconfig: Enable CONFIG_CPU_FREQ_STAT
  ANDROID: GKI: arm64: gki_defconfig: Disable HW tracing features
  ANDROID: GKI: gki_defconfig: Enable CONFIG_I2C_CHARDEV
  ANDROID: Incremental fs: Use simple compression in log buffer
  ANDROID: GKI: usb: core: Add support to parse config summary capability descriptors
  ANDROID: GKI: Update ABI XML representation
  ANDROID: dm-bow: Fix not to skip trim at fragmented range
  ANDROID: Remove VLA from uid_sys_stats.c
  f2fs: fix missing check for f2fs_unlock_op
  ANDROID: fix wakeup reason findings
  UPSTREAM: cfg80211: fix and clean up cfg80211_gen_new_bssid()
  UPSTREAM: cfg80211: save multi-bssid properties
  UPSTREAM: cfg80211: make BSSID generation function inline
  UPSTREAM: cfg80211: parse multi-bssid only if HW supports it
  UPSTREAM: cfg80211: Move Multiple BSS info to struct cfg80211_bss to be visible
  UPSTREAM: cfg80211: Properly track transmitting and non-transmitting BSS
  UPSTREAM: cfg80211: use for_each_element() for multi-bssid parsing
  UPSTREAM: cfg80211: Parsing of Multiple BSSID information in scanning
  UPSTREAM: cfg80211/nl80211: Offload OWE processing to user space in AP mode
  ANDROID: GKI: cfg80211: Sync nl80211 commands/feature with upstream
  ANDROID: GKI: gki_defconfig: Enable FW_LOADER_USER_HELPER*
  ANDROID: GKI: arm64: gki_defconfig: Disable CONFIG_ARM64_TAGGED_ADDR_ABI
  ANDROID: GKI: gki_defconfig: CONFIG_CHR_DEV_SG=y
  ANDROID: GKI: gki_defconfig: CONFIG_DM_DEFAULT_KEY=m
  ANDROID: update the ABI xml representation
  ANDROID: init: GKI: enable hidden configs for GPU
  Linux 4.19.118
  bpf: fix buggy r0 retval refinement for tracing helpers
  KEYS: Don't write out to userspace while holding key semaphore
  mtd: phram: fix a double free issue in error path
  mtd: lpddr: Fix a double free in probe()
  mtd: spinand: Explicitly use MTD_OPS_RAW to write the bad block marker to OOB
  locktorture: Print ratio of acquisitions, not failures
  tty: evh_bytechan: Fix out of bounds accesses
  iio: si1133: read 24-bit signed integer for measurement
  fbdev: potential information leak in do_fb_ioctl()
  net: dsa: bcm_sf2: Fix overflow checks
  f2fs: fix to wait all node page writeback
  iommu/amd: Fix the configuration of GCR3 table root pointer
  libnvdimm: Out of bounds read in __nd_ioctl()
  power: supply: axp288_fuel_gauge: Broaden vendor check for Intel Compute Sticks.
  ext2: fix debug reference to ext2_xattr_cache
  ext2: fix empty body warnings when -Wextra is used
  iommu/vt-d: Fix mm reference leak
  drm/vc4: Fix HDMI mode validation
  f2fs: fix NULL pointer dereference in f2fs_write_begin()
  NFS: Fix memory leaks in nfs_pageio_stop_mirroring()
  drm/amdkfd: kfree the wrong pointer
  x86: ACPI: fix CPU hotplug deadlock
  KVM: s390: vsie: Fix possible race when shadowing region 3 tables
  compiler.h: fix error in BUILD_BUG_ON() reporting
  percpu_counter: fix a data race at vm_committed_as
  include/linux/swapops.h: correct guards for non_swap_entry()
  cifs: Allocate encryption header through kmalloc
  um: ubd: Prevent buffer overrun on command completion
  ext4: do not commit super on read-only bdev
  s390/cpum_sf: Fix wrong page count in error message
  powerpc/maple: Fix declaration made after definition
  s390/cpuinfo: fix wrong output when CPU0 is offline
  NFS: direct.c: Fix memory leak of dreq when nfs_get_lock_context fails
  NFSv4/pnfs: Return valid stateids in nfs_layout_find_inode_by_stateid()
  rtc: 88pm860x: fix possible race condition
  soc: imx: gpc: fix power up sequencing
  clk: tegra: Fix Tegra PMC clock out parents
  power: supply: bq27xxx_battery: Silence deferred-probe error
  clk: at91: usb: continue if clk_hw_round_rate() return zero
  x86/Hyper-V: Report crash data in die() when panic_on_oops is set
  x86/Hyper-V: Report crash register data when sysctl_record_panic_msg is not set
  x86/Hyper-V: Trigger crash enlightenment only once during system crash.
  x86/Hyper-V: Free hv_panic_page when fail to register kmsg dump
  x86/Hyper-V: Unload vmbus channel in hv panic callback
  xsk: Add missing check on user supplied headroom size
  rbd: call rbd_dev_unprobe() after unwatching and flushing notifies
  rbd: avoid a deadlock on header_rwsem when flushing notifies
  video: fbdev: sis: Remove unnecessary parentheses and commented code
  lib/raid6: use vdupq_n_u8 to avoid endianness warnings
  x86/Hyper-V: Report crash register data or kmsg before running crash kernel
  of: overlay: kmemleak in dup_and_fixup_symbol_prop()
  of: unittest: kmemleak in of_unittest_overlay_high_level()
  of: unittest: kmemleak in of_unittest_platform_populate()
  of: unittest: kmemleak on changeset destroy
  ALSA: hda: Don't release card at firmware loading error
  irqchip/mbigen: Free msi_desc on device teardown
  netfilter: nf_tables: report EOPNOTSUPP on unsupported flags/object type
  ARM: dts: imx6: Use gpc for FEC interrupt controller to fix wake on LAN.
  arm, bpf: Fix bugs with ALU64 {RSH, ARSH} BPF_K shift by 0
  watchdog: sp805: fix restart handler
  ext4: use non-movable memory for superblock readahead
  scsi: sg: add sg_remove_request in sg_common_write
  objtool: Fix switch table detection in .text.unlikely
  arm, bpf: Fix offset overflow for BPF_MEM BPF_DW
  ANDROID: GKI: Bulk update ABI report.
  ANDROID: GKI: qos: Register irq notify after adding the qos request
  ANDROID: GKI: Add dual role mode to usb_dr_modes array
  UPSTREAM: virtio-gpu api: comment feature flags
  ANDROID: arch:arm64: Increase kernel command line size
  ANDROID: GKI: Add special linux_banner_ptr for modules
  Revert "ANDROID: GKI: Make linux_banner a C pointer"
  ANDROID: GKI: PM / devfreq: Add new flag to do simple clock scaling
  ANDROID: GKI: Resolve ABI diff for struct snd_usb_audio
  ANDROID: GKI: Bulk update ABI
  ANDROID: GKI: Update the whitelist for qcom SoCs
  ANDROID: GKI: arm64: gki_defconfig: Set CONFIG_SCSI_UFSHCD=m
  ANDROID: GKI: scsi: add option to override the command timeout
  ANDROID: GKI: scsi: Adjust DBD setting in mode sense for caching mode page per LLD
  ANDROID: add ion_stat tracepoint to common kernel
  UPSTREAM: gpu/trace: add a gpu total memory usage tracepoint
  Linux 4.19.117
  mm/vmalloc.c: move 'area->pages' after if statement
  wil6210: remove reset file from debugfs
  wil6210: make sure Rx ring sizes are correlated
  wil6210: add general initialization/size checks
  wil6210: ignore HALP ICR if already handled
  wil6210: check rx_buff_mgmt before accessing it
  x86/resctrl: Fix invalid attempt at removing the default resource group
  x86/resctrl: Preserve CDP enable over CPU hotplug
  x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
  scsi: target: fix hang when multiple threads try to destroy the same iscsi session
  scsi: target: remove boilerplate code
  kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
  ext4: do not zeroout extents beyond i_disksize
  drm/amd/powerplay: force the trim of the mclk dpm_levels if OD is enabled
  usb: dwc3: gadget: Don't clear flags before transfer ended
  usb: dwc3: gadget: don't enable interrupt when disabling endpoint
  mac80211_hwsim: Use kstrndup() in place of kasprintf()
  btrfs: check commit root generation in should_ignore_root
  tracing: Fix the race between registering 'snapshot' event trigger and triggering 'snapshot' operation
  keys: Fix proc_keys_next to increase position index
  ALSA: usb-audio: Check mapping at creating connector controls, too
  ALSA: usb-audio: Don't create jack controls for PCM terminals
  ALSA: usb-audio: Don't override ignore_ctl_error value from the map
  ALSA: usb-audio: Filter error from connector kctl ops, too
  ASoC: Intel: mrfld: return error codes when an error occurs
  ASoC: Intel: mrfld: fix incorrect check on p->sink
  ext4: fix incorrect inodes per group in error message
  ext4: fix incorrect group count in ext4_fill_super error message
  pwm: pca9685: Fix PWM/GPIO inter-operation
  jbd2: improve comments about freeing data buffers whose page mapping is NULL
  scsi: ufs: Fix ufshcd_hold() caused scheduling while atomic
  ovl: fix value of i_ino for lower hardlink corner case
  net: dsa: mt7530: fix tagged frames pass-through in VLAN-unaware mode
  net: stmmac: dwmac-sunxi: Provide TX and RX fifo sizes
  net: revert default NAPI poll timeout to 2 jiffies
  net: qrtr: send msgs from local of same id as broadcast
  net: ipv6: do not consider routes via gateways for anycast address check
  net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin
  hsr: check protocol version in hsr_newlink()
  amd-xgbe: Use __napi_schedule() in BH context
  ANDROID: GKI: drivers: of-thermal: Relate thermal zones using same sensor
  ANDROID: GKI: Bulk ABI update
  ANDROID: GKI: dma: Add set_dma_mask hook to struct dma_map_ops
  ANDROID: GKI: ABI update due to recent patches
  FROMLIST: drm/prime: add support for virtio exported objects
  FROMLIST: dma-buf: add support for virtio exported objects
  UPSTREAM: drm/virtio: module_param_named() requires linux/moduleparam.h
  UPSTREAM: drm/virtio: fix resource id creation race
  UPSTREAM: drm/virtio: make resource id workaround runtime switchable.
  BACKPORT: drm/virtio: Drop deprecated load/unload initialization
  ANDROID: GKI: Add DRM_TTM config to GKI
  ANDROID: Bulk update the ABI xml representation
  ANDROID: GKI: spmi: pmic-arb: don't enable SPMI_MSM_PMIC_ARB by default
  ANDROID: GKI: attribute page lock and waitqueue functions as sched
  ANDROID: GKI: extcon: Fix Add usage of blocking notifier chain
  ANDROID: GKI: USB: pd: Extcon fix for C current
  ANDROID: drm/dsi: Fix byte order of DCS set/get brightness
  ANDROID: GKI: mm: Export symbols to modularize CONFIG_MSM_DRM
  ANDROID: GKI: ALSA: compress: Add support to send codec specific data
  ANDROID: GKI: ALSA: Compress - dont use lock for all ioctls
  ANDROID: GKI: ASoC: msm: qdsp6v2: add support for AMR_WB_PLUS offload
  ANDROID: GKI: msm: dolby: MAT and THD audiocodec name modification
  ANDROID: GKI: asoc: msm: Add support for compressed perf mode
  ANDROID: GKI: msm: audio: support for gapless_pcm
  ANDROID: GKI: uapi: msm: dolby: Support for TrueHD and MAT decoders
  ANDROID: GKI: ASoC: msm: qdsp6v2: Add TrueHD HDMI compress pass-though
  ANDROID: GKI: ALSA: compress: Add APTX format support in ALSA
  ANDROID: GKI: msm: qdsp6v2: Add timestamp support for compress capture
  ANDROID: GKI: SoC: msm: Add support for meta data in compressed TX
  ANDROID: GKI: ALSA: compress: Add DSD format support for ALSA
  ANDROID: GKI: ASoC: msm: qdsp6v2: add support for ALAC and APE offload
  ANDROID: GKI: SoC: msm: Add compressed TX and passthrough support
  ANDROID: GKI: ASoC: msm: qdsp6v2: Add FLAC in compress offload path
  ANDROID: GKI: ASoC: msm: add support for different compressed formats
  ANDROID: GKI: ASoC: msm: Update the encode option and sample rate
  ANDROID: GKI: Enable CONFIG_SND_VERBOSE_PROCFS in gki_defconfig
  ANDROID: GKI: Add hidden CONFIG_SND_SOC_COMPRESS to gki_defconfig
  ANDROID: GKI: ALSA: pcm: add locks for accessing runtime resource
  ANDROID: GKI: Update ABI for DRM changes
  ANDROID: GKI: Add drm_dp_send_dpcd_{read,write} accessor functions
  ANDROID: GKI: drm: Add drm_dp_mst_get_max_sdp_streams_supported accessor function
  ANDROID: GKI: drm: Add drm_dp_mst_has_fec accessor function
  ANDROID: GKI: Add 'dsc_info' to struct drm_dp_mst_port
  ANDROID: GKI: usb: Add support to handle USB SMMU S1 address
  ANDROID: GKI: usb: Add helper APIs to return xhci phys addresses
  ANDROID: Add C protos for dma_buf/drm_prime get_uuid
  ANDROID: GKI: Make linux_banner a C pointer
  ANDROID: GKI: Add 'refresh_rate', 'id' to struct drm_panel_notifier
  ANDROID: GKI: Add 'i2c_mutex' to struct drm_dp_aux
  ANDROID: GKI: Add 'checksum' to struct drm_connector
  Revert "BACKPORT: drm: Add HDR source metadata property"
  Revert "BACKPORT: drm: Parse HDR metadata info from EDID"
  ANDROID: drm: Add DP colorspace property
  ANDROID: GKI: drm: Initialize display->hdmi when parsing vsdb
  ANDROID: drivers: gpu: drm: add support to batch commands
  ANDROID: ABI: update the qcom whitelist
  ANDROID: GKI: ARM64: smp: add vendor field pending_ipi
  ANDROID: gki_defconfig: enable msm serial early console
  ANDROID: serial: msm_geni_serial_console : Add Earlycon support
  ANDROID: GKI: serial: core: export uart_console_device
  f2fs: fix quota_sync failure due to f2fs_lock_op
  f2fs: support read iostat
  f2fs: Fix the accounting of dcc->undiscard_blks
  f2fs: fix to handle error path of f2fs_ra_meta_pages()
  f2fs: report the discard cmd errors properly
  f2fs: fix long latency due to discard during umount
  f2fs: add tracepoint for f2fs iostat
  f2fs: introduce sysfs/data_io_flag to attach REQ_META/FUA
  ANDROID: GKI: update abi definition due to previous changes in the tree
  Linux 4.19.116
  efi/x86: Fix the deletion of variables in mixed mode
  mfd: dln2: Fix sanity checking for endpoints
  etnaviv: perfmon: fix total and idle HI cycles readout
  misc: echo: Remove unnecessary parentheses and simplify check for zero
  powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
  ftrace/kprobe: Show the maxactive number on kprobe_events
  drm: Remove PageReserved manipulation from drm_pci_alloc
  drm/dp_mst: Fix clearing payload state on topology disable
  Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
  crypto: ccree - only try to map auth tag if needed
  crypto: ccree - dec auth tag size from cryptlen map
  crypto: ccree - don't mangle the request assoclen
  crypto: ccree - zero out internal struct before use
  crypto: ccree - improve error handling
  crypto: caam - update xts sector size for large input length
  dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
  btrfs: use nofs allocations for running delayed items
  powerpc: Make setjmp/longjmp signature standard
  powerpc: Add attributes for setjmp/longjmp
  scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
  powerpc/kprobes: Ignore traps that happened in real mode
  powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured IPIs
  powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap PTE entries
  powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
  powerpc/powernv/idle: Restore AMR/UAMOR/AMOR after idle
  xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
  ipmi: fix hung processes in __get_guid()
  libata: Return correct status in sata_pmp_eh_recover_pm() when ATA_DFLAG_DETACH is set
  hfsplus: fix crash and filesystem corruption when deleting files
  cpufreq: powernv: Fix use-after-free
  kmod: make request_module() return an error when autoloading is disabled
  clk: ingenic/jz4770: Exit with error if CGU init failed
  Input: i8042 - add Acer Aspire 5738z to nomux list
  s390/diag: fix display of diagnose call statistics
  perf tools: Support Python 3.8+ in Makefile
  ocfs2: no need try to truncate file beyond i_size
  fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once()
  ext4: fix a data race at inode->i_blocks
  NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
  powerpc/pseries: Avoid NULL pointer dereference when drmem is unavailable
  drm/etnaviv: rework perfmon query infrastructure
  rtc: omap: Use define directive for PIN_CONFIG_ACTIVE_HIGH
  selftests: vm: drop dependencies on page flags from mlock2 tests
  arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
  scsi: zfcp: fix missing erp_lock in port recovery trigger for point-to-point
  dm verity fec: fix memory leak in verity_fec_dtr
  dm writecache: add cond_resched to avoid CPU hangs
  arm64: dts: allwinner: h6: Fix PMU compatible
  net: qualcomm: rmnet: Allow configuration updates to existing devices
  mm: Use fixed constant in page_frag_alloc instead of size + 1
  tools: gpio: Fix out-of-tree build regression
  x86/speculation: Remove redundant arch_smt_update() invocation
  powerpc/pseries: Drop pointless static qualifier in vpa_debugfs_init()
  erofs: correct the remaining shrink objects
  crypto: mxs-dcp - fix scatterlist linearization for hash
  btrfs: fix missing semaphore unlock in btrfs_sync_file
  btrfs: fix missing file extent item for hole after ranged fsync
  btrfs: drop block from cache on error in relocation
  btrfs: set update the uuid generation as soon as possible
  Btrfs: fix crash during unmount due to race with delayed inode workers
  mtd: spinand: Do not erase the block before writing a bad block marker
  mtd: spinand: Stop using spinand->oobbuf for buffering bad block markers
  CIFS: Fix bug which the return value by asynchronous read is error
  KVM: VMX: fix crash cleanup when KVM wasn't used
  KVM: x86: Gracefully handle __vmalloc() failure during VM allocation
  KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
  KVM: x86: Allocate new rmap and large page tracking when moving memslot
  KVM: s390: vsie: Fix delivery of addressing exceptions
  KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
  KVM: nVMX: Properly handle userspace interrupt window request
  x86/entry/32: Add missing ASM_CLAC to general_protection entry
  signal: Extend exec_id to 64bits
  ath9k: Handle txpower changes even when TPC is disabled
  MIPS: OCTEON: irq: Fix potential NULL pointer dereference
  MIPS/tlbex: Fix LDDIR usage in setup_pw() for Loongson-3
  pstore: pstore_ftrace_seq_next should increase position index
  irqchip/versatile-fpga: Apply clear-mask earlier
  KEYS: reaching the keys quotas correctly
  tpm: tpm2_bios_measurements_next should increase position index
  tpm: tpm1_bios_measurements_next should increase position index
  tpm: Don't make log failures fatal
  PCI: endpoint: Fix for concurrent memory allocation in OB address region
  PCI: Add boot interrupt quirk mechanism for Xeon chipsets
  PCI/ASPM: Clear the correct bits when enabling L1 substates
  PCI: pciehp: Fix indefinite wait on sysfs requests
  nvme: Treat discovery subsystems as unique subsystems
  nvme-fc: Revert "add module to ops template to allow module references"
  thermal: devfreq_cooling: inline all stubs for CONFIG_DEVFREQ_THERMAL=n
  acpi/x86: ignore unspecified bit positions in the ACPI global lock field
  media: ti-vpe: cal: fix disable_irqs to only the intended target
  ALSA: hda/realtek - Add quirk for MSI GL63
  ALSA: hda/realtek - Remove now-unnecessary XPS 13 headphone noise fixups
  ALSA: hda/realtek - Set principled PC Beep configuration for ALC256
  ALSA: doc: Document PC Beep Hidden Register on Realtek ALC256
  ALSA: pcm: oss: Fix regression by buffer overflow fix
  ALSA: ice1724: Fix invalid access for enumerated ctl items
  ALSA: hda: Fix potential access overflow in beep helper
  ALSA: hda: Add driver blacklist
  ALSA: usb-audio: Add mixer workaround for TRX40 and co
  usb: gadget: composite: Inform controller driver of self-powered
  usb: gadget: f_fs: Fix use after free issue as part of queue failure
  ASoC: topology: use name_prefix for new kcontrol
  ASoC: dpcm: allow start or stop during pause for backend
  ASoC: dapm: connect virtual mux with default value
  ASoC: fix regwmask
  slub: improve bit diffusion for freelist ptr obfuscation
  uapi: rename ext2_swab() to swab() and share globally in swab.h
  IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
  btrfs: track reloc roots based on their commit root bytenr
  btrfs: remove a BUG_ON() from merge_reloc_roots()
  btrfs: qgroup: ensure qgroup_rescan_running is only set when the worker is at least queued
  block, bfq: fix use-after-free in bfq_idle_slice_timer_body
  locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()
  firmware: fix a double abort case with fw_load_sysfs_fallback
  md: check arrays is suspended in mddev_detach before call quiesce operations
  irqchip/gic-v4: Provide irq_retrigger to avoid circular locking dependency
  usb: dwc3: core: add support for disabling SS instances in park mode
  media: i2c: ov5695: Fix power on and off sequences
  block: Fix use-after-free issue accessing struct io_cq
  genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy()
  efi/x86: Ignore the memory attributes table on i386
  x86/boot: Use unsigned comparison for addresses
  gfs2: Don't demote a glock until its revokes are written
  pstore/platform: fix potential mem leak if pstore_init_fs failed
  libata: Remove extra scsi_host_put() in ata_scsi_add_hosts()
  media: i2c: video-i2c: fix build errors due to 'imply hwmon'
  PCI/switchtec: Fix init_completion race condition with poll_wait()
  selftests/x86/ptrace_syscall_32: Fix no-vDSO segfault
  sched: Avoid scale real weight down to zero
  irqchip/versatile-fpga: Handle chained IRQs properly
  block: keep bdi->io_pages in sync with max_sectors_kb for stacked devices
  x86: Don't let pgprot_modify() change the page encryption bit
  xhci: bail out early if driver can't accress host in resume
  null_blk: fix spurious IO errors after failed past-wp access
  null_blk: Handle null_add_dev() failures properly
  null_blk: Fix the null_add_dev() error path
  firmware: arm_sdei: fix double-lock on hibernate with shared events
  media: venus: hfi_parser: Ignore HEVC encoding for V1
  cpufreq: imx6q: Fixes unwanted cpu overclocking on i.MX6ULL
  i2c: st: fix missing struct parameter description
  qlcnic: Fix bad kzalloc null test
  cxgb4/ptp: pass the sign of offset delta in FW CMD
  hinic: fix wrong para of wait_for_completion_timeout
  hinic: fix a bug of waitting for IO stopped
  net: vxge: fix wrong __VA_ARGS__ usage
  bus: sunxi-rsb: Return correct data when mixing 16-bit and 8-bit reads
  ARM: dts: sun8i-a83t-tbs-a711: HM5065 doesn't like such a high voltage
  ANDROID: build.config.allmodconfig: Re-enable XFS_FS
  FROMGIT: of: property: Add device link support for extcon
  ANDROID: GKI: arm64: gki_defconfig: enable CONFIG_MM_EVENT_STAT
  ANDROID: GKI: add fields from per-process mm event tracking feature
  ANDROID: GKI: fix ABI diffs caused by ION heap and pool vmstat additions
  UPSTREAM: GKI: panic/reboot: allow specifying reboot_mode for panic only
  ANDROID: GKI: of: property: Add device link support for phys property
  ANDROID: GKI: usb: phy: Fix ABI diff for usb_otg_state
  ANDROID: GKI: usb: phy: Fix ABI diff due to usb_phy.drive_dp_pulse
  ANDROID: GKI: usb: phy: Fix ABI diff for usb_phy_type and usb_phy.reset
  ANDROID: gki_defconfig: enable CONFIG_GPIO_SYSFS
  ANDROID: GKI: qcom: Fix compile issue when setting msm_lmh_dcvs as a module
  ANDROID: GKI: drivers: cpu_cooling: allow platform freq mitigation
  ANDROID: GKI: ASoC: Add locking in DAPM widget power update
  ANDROID: GKI: ASoC: jack: Fix buttons enum value
  ANDROID: GKI: ALSA: jack: Add support to report second microphone
  ANDROID: GKI: ALSA: jack: Update supported jack switch types
  ANDROID: GKI: ALSA: jack: update jack types
  ANDROID: GKI: Export symbols arm_cpuidle_suspend, cpuidle_dev and cpuidle_register_governor
  ANDROID: GKI: usb: hcd: Add USB atomic notifier callback for HC died error
  ANDROID: media: increase video max frame number
  BACKPORT: nvmem: core: add NVMEM_SYSFS Kconfig
  UPSTREAM: nvmem: add support for cell info
  UPSTREAM: nvmem: remove the global cell list
  UPSTREAM: nvmem: use kref
  UPSTREAM: nvmem: use list_for_each_entry_safe in nvmem_device_remove_all_cells()
  UPSTREAM: nvmem: provide nvmem_dev_name()
  ANDROID: GKI: Bulk ABI update
  ANDROID: GKI: cpuhotplug: adding hotplug enums for vendor code
  ANDROID: Incremental fs: Fix create_file performance
  ANDROID: build.config.common: Add BUILDTOOLS_PREBUILT_BIN
  UPSTREAM: kheaders: include only headers into kheaders_data.tar.xz
  UPSTREAM: kheaders: remove meaningless -R option of 'ls'
  ANDROID: GKI: of: platform: initialize of_reserved_mem
  ANDROID: driver: gpu: drm: add notifier for panel related events
  ANDROID: include: drm: support unicasting mipi cmds to dsi ctrls
  ANDROID: include: drm: increase DRM max property count to 64
  BACKPORT: drm: Add HDMI colorspace property
  ANDROID: drm: edid: add support for additional CEA extension blocks
  BACKPORT: drm: Parse HDR metadata info from EDID
  BACKPORT: drm: Add HDR source metadata property
  BACKPORT: drm/dp_mst: Parse FEC capability on MST ports
  ANDROID: GKI: ABI update for DRM changes
  ANDROID: ABI: add missing elf variables to representation
  ANDROID: GKI: power_supply: Add PROP_MOISTURE_DETECTION_ENABLED
  ANDROID: include: drm: add the definitions for DP Link Compliance tests
  ANDROID: drivers: gpu: drm: fix bugs encountered while fuzzing
  FROMLIST: power_supply: Add additional health properties to the header
  UPSTREAM: power: supply: core: Update sysfs-class-power ABI document
  UPSTREAM: Merge remote-tracking branch 'aosp/upstream-f2fs-stable-linux-4.19.y' into android-4.19 (v5.7-rc1)
  ANDROID: drivers: gpu: drm: add support for secure framebuffer
  ANDROID: include: uapi: drm: add additional QCOM modifiers
  ANDROID: drm: dsi: add two DSI mode flags for BLLP
  ANDROID: include: uapi: drm: add additional drm mode flags
  UPSTREAM: drm: plug memory leak on drm_setup() failure
  UPSTREAM: drm: factor out drm_close_helper() function
  ANDROID: GKI: Bulk ABI update
  BACKPORT: nl80211: Add per peer statistics to compute FCS error rate
  ANDROID: GKI: sound: usb: Add snd_usb_enable_audio_stream/find_snd_usb_substream
  ANDROID: GKI: add dma-buf includes
  ANDROID: GKI: sched: struct fields for Per-Sched-domain over utilization
  ANDROID: GKI: Add vendor fields to root_domain
  ANDROID: gki_defconfig: Enable CONFIG_IRQ_TIME_ACCOUNTING
  ANDROID: fix allmodconfig build to use the right toolchain
  ANDROID: fix allmodconfig build to use the right toolchain
  ANDROID: GKI: Update ABI
  Revert "UPSTREAM: mm, page_alloc: spread allocations across zones before introducing fragmentation"
  Revert "UPSTREAM: mm: use alloc_flags to record if kswapd can wake"
  Revert "BACKPORT: mm: move zone watermark accesses behind an accessor"
  Revert "BACKPORT: mm: reclaim small amounts of memory when an external fragmentation event occurs"
  Revert "BACKPORT: mm, compaction: be selective about what pageblocks to clear skip hints"
  ANDROID: GKI: panic: add vendor callback function in panic()
  UPSTREAM: GKI: thermal: make device_register's type argument const
  ANDROID: GKI: add base.h include to match MODULE_VERSIONS
  ANDROID: update the ABI based on the new whitelist
  ANDROID: GKI: fdt: export symbols required by modules
  ANDROID: GKI: drivers: of: Add APIs to find DDR device rank, HBB
  ANDROID: GKI: security: Add mmap export symbols for modules
  ANDROID: GKI: arch: add stub symbols for boot_reason and cold_boot
  ANDROID: GKI: USB: Fix ABI diff for struct usb_bus
  ANDROID: GKI: USB: Resolve ABI diff for usb_gadget and usb_gadget_ops
  ANDROID: GKI: add hidden V4L2_MEM2MEM_DEV
  ANDROID: GKI: enable VIDEO_V4L2_SUBDEV_API
  ANDROID: GKI: export symbols from abi_gki_aarch64_qcom_whitelist
  ANDROID: Update the whitelist for qcom SoCs
  ANDROID: Incremental fs: Fix compound page usercopy crash
  ANDROID: Incremental fs: Clean up incfs_test build process
  ANDROID: Incremental fs: make remount log buffer change atomic
  ANDROID: Incremental fs: Optimize get_filled_block
  ANDROID: Incremental fs: Fix mislabeled __user ptrs
  ANDROID: Incremental fs: Use 64-bit int for file_size when writing hash blocks
  Linux 4.19.115
  drm/msm: Use the correct dma_sync calls in msm_gem
  drm_dp_mst_topology: fix broken drm_dp_sideband_parse_remote_dpcd_read()
  usb: dwc3: don't set gadget->is_otg flag
  rpmsg: glink: Remove chunk size word align warning
  arm64: Fix size of __early_cpu_boot_status
  drm/msm: stop abusing dma_map/unmap for cache
  clk: qcom: rcg: Return failure for RCG update
  fbcon: fix null-ptr-deref in fbcon_switch
  RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
  Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
  RDMA/cma: Teach lockdep about the order of rtnl and lock
  RDMA/ucma: Put a lock around every call to the rdma_cm layer
  ceph: canonicalize server path in place
  ceph: remove the extra slashes in the server path
  IB/hfi1: Fix memory leaks in sysfs registration and unregistration
  IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
  ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
  hwrng: imx-rngc - fix an error path
  tools/accounting/getdelays.c: fix netlink attribute length
  usb: dwc3: gadget: Wrap around when skip TRBs
  random: always use batched entropy for get_random_u{32,64}
  mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
  slcan: Don't transmit uninitialized stack data in padding
  net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
  net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
  net: dsa: bcm_sf2: Ensure correct sub-node is parsed
  net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
  ipv6: don't auto-add link-local address to lag ports
  mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
  include/linux/notifier.h: SRCU: fix ctags
  bitops: protect variables in set_mask_bits() macro
  padata: always acquire cpu_hotplug_lock before pinst->lock
  net: Fix Tx hash bound checking
  rxrpc: Fix sendmsg(MSG_WAITALL) handling
  ALSA: hda/ca0132 - Add Recon3Di quirk to handle integrated sound on EVGA X99 Classified motherboard
  power: supply: axp288_charger: Add special handling for HP Pavilion x2 10
  extcon: axp288: Add wakeup support
  mei: me: add cedar fork device ids
  coresight: do not use the BIT() macro in the UAPI header
  misc: pci_endpoint_test: Avoid using module parameter to determine irqtype
  misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
  misc: rtsx: set correct pcr_ops for rts522A
  media: rc: IR signal for Panasonic air conditioner too long
  drm/etnaviv: replace MMU flush marker with flush sequence
  tools/power turbostat: Fix missing SYS_LPI counter on some Chromebooks
  tools/power turbostat: Fix gcc build warnings
  drm/amdgpu: fix typo for vcn1 idle check
  initramfs: restore default compression behavior
  drm/bochs: downgrade pci_request_region failure from error to warning
  drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
  nvme-rdma: Avoid double freeing of async event data
  sctp: fix possibly using a bad saddr with a given dst
  sctp: fix refcount bug in sctp_wfree
  net, ip_tunnel: fix interface lookup with no key
  ipv4: fix a RCU-list lock in fib_triestat_seq_show
  ANDROID: GKI: export symbols required by SPECTRA_CAMERA
  ANDROID: GKI: ARM/ARM64: Introduce arch_read_hardware_id
  ANDROID: GKI: drivers: base: soc: export symbols for socinfo
  ANDROID: GKI: Update ABI
  ANDROID: GKI: ASoC: msm: fix integer overflow for long duration offload playback
  ANDROID: GKI: Bulk ABI update
  Revert "ANDROID: GKI: mm: add struct/enum fields for SPECULATIVE_PAGE_FAULTS"
  ANDROID: GKI: Revert "arm64: kill flush_cache_all()"
  ANDROID: GKI: Revert "arm64: Remove unused macros from assembler.h"
  ANDROID: GKI: kernel/dma, mm/cma: Export symbols needed by vendor modules
  ANDROID: GKI: mm: Export symbols __next_zones_zonelist and zone_watermark_ok_safe
  ANDROID: GKI: mm/memblock: export memblock_overlaps_memory
  ANDROID: GKI: net, skbuff: export symbols needed by vendor drivers
  ANDROID: GKI: Add stub __cpu_isolated_mask symbol
  ANDROID: GKI: sched: stub sched_isolate symbols
  ANDROID: GKI: export saved_command_line
  ANDROID: GKI: Update ABI
  ANDROID: GKI: ASoC: core: Update ALSA core to issue restart in underrun.
  ANDROID: GKI: SoC: pcm: Add a restart callback field to struct snd_pcm_ops
  ANDROID: GKI: SoC: pcm: Add fields to struct snd_pcm_ops and struct snd_soc_component_driver
  ANDROID: GKI: ASoC: core: Add compat_ioctl callback to struct snd_pcm_ops
  ANDROID: GKI: ALSA: core: modify, rename and export create_subdir API
  ANDROID: GKI: usb: Add helper API to issue stop endpoint command
  ANDROID: GKI: Thermal: thermal_zone_get_cdev_by_name added
  ANDROID: GKI: add missing exports for CONFIG_ARM_SMMU=m
  ANDROID: power: wakeup_reason: wake reason enhancements
  BACKPORT: FROMGIT: kbuild: mkcompile_h: Include $LD version in /proc/version
  ANDROID: GKI: kernel: Export symbols needed by msm_minidump.ko and minidump_log.ko
  ubifs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  f2fs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  ext4: wire up FS_IOC_GET_ENCRYPTION_NONCE
  fscrypt: add FS_IOC_GET_ENCRYPTION_NONCE ioctl
  ANDROID: Bulk update the ABI xml
  ANDROID: gki_defconfig: add CONFIG_IPV6_SUBTREES
  ANDROID: GKI: arm64: reserve space in cpu_hwcaps and cpu_hwcap_keys arrays
  ANDROID: GKI: of: reserved_mem: Fix kmemleak crash on no-map region
  ANDROID: GKI: sched: add task boost vendor fields to task_struct
  ANDROID: GKI: mm: add rss counter for unreclaimable pages
  ANDROID: GKI: irqdomain: add bus token DOMAIN_BUS_WAKEUP
  ANDROID: GKI: arm64: fault: do_tlb_conf_fault_cb register fault callback
  ANDROID: GKI: QoS: Enhance framework to support cpu/irq specific QoS requests
  ANDROID: GKI: Bulk ABI update
  ANDROID: GKI: PM/devfreq: Do not switch governors from sysfs when device is suspended
  ANDROID: GKI: PM / devfreq: Fix race condition between suspend/resume and governor_store
  ANDROID: GKI: PM / devfreq: Introduce a sysfs lock
  ANDROID: GKI: regmap: irq: Add support to clear ack registers
  ANDROID: GKI: Remove SCHED_AUTOGROUP
  ANDROID: ignore compiler tag __must_check for GENKSYMS
  ANDROID: GKI: Bulk update ABI
  ANDROID: GKI: Fix ABI diff for struct thermal_cooling_device_ops
  ANDROID: GKI: ASoC: soc-core: export function to find components
  ANDROID: GKI: thermal: thermal_sys: Add configurable thermal trip points.
  ANDROID: fscrypt: fall back to filesystem-layer crypto when needed
  ANDROID: block: require drivers to declare supported crypto key type(s)
  ANDROID: block: make blk_crypto_start_using_mode() properly check for support
  ANDROID: GKI: power: supply: format regression
  ANDROID: GKI: kobject: increase number of kobject uevent pointers to 64
  ANDROID: GKI: drivers: video: backlight: Fix ABI diff for struct backlight_device
  ANDROID: GKI: usb: xhci: Add support for secondary interrupters
  ANDROID: GKI: usb: host: xhci: Add support for usb core indexing
  ANDROID: gki_defconfig: enable USB_XHCI_HCD
  ANDROID: gki_defconfig: enable CONFIG_BRIDGE
  ANDROID: GKI: Update ABI report
  ANDROID: GKI: arm64: smp: Add set_update_ipi_history_callback
  ANDROID: kbuild: ensure __cfi_check is correctly aligned
  f2fs: keep inline_data when compression conversion
  f2fs: fix to disable compression on directory
  f2fs: add missing CONFIG_F2FS_FS_COMPRESSION
  f2fs: switch discard_policy.timeout to bool type
  f2fs: fix to verify tpage before releasing in f2fs_free_dic()
  f2fs: show compression in statx
  f2fs: clean up dic->tpages assignment
  f2fs: compress: support zstd compress algorithm
  f2fs: compress: add .{init,destroy}_decompress_ctx callback
  f2fs: compress: fix to call missing destroy_compress_ctx()
  f2fs: change default compression algorithm
  f2fs: clean up {cic,dic}.ref handling
  f2fs: fix to use f2fs_readpage_limit() in f2fs_read_multi_pages()
  f2fs: xattr.h: Make stub helpers inline
  f2fs: fix to avoid double unlock
  f2fs: fix potential .flags overflow on 32bit architecture
  f2fs: fix NULL pointer dereference in f2fs_verity_work()
  f2fs: fix to clear PG_error if fsverity failed
  f2fs: don't call fscrypt_get_encryption_info() explicitly in f2fs_tmpfile()
  f2fs: don't trigger data flush in foreground operation
  f2fs: fix NULL pointer dereference in f2fs_write_begin()
  f2fs: clean up f2fs_may_encrypt()
  f2fs: fix to avoid potential deadlock
  f2fs: don't change inode status under page lock
  f2fs: fix potential deadlock on compressed quota file
  f2fs: delete DIO read lock
  f2fs: don't mark compressed inode dirty during f2fs_iget()
  f2fs: fix to account compressed blocks in f2fs_compressed_blocks()
  f2fs: xattr.h: Replace zero-length array with flexible-array member
  f2fs: fix to update f2fs_super_block fields under sb_lock
  f2fs: Add a new CP flag to help fsck fix resize SPO issues
  f2fs: Fix mount failure due to SPO after a successful online resize FS
  f2fs: use kmem_cache pool during inline xattr lookups
  f2fs: skip migration only when BG_GC is called
  f2fs: fix to show tracepoint correctly
  f2fs: avoid __GFP_NOFAIL in f2fs_bio_alloc
  f2fs: introduce F2FS_IOC_GET_COMPRESS_BLOCKS
  f2fs: fix to avoid triggering IO in write path
  f2fs: add prefix for f2fs slab cache name
  f2fs: introduce DEFAULT_IO_TIMEOUT
  f2fs: skip GC when section is full
  f2fs: add migration count iff migration happens
  f2fs: clean up bggc mount option
  f2fs: clean up lfs/adaptive mount option
  f2fs: fix to show norecovery mount option
  f2fs: clean up parameter of macro XATTR_SIZE()
  f2fs: clean up codes with {f2fs_,}data_blkaddr()
  f2fs: show mounted time
  f2fs: Use scnprintf() for avoiding potential buffer overflow
  f2fs: allow to clear F2FS_COMPR_FL flag
  f2fs: fix to check dirty pages during compressed inode conversion
  f2fs: fix to account compressed inode correctly
  f2fs: fix wrong check on F2FS_IOC_FSSETXATTR
  f2fs: fix to avoid use-after-free in f2fs_write_multi_pages()
  f2fs: fix to avoid using uninitialized variable
  f2fs: fix inconsistent comments
  f2fs: remove i_sem lock coverage in f2fs_setxattr()
  f2fs: cover last_disk_size update with spinlock
  f2fs: fix to check i_compr_blocks correctly
  FROMLIST: kmod: make request_module() return an error when autoloading is disabled
  ANDROID: GKI: Update ABI report
  ANDROID: GKI: ARM64: dma-mapping: export symbol arch_setup_dma_ops
  ANDROID: GKI: ARM: dma-mapping: export symbol arch_setup_dma_ops
  ANDROID: GKI: ASoC: dapm: Avoid static route b/w cpu and codec dai
  ANDROID: GKI: ASoC: pcm: Add support for hostless playback/capture
  ANDROID: GKI: ASoC: core - add hostless DAI support
  ANDROID: GKI: drivers: thermal: Resolve ABI diff for struct thermal_zone_device_ops
  ANDROID: GKI: drivers: thermal: Add support for getting trip temperature
  ANDROID: GKI: Add functions of_thermal_handle_trip/of_thermal_handle_trip_temp
  ANDROID: GKI: drivers: thermal: Add post suspend evaluate flag to thermal zone devicetree
  UPSTREAM: loop: Only freeze block queue when needed.
  UPSTREAM: loop: Only change blocksize when needed.
  ANDROID: Fix wq fp check for CFI builds
  ANDROID: GKI: update abi definition after CONFIG_DEBUG_LIST was enabled
  ANDROID: gki_defconfig: enable CONFIG_DEBUG_LIST
  ANDROID: GKI: Update ABI definition
  ANDROID: GKI: remove condition causing sk_buff struct ABI differences
  ANDROID: GKI: Export symbol arch_timer_mem_get_cval
  ANDROID: GKI: pwm: core: Add option to config PWM duty/period with u64 data length
  ANDROID: Update ABI whitelist for qcom SoCs
  ANDROID: Incremental fs: Fix remount
  ANDROID: Incremental fs: Protect get_fill_block, and add a field
  ANDROID: Incremental fs: Fix crash polling 0 size read_log
  ANDROID: Incremental fs: get_filled_blocks: better index_out
  ANDROID: GKI: of: property: Add device links support for "qcom,wrapper-dev"
  ANDROID: GKI: update abi definitions due to recent changes
  ANDROID: GKI: clk: Initialize in stack clk_init_data to 0 in all drivers
  ANDROID: GKI: drivers: clksource: Add API to return cval
  ANDROID: GKI: clk: Add support for voltage voting
  ANDROID: GKI: kernel: Export task and IRQ affinity symbols
  ANDROID: GKI: regulator: core: Add support for regulator providers with sync state
  ANDROID: GKI: regulator: Call proxy-consumer functions for each regulator registered
  ANDROID: GKI: regulator: Add proxy consumer driver
  ANDROID: GKI: regulator: core: allow long device tree supply regulator property names
  ANDROID: GKI: Revert "regulator: Enable supply regulator if child rail is enabled."
  ANDROID: GKI: regulator: Remove redundant set_mode call in drms_uA_update
  ANDROID: GKI: net: Add the get current NAPI context API
  ANDROID: GKI: remove DRM_KMS_CMA_HELPER from GKI configuration
  ANDROID: GKI: edac: Fix ABI diffs in edac_device_ctl_info struct
  ANDROID: GKI: pwm: Add different PWM output types support
  UPSTREAM: cfg80211: Authentication offload to user space in AP mode
  Linux 4.19.114
  arm64: dts: ls1046ardb: set RGMII interfaces to RGMII_ID mode
  arm64: dts: ls1043a-rdb: correct RGMII delay mode to rgmii-id
  ARM: dts: N900: fix onenand timings
  ARM: dts: imx6: phycore-som: fix arm and soc minimum voltage
  ARM: bcm2835-rpi-zero-w: Add missing pinctrl name
  ARM: dts: oxnas: Fix clear-mask property
  perf map: Fix off by one in strncpy() size argument
  arm64: alternative: fix build with clang integrated assembler
  net: ks8851-ml: Fix IO operations, again
  gpiolib: acpi: Add quirk to ignore EC wakeups on HP x2 10 CHT + AXP288 model
  bpf: Explicitly memset some bpf info structures declared on the stack
  bpf: Explicitly memset the bpf_attr structure
  platform/x86: pmc_atom: Add Lex 2I385SW to critclk_systems DMI table
  vt: vt_ioctl: fix use-after-free in vt_in_use()
  vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console
  vt: vt_ioctl: remove unnecessary console allocation checks
  vt: switch vt_dont_switch to bool
  vt: ioctl, switch VT_IS_IN_USE and VT_BUSY to inlines
  vt: selection, introduce vc_is_sel
  mac80211: fix authentication with iwlwifi/mvm
  mac80211: Check port authorization in the ieee80211_tx_dequeue() case
  media: xirlink_cit: add missing descriptor sanity checks
  media: stv06xx: add missing descriptor sanity checks
  media: dib0700: fix rc endpoint lookup
  media: ov519: add missing endpoint sanity checks
  libfs: fix infoleak in simple_attr_read()
  ahci: Add Intel Comet Lake H RAID PCI ID
  staging: wlan-ng: fix use-after-free Read in hfa384x_usbin_callback
  staging: wlan-ng: fix ODEBUG bug in prism2sta_disconnect_usb
  staging: rtl8188eu: Add ASUS USB-N10 Nano B1 to device table
  media: usbtv: fix control-message timeouts
  media: flexcop-usb: fix endpoint sanity check
  usb: musb: fix crash with highmen PIO and usbmon
  USB: serial: io_edgeport: fix slab-out-of-bounds read in edge_interrupt_callback
  USB: cdc-acm: restore capability check order
  USB: serial: option: add Wistron Neweb D19Q1
  USB: serial: option: add BroadMobi BM806U
  USB: serial: option: add support for ASKEY WWHC050
  mac80211: set IEEE80211_TX_CTRL_PORT_CTRL_PROTO for nl80211 TX
  mac80211: add option for setting control flags
  Revert "r8169: check that Realtek PHY driver module is loaded"
  vti6: Fix memory leak of skb if input policy check fails
  bpf/btf: Fix BTF verification of enum members in struct/union
  netfilter: nft_fwd_netdev: validate family and chain type
  netfilter: flowtable: reload ip{v6}h in nf_flow_tuple_ip{v6}
  afs: Fix some tracing details
  xfrm: policy: Fix doulbe free in xfrm_policy_timer
  xfrm: add the missing verify_sec_ctx_len check in xfrm_add_acquire
  xfrm: fix uctx len check in verify_sec_ctx_len
  RDMA/mlx5: Block delay drop to unprivileged users
  vti[6]: fix packet tx through bpf_redirect() in XinY cases
  xfrm: handle NETDEV_UNREGISTER for xfrm device
  genirq: Fix reference leaks on irq affinity notifiers
  RDMA/core: Ensure security pkey modify is not lost
  gpiolib: acpi: Add quirk to ignore EC wakeups on HP x2 10 BYT + AXP288 model
  gpiolib: acpi: Rework honor_wakeup option into an ignore_wake option
  gpiolib: acpi: Correct comment for HP x2 10 honor_wakeup quirk
  mac80211: mark station unauthorized before key removal
  nl80211: fix NL80211_ATTR_CHANNEL_WIDTH attribute type
  scsi: sd: Fix optimal I/O size for devices that change reported values
  scripts/dtc: Remove redundant YYLOC global declaration
  tools: Let O= makes handle a relative path with -C option
  perf probe: Do not depend on dwfl_module_addrsym()
  ARM: dts: omap5: Add bus_dma_limit for L3 bus
  ARM: dts: dra7: Add bus_dma_limit for L3 bus
  ceph: check POOL_FLAG_FULL/NEARFULL in addition to OSDMAP_FULL/NEARFULL
  Input: avoid BIT() macro usage in the serio.h UAPI header
  Input: synaptics - enable RMI on HP Envy 13-ad105ng
  Input: raydium_i2c_ts - fix error codes in raydium_i2c_boot_trigger()
  i2c: hix5hd2: add missed clk_disable_unprepare in remove
  ftrace/x86: Anotate text_mutex split between ftrace_arch_code_modify_post_process() and ftrace_arch_code_modify_prepare()
  sxgbe: Fix off by one in samsung driver strncpy size arg
  dpaa_eth: Remove unnecessary boolean expression in dpaa_get_headroom
  mac80211: Do not send mesh HWMP PREQ if HWMP is disabled
  scsi: ipr: Fix softlockup when rescanning devices in petitboot
  s390/qeth: handle error when backing RX buffer
  fsl/fman: detect FMan erratum A050385
  arm64: dts: ls1043a: FMan erratum A050385
  dt-bindings: net: FMan erratum A050385
  cgroup1: don't call release_agent when it is ""
  drivers/of/of_mdio.c:fix of_mdiobus_register()
  cpupower: avoid multiple definition with gcc -fno-common
  nfs: add minor version to nfs_server_key for fscache
  cgroup-v1: cgroup_pidlist_next should update position index
  hsr: set .netnsok flag
  hsr: add restart routine into hsr_get_node_list()
  hsr: use rcu_read_lock() in hsr_get_node_{list/status}()
  vxlan: check return value of gro_cells_init()
  tcp: repair: fix TCP_QUEUE_SEQ implementation
  r8169: re-enable MSI on RTL8168c
  net: phy: mdio-mux-bcm-iproc: check clk_prepare_enable() return value
  net: dsa: mt7530: Change the LINK bit to reflect the link status
  net: ip_gre: Accept IFLA_INFO_DATA-less configuration
  net: ip_gre: Separate ERSPAN newlink / changelink callbacks
  bnxt_en: Reset rings if ring reservation fails during open()
  bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets()
  slcan: not call free_netdev before rtnl_unlock in slcan_open
  NFC: fdp: Fix a signedness bug in fdp_nci_send_patch()
  net: stmmac: dwmac-rk: fix error path in rk_gmac_probe
  net_sched: keep alloc_hash updated after hash allocation
  net_sched: cls_route: remove the right filter from hashtable
  net: qmi_wwan: add support for ASKEY WWHC050
  net/packet: tpacket_rcv: avoid a producer race condition
  net: mvneta: Fix the case where the last poll did not process all rx
  net: dsa: Fix duplicate frames flooded by learning
  net: cbs: Fix software cbs to consider packet sending time
  mlxsw: spectrum_mr: Fix list iteration in error path
  macsec: restrict to ethernet devices
  hsr: fix general protection fault in hsr_addr_is_self()
  geneve: move debug check after netdev unregister
  Revert "drm/dp_mst: Skip validating ports during destruction, just ref"
  mmc: sdhci-tegra: Fix busy detection by enabling MMC_CAP_NEED_RSP_BUSY
  mmc: sdhci-omap: Fix busy detection by enabling MMC_CAP_NEED_RSP_BUSY
  mmc: core: Respect MMC_CAP_NEED_RSP_BUSY for eMMC sleep command
  mmc: core: Respect MMC_CAP_NEED_RSP_BUSY for erase/trim/discard
  mmc: core: Allow host controllers to require R1B for CMD6
  f2fs: fix to avoid potential deadlock
  f2fs: add missing function name in kernel message
  f2fs: recycle unused compress_data.chksum feild
  f2fs: fix to avoid NULL pointer dereference
  f2fs: fix leaking uninitialized memory in compressed clusters
  f2fs: fix the panic in do_checkpoint()
  f2fs: fix to wait all node page writeback
  mm/swapfile.c: move inode_lock out of claim_swapfile
  fscrypt: don't evict dirty inodes after removing key

 Conflicts:
	Documentation/arm64/silicon-errata.txt
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/net/fsl-fman.txt
	arch/arm/kernel/setup.c
	arch/arm/kernel/smp.c
	arch/arm/mm/dma-mapping.c
	arch/arm64/Kconfig
	arch/arm64/Makefile
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/include/asm/cputype.h
	arch/arm64/include/asm/proc-fns.h
	arch/arm64/include/asm/traps.h
	arch/arm64/kernel/arm64ksyms.c
	arch/arm64/kernel/cpu_errata.c
	arch/arm64/kernel/setup.c
	arch/arm64/kernel/smp.c
	arch/arm64/mm/dma-mapping.c
	arch/arm64/mm/fault.c
	arch/arm64/mm/proc.S
	drivers/base/power/wakeup.c
	drivers/clk/clk.c
	drivers/clk/qcom/clk-rcg2.c
	drivers/clocksource/arm_arch_timer.c
	drivers/devfreq/devfreq.c
	drivers/devfreq/governor_simpleondemand.c
	drivers/dma-buf/dma-buf.c
	drivers/extcon/extcon.c
	drivers/gpu/Makefile
	drivers/gpu/drm/drm_connector.c
	drivers/gpu/drm/drm_dp_mst_topology.c
	drivers/gpu/drm/drm_edid.c
	drivers/gpu/drm/drm_file.c
	drivers/gpu/drm/drm_panel.c
	drivers/gpu/drm/drm_property.c
	drivers/iommu/Kconfig
	drivers/iommu/Makefile
	drivers/iommu/arm-smmu.c
	drivers/iommu/dma-iommu.c
	drivers/iommu/dma-mapping-fast.c
	drivers/iommu/io-pgtable-arm.c
	drivers/iommu/io-pgtable-fast.c
	drivers/iommu/io-pgtable.c
	drivers/iommu/iommu.c
	drivers/irqchip/irq-gic-v3.c
	drivers/media/v4l2-core/v4l2-ioctl.c
	drivers/mmc/core/Kconfig
	drivers/mmc/core/block.c
	drivers/mmc/core/queue.c
	drivers/mmc/host/cqhci.c
	drivers/mmc/host/sdhci-msm.c
	drivers/net/wireless/ath/wil6210/interrupt.c
	drivers/net/wireless/ath/wil6210/main.c
	drivers/net/wireless/ath/wil6210/wil6210.h
	drivers/net/wireless/ath/wil6210/wmi.c
	drivers/nvmem/core.c
	drivers/nvmem/nvmem-sysfs.c
	drivers/of/fdt.c
	drivers/power/supply/power_supply_sysfs.c
	drivers/pwm/sysfs.c
	drivers/regulator/core.c
	drivers/scsi/sd.c
	drivers/scsi/ufs/ufshcd.c
	drivers/tty/serial/Kconfig
	drivers/tty/serial/Makefile
	drivers/usb/common/common.c
	fs/crypto/crypto.c
	fs/f2fs/checkpoint.c
	fs/f2fs/f2fs.h
	include/drm/drm_connector.h
	include/drm/drm_dp_mst_helper.h
	include/drm/drm_panel.h
	include/linux/clk-provider.h
	include/linux/dma-buf.h
	include/linux/dma-mapping-fast.h
	include/linux/dma-mapping.h
	include/linux/extcon.h
	include/linux/io-pgtable.h
	include/linux/iommu.h
	include/linux/kobject.h
	include/linux/mm.h
	include/linux/mm_types.h
	include/linux/mmc/host.h
	include/linux/netdevice.h
	include/linux/power_supply.h
	include/linux/pwm.h
	include/linux/regulator/driver.h
	include/linux/thermal.h
	include/linux/vm_event_item.h
	include/net/cfg80211.h
	include/scsi/scsi_device.h
	include/sound/pcm.h
	include/sound/soc.h
	include/uapi/drm/drm_mode.h
	include/uapi/linux/coresight-stm.h
	include/uapi/linux/ip.h
	include/uapi/linux/nl80211.h
	include/uapi/linux/videodev2.h
	include/uapi/sound/compress_offload.h
	kernel/dma/coherent.c
	kernel/dma/mapping.c
	kernel/panic.c
	kernel/power/qos.c
	kernel/sched/sched.h
	mm/Kconfig
	mm/filemap.c
	mm/swapfile.c
	mm/vmalloc.c
	mm/vmstat.c
	net/qrtr/qrtr.c
	net/wireless/nl80211.c
	net/wireless/scan.c
	sound/core/compress_offload.c
	sound/soc/soc-core.c
	sound/usb/card.c
	sound/usb/pcm.c
	sound/usb/pcm.h
	sound/usb/usbaudio.h

 Fixed build errors:
	drivers/base/power/main.c
	drivers/thermal/thermal_core.c
	drivers/cpuidle/lpm-levels.c
	include/soc/qcom/lpm_levels.h

Change-Id: Idf25b239f53681bdfa2ef371a91720fadf1a3f01
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
2020-09-20 23:45:10 +05:30

/*
* kernel/workqueue.c - generic async execution with shared worker pool
*
* Copyright (C) 2002 Ingo Molnar
*
* Derived from the taskqueue/keventd code by:
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
* This is the generic async execution mechanism. Work items are
* executed in process context. The worker pool is shared and
* automatically managed. There are two worker pools for each CPU (one for
* normal work items and the other for high priority ones) and some extra
* pools for workqueues which are not bound to any specific CPU - the
* number of these backing pools is dynamic.
*
* Please read Documentation/core-api/workqueue.rst for details.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include "workqueue_internal.h"
enum {
/*
* worker_pool flags
*
* A bound pool is either associated or disassociated with its CPU.
* While associated (!DISASSOCIATED), all workers are bound to the
* CPU and none has %WORKER_UNBOUND set and concurrency management
* is in effect.
*
* While DISASSOCIATED, the cpu may be offline and all workers have
* %WORKER_UNBOUND set and concurrency management disabled, and may
* be executing on any CPU. The pool behaves as an unbound one.
*
* Note that DISASSOCIATED should be flipped only while holding
* wq_pool_attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
WORKER_REBOUND = 1 << 8, /* worker was rebound */
WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
WORKER_UNBOUND | WORKER_REBOUND,
NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
/* call for help after 10ms (min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breathe after fail */
/*
* Rescue workers are used only on emergencies and shared by
* all cpus. Give MIN_NICE.
*/
RESCUER_NICE_LEVEL = MIN_NICE,
HIGHPRI_NICE_LEVEL = MIN_NICE,
WQ_NAME_LEN = 24,
};
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
*
* L: pool->lock protected. Access with pool->lock held.
*
* X: During normal operation, modification requires pool->lock and should
* be done only from local cpu. Either disabling preemption on local
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
* A: wq_pool_attach_mutex protected.
*
* PL: wq_pool_mutex protected.
*
* PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
* PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
* sched-RCU for reads.
*
* WQ: wq->mutex protected.
*
* WR: wq->mutex protected for writes. Sched-RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*/
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
spinlock_t lock; /* the pool lock */
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
unsigned int flags; /* X: flags */
unsigned long watchdog_ts; /* L: watchdog timestamp */
struct list_head worklist; /* L: list of pending works */
int nr_workers; /* L: total number of workers */
int nr_idle; /* L: currently idle workers */
struct list_head idle_list; /* X: list of idle workers */
struct timer_list idle_timer; /* L: worker idle timeout */
struct timer_list mayday_timer; /* L: SOS timer for workers */
/* a worker is either on busy_hash or idle_list, or is the manager */
DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
/* L: hash of busy workers */
struct worker *manager; /* L: purely informational */
struct list_head workers; /* A: attached workers */
struct completion *detach_completion; /* all workers detached */
struct ida worker_ida; /* worker IDs for task name */
struct workqueue_attrs *attrs; /* I: worker attributes */
struct hlist_node hash_node; /* PL: unbound_pool_hash node */
int refcnt; /* PL: refcnt for unbound pools */
/*
* The current concurrency level. As it's likely to be accessed
* from other CPUs during try_to_wake_up(), put it in a separate
* cacheline.
*/
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
* Destruction of pool is sched-RCU protected to allow dereferences
* from get_work_pool().
*/
struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
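/*
 * Illustrative sketch, not part of the original file: how the "L:" rule
 * from the legend above is honored when touching a worker_pool field.
 * example_touch_watchdog() is hypothetical.
 */
static void example_touch_watchdog(struct worker_pool *pool)
{
	/* watchdog_ts is "L:" protected, so pool->lock must be held */
	spin_lock_irq(&pool->lock);
	pool->watchdog_ts = jiffies;
	spin_unlock_irq(&pool->lock);
}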
/*
* The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
* of work_struct->data are used for flags and the remaining high bits
* point to the pwq; thus, pwqs need to be aligned on a
* 1 << WORK_STRUCT_FLAG_BITS (power of two) boundary.
*/
struct pool_workqueue {
struct worker_pool *pool; /* I: the associated pool */
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
int flush_color; /* L: flushing color */
int refcnt; /* L: reference count */
int nr_in_flight[WORK_NR_COLORS];
/* L: nr of in_flight works */
int nr_active; /* L: nr of active works */
int max_active; /* L: max active works */
struct list_head delayed_works; /* L: delayed works */
struct list_head pwqs_node; /* WR: node on wq->pwqs */
struct list_head mayday_node; /* MD: node on wq->maydays */
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
* itself is also sched-RCU protected so that the first pwq can be
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
struct rcu_head rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
/*
* Structure used to wait for workqueue flush.
*/
struct wq_flusher {
struct list_head list; /* WQ: list of flushers */
int flush_color; /* WQ: flush color waiting for */
struct completion done; /* flush completion */
};
struct wq_device;
/*
* The externally visible workqueue. It relays the issued work items to
* the appropriate worker_pool through its pool_workqueues.
*/
struct workqueue_struct {
struct list_head pwqs; /* WR: all pwqs of this wq */
struct list_head list; /* PR: list of all workqueues */
struct mutex mutex; /* protects this wq */
int work_color; /* WQ: current work color */
int flush_color; /* WQ: current flush color */
atomic_t nr_pwqs_to_flush; /* flush in progress */
struct wq_flusher *first_flusher; /* WQ: first flusher */
struct list_head flusher_queue; /* WQ: flush waiters */
struct list_head flusher_overflow; /* WQ: flush overflow list */
struct list_head maydays; /* MD: pwqs requesting rescue */
struct worker *rescuer; /* I: rescue worker */
int nr_drainers; /* WQ: drain in progress */
int saved_max_active; /* WQ: saved pwq max_active */
struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
#ifdef CONFIG_SYSFS
struct wq_device *wq_dev; /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
char name[WQ_NAME_LEN]; /* I: workqueue name */
/*
* Destruction of workqueue_struct is sched-RCU protected to allow
* walking the workqueues list without grabbing wq_pool_mutex.
* This is used to dump all workqueues from sysrq.
*/
struct rcu_head rcu;
/* hot fields used during command issue, aligned to cacheline */
unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};
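/*
 * Illustrative sketch, not part of the original file: how a
 * workqueue_struct is typically obtained and torn down by callers
 * through the public API. The example_* names are hypothetical.
 */
static struct workqueue_struct *example_wq;

static int example_setup(void)
{
	/* an unbound workqueue; its work items may run on any allowed CPU */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	/* drains pending work items and releases the backing pwqs */
	destroy_workqueue(example_wq);
}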
static struct kmem_cache *pwq_cache;
static cpumask_var_t *wq_numa_possible_cpumask;
/* possible CPUs of each node */
static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);
/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
static bool wq_online; /* can kworkers be created yet? */
static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
/*
* Local execution of unbound work items is no longer guaranteed. The
* following always forces round-robin CPU selection on unbound work items
* to uncover usages which depend on it.
*/
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
!lockdep_is_held(&wq_pool_mutex), \
"sched RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
!lockdep_is_held(&wq->mutex), \
"sched RCU or wq->mutex should be held")
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
!lockdep_is_held(&wq->mutex) && \
!lockdep_is_held(&wq_pool_mutex), \
"sched RCU, wq->mutex or wq_pool_mutex should be held")
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
(pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
(pool)++)
/**
* for_each_pool - iterate through all worker_pools in the system
* @pool: iteration cursor
* @pi: integer used for iteration
*
* This must be called either with wq_pool_mutex held or sched RCU read
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
* The if/else clause exists only for the lockdep assertion and can be
* ignored.
*/
#define for_each_pool(pool, pi) \
idr_for_each_entry(&worker_pool_idr, pool, pi) \
if (({ assert_rcu_or_pool_mutex(); false; })) { } \
else
/**
* for_each_pool_worker - iterate through all workers of a worker_pool
* @worker: iteration cursor
* @pool: worker_pool to iterate workers of
*
* This must be called with wq_pool_attach_mutex held.
*
* The if/else clause exists only for the lockdep assertion and can be
* ignored.
*/
#define for_each_pool_worker(worker, pool) \
list_for_each_entry((worker), &(pool)->workers, node) \
if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
else
/**
* for_each_pwq - iterate through all pool_workqueues of the specified workqueue
* @pwq: iteration cursor
* @wq: the target workqueue
*
* This must be called either with wq->mutex held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
* The if/else clause exists only for the lockdep assertion and can be
* ignored.
*/
#define for_each_pwq(pwq, wq) \
list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
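/*
 * Illustrative sketch, not part of the original file: using for_each_pwq()
 * under the sched-RCU read lock as required by the comment above. The
 * count is a racy snapshot (nr_active is pool->lock protected), which is
 * fine for informational use. example_count_active() is hypothetical.
 */
static int example_count_active(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int nr = 0;

	rcu_read_lock_sched();
	for_each_pwq(pwq, wq)
		nr += pwq->nr_active;
	rcu_read_unlock_sched();

	return nr;
}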
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
static void *work_debug_hint(void *addr)
{
return ((struct work_struct *) addr)->func;
}
static bool work_is_static_object(void *addr)
{
struct work_struct *work = addr;
return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}
/*
* fixup_init is called when:
* - an active object is initialized
*/
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
struct work_struct *work = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
cancel_work_sync(work);
debug_object_init(work, &work_debug_descr);
return true;
default:
return false;
}
}
/*
* fixup_free is called when:
* - an active object is freed
*/
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
struct work_struct *work = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
cancel_work_sync(work);
debug_object_free(work, &work_debug_descr);
return true;
default:
return false;
}
}
static struct debug_obj_descr work_debug_descr = {
.name = "work_struct",
.debug_hint = work_debug_hint,
.is_static_object = work_is_static_object,
.fixup_init = work_fixup_init,
.fixup_free = work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
debug_object_activate(work, &work_debug_descr);
}
static inline void debug_work_deactivate(struct work_struct *work)
{
debug_object_deactivate(work, &work_debug_descr);
}
void __init_work(struct work_struct *work, int onstack)
{
if (onstack)
debug_object_init_on_stack(work, &work_debug_descr);
else
debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
void destroy_work_on_stack(struct work_struct *work)
{
debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
void destroy_delayed_work_on_stack(struct delayed_work *work)
{
destroy_timer_on_stack(&work->timer);
debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
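/*
 * Illustrative sketch, not part of the original file: INIT_WORK_ONSTACK()
 * ends up calling __init_work() above with onstack set, and a matching
 * destroy_work_on_stack() tells debugobjects when the stack object goes
 * away. The example_* names are hypothetical.
 */
static void example_onstack_fn(struct work_struct *work)
{
	pr_info("on-stack work executed\n");
}

static void example_run_onstack(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_onstack_fn);
	schedule_work(&work);
	flush_work(&work);		/* wait before the stack frame goes away */
	destroy_work_on_stack(&work);	/* tell debugobjects the object is gone */
}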
/**
* worker_pool_assign_id - allocate ID and assign it to @pool
* @pool: the pool pointer of interest
*
* Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
* successfully, -errno on failure.
*/
static int worker_pool_assign_id(struct worker_pool *pool)
{
int ret;
lockdep_assert_held(&wq_pool_mutex);
ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
GFP_KERNEL);
if (ret >= 0) {
pool->id = ret;
return 0;
}
return ret;
}
/**
* unbound_pwq_by_node - return the unbound pool_workqueue for the given node
* @wq: the target workqueue
* @node: the node ID
*
* This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
* Return: The unbound pool_workqueue for @node.
*/
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
int node)
{
assert_rcu_or_wq_mutex_or_pool_mutex(wq);
/*
* XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
* delayed item is pending. The plan is to keep CPU -> NODE
* mapping valid and stable across CPU on/offlines. Once that
* happens, this workaround can be removed.
*/
if (unlikely(node == NUMA_NO_NODE))
return wq->dfl_pwq;
return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}
static unsigned int work_color_to_flags(int color)
{
return color << WORK_STRUCT_COLOR_SHIFT;
}
static int get_work_color(struct work_struct *work)
{
return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
((1 << WORK_STRUCT_COLOR_BITS) - 1);
}
static int work_next_color(int color)
{
return (color + 1) % WORK_NR_COLORS;
}
/*
* While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
* contain the pointer to the queued pwq. Once execution starts, the flag
* is cleared and the high bits contain OFFQ flags and pool ID.
*
* set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
* and clear_work_data() can be used to set the pwq, pool or clear
* work->data. These functions should only be called while the work is
* owned - ie. while the PENDING bit is set.
*
* get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
* corresponding to a work. Pool is available once the work has been
* queued anywhere after initialization until it is sync canceled. pwq is
* available only while the work item is queued.
*
* %WORK_OFFQ_CANCELING is used to mark a work item which is being
* canceled. While being canceled, a work item may have its PENDING set
* but stay off timer and worklist for arbitrarily long and nobody should
* try to steal the PENDING bit.
*/
static inline void set_work_data(struct work_struct *work, unsigned long data,
unsigned long flags)
{
WARN_ON_ONCE(!work_pending(work));
atomic_long_set(&work->data, data | flags | work_static(work));
}
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
unsigned long extra_flags)
{
set_work_data(work, (unsigned long)pwq,
WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}
static void set_work_pool_and_keep_pending(struct work_struct *work,
int pool_id)
{
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
WORK_STRUCT_PENDING);
}
static void set_work_pool_and_clear_pending(struct work_struct *work,
int pool_id)
{
/*
* The following wmb is paired with the implied mb in
* test_and_set_bit(PENDING) and ensures all updates to @work made
* here are visible to and precede any updates by the next PENDING
* owner.
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
/*
* The following mb guarantees that previous clear of a PENDING bit
* will not be reordered with any speculative LOADS or STORES from
* work->current_func, which is executed afterwards. This possible
* reordering can lead to a missed execution on an attempt to queue
* the same @work. E.g. consider this case:
*
* CPU#0 CPU#1
* ---------------------------- --------------------------------
*
* 1 STORE event_indicated
* 2 queue_work_on() {
* 3 test_and_set_bit(PENDING)
* 4 } set_..._and_clear_pending() {
* 5 set_work_data() # clear bit
* 6 smp_mb()
* 7 work->current_func() {
* 8 LOAD event_indicated
* }
*
* Without an explicit full barrier speculative LOAD on line 8 can
* be executed before CPU#0 does STORE on line 1. If that happens,
* CPU#0 observes the PENDING bit is still set and new execution of
* a @work is not queued in a hope, that CPU#1 will eventually
* finish the queued @work. Meanwhile CPU#1 does not see
* event_indicated is set, because speculative LOAD was executed
* before actual STORE.
*/
smp_mb();
}
static void clear_work_data(struct work_struct *work)
{
smp_wmb(); /* see set_work_pool_and_clear_pending() */
set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_PWQ)
return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
else
return NULL;
}
/**
* get_work_pool - return the worker_pool a given work was associated with
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and they allow read
* access under sched-RCU read lock. As such, this function should be
* called under wq_pool_mutex or with preemption disabled.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
* beyond the critical section, the caller is responsible for ensuring the
* returned pool is and stays online.
*
* Return: The worker_pool @work was last associated with. %NULL if none.
*/
static struct worker_pool *get_work_pool(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
int pool_id;
assert_rcu_or_pool_mutex();
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
pool_id = data >> WORK_OFFQ_POOL_SHIFT;
if (pool_id == WORK_OFFQ_POOL_NONE)
return NULL;
return idr_find(&worker_pool_idr, pool_id);
}
/**
* get_work_pool_id - return the worker pool ID a given work is associated with
* @work: the work item of interest
*
* Return: The worker_pool ID @work was last associated with.
* %WORK_OFFQ_POOL_NONE if none.
*/
static int get_work_pool_id(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
return data >> WORK_OFFQ_POOL_SHIFT;
}
static void mark_work_canceling(struct work_struct *work)
{
unsigned long pool_id = get_work_pool_id(work);
pool_id <<= WORK_OFFQ_POOL_SHIFT;
set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}
static bool work_is_canceling(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}
/*
* Policy functions. These define the policies on how the global worker
* pools are managed. Unless noted otherwise, these functions assume that
* they're being called with pool->lock held.
*/
static bool __need_more_worker(struct worker_pool *pool)
{
return !atomic_read(&pool->nr_running);
}
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
*
* Note that, because unbound workers never contribute to nr_running, this
* function will always return %true for unbound pools as long as the
* worklist isn't empty.
*/
static bool need_more_worker(struct worker_pool *pool)
{
return !list_empty(&pool->worklist) && __need_more_worker(pool);
}
/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
return pool->nr_idle;
}
/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
return !list_empty(&pool->worklist) &&
atomic_read(&pool->nr_running) <= 1;
}
/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
return need_more_worker(pool) && !may_start_working(pool);
}
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
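/*
 * Worked example (assuming MAX_IDLE_WORKERS_RATIO is 4, as defined near
 * the top of this file): with 8 busy workers, up to 3 idle workers are
 * tolerated because (3 - 2) * 4 = 4 < 8; a 4th idle worker makes
 * (4 - 2) * 4 = 8 >= 8 true, too_many_workers() returns %true and the
 * idle timer eventually reaps the excess workers.
 */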
/*
* Wake up functions.
*/
/* Return the first idle worker. Safe with preemption disabled */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
if (unlikely(list_empty(&pool->idle_list)))
return NULL;
return list_first_entry(&pool->idle_list, struct worker, entry);
}
/**
* wake_up_worker - wake up an idle worker
* @pool: worker pool to wake worker from
*
* Wake up the first idle worker of @pool.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void wake_up_worker(struct worker_pool *pool)
{
struct worker *worker = first_idle_worker(pool);
if (likely(worker))
wake_up_process(worker->task);
}
/**
* wq_worker_waking_up - a worker is waking up
* @task: task waking up
* @cpu: CPU @task is waking up to
*
* This function is called during try_to_wake_up() when a worker is
* being awoken.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*/
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
struct worker *worker = kthread_data(task);
if (!(worker->flags & WORKER_NOT_RUNNING)) {
WARN_ON_ONCE(worker->pool->cpu != cpu);
atomic_inc(&worker->pool->nr_running);
}
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
*
* This function is called during schedule() when a busy worker is
* going to sleep. A worker on the same cpu can be woken up by
* returning a pointer to its task.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*
* Return:
* Worker task on @cpu to wake up, %NULL if none.
*/
struct task_struct *wq_worker_sleeping(struct task_struct *task)
{
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool;
/*
* Rescuers, which may not have all the fields set up like normal
* workers, also reach here, let's not access anything before
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
return NULL;
pool = worker->pool;
/* this can only happen on the local cpu */
if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
return NULL;
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
*
* NOT_RUNNING is clear. This means that we're bound to and
* running on the local cpu w/ rq lock held and preemption
* disabled, which in turn means that no one else could be
* manipulating idle_list, so dereferencing idle_list without pool
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
!list_empty(&pool->worklist))
to_wakeup = first_idle_worker(pool);
return to_wakeup ? to_wakeup->task : NULL;
}
/**
* wq_worker_last_func - retrieve worker's last work function
* @task: Task to retrieve last work function of.
*
* Determine the last function a worker executed. This is called from
* the scheduler to get a worker's last known identity.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*
* This function is called during schedule() when a kworker is going
* to sleep. It's used by psi to identify aggregation workers during
* dequeuing, to allow periodic aggregation to shut-off when that
* worker is the last task in the system or cgroup to go to sleep.
*
* As this function doesn't involve any workqueue-related locking, it
* only returns stable values when called from inside the scheduler's
* queuing and dequeuing paths, when @task, which must be a kworker,
* is guaranteed to not be processing any works.
*
* Return:
* The last work function %current executed as a worker, NULL if it
* hasn't executed any work yet.
*/
work_func_t wq_worker_last_func(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
return worker->last_func;
}
/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
*
* Set @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
* spin_lock_irq(pool->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
WARN_ON_ONCE(worker->task != current);
/* If transitioning into NOT_RUNNING, adjust nr_running. */
if ((flags & WORKER_NOT_RUNNING) &&
!(worker->flags & WORKER_NOT_RUNNING)) {
atomic_dec(&pool->nr_running);
}
worker->flags |= flags;
}
/**
* worker_clr_flags - clear worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to clear
*
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
* spin_lock_irq(pool->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
unsigned int oflags = worker->flags;
WARN_ON_ONCE(worker->task != current);
worker->flags &= ~flags;
/*
* If transitioning out of NOT_RUNNING, increment nr_running. Note
* that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
* of multiple flags, not a single flag.
*/
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(&pool->nr_running);
}
/**
* find_worker_executing_work - find worker which is executing a work
* @pool: pool of interest
* @work: work to find worker for
*
* Find a worker which is executing @work on @pool by searching
* @pool->busy_hash which is keyed by the address of @work. For a worker
* to match, its current execution should match the address of @work and
* its work function. This is to avoid unwanted dependency between
* unrelated work executions through a work item being recycled while still
* being executed.
*
* This is a bit tricky. A work item may be freed once its execution
* starts and nothing prevents the freed area from being recycled for
* another work item. If the same work item address ends up being reused
* before the original execution finishes, workqueue will identify the
* recycled work item as currently executing and make it wait until the
* current execution finishes, introducing an unwanted dependency.
*
* This function checks the work item address and work function to avoid
* false positives. Note that this isn't complete as one may construct a
* work function which can introduce dependency onto itself through a
* recycled work item. Well, if somebody wants to shoot oneself in the
* foot that badly, there's only so much we can do, and if such deadlock
* actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*
* Return:
* Pointer to worker which is executing @work if found, %NULL
* otherwise.
*/
static struct worker *find_worker_executing_work(struct worker_pool *pool,
struct work_struct *work)
{
struct worker *worker;
hash_for_each_possible(pool->busy_hash, worker, hentry,
(unsigned long)work)
if (worker->current_work == work &&
worker->current_func == work->func)
return worker;
return NULL;
}
/**
* move_linked_works - move linked works to a list
* @work: start of series of works to be scheduled
* @head: target list to append @work to
* @nextp: out parameter for nested worklist walking
*
* Schedule linked works starting from @work to @head. Work series to
* be scheduled starts at @work and includes any consecutive work with
* WORK_STRUCT_LINKED set in its predecessor.
*
* If @nextp is not NULL, it's updated to point to the next work of
* the last scheduled work. This allows move_linked_works() to be
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
{
struct work_struct *n;
/*
* Linked worklist will always end before the end of the list,
* use NULL for list head.
*/
list_for_each_entry_safe_from(work, n, NULL, entry) {
list_move_tail(&work->entry, head);
if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
break;
}
/*
* If we're already inside safe list traversal and have moved
* multiple works to the scheduled queue, the next position
* needs to be updated.
*/
if (nextp)
*nextp = n;
}
/**
* get_pwq - get an extra reference on the specified pool_workqueue
* @pwq: pool_workqueue to get
*
* Obtain an extra reference on @pwq. The caller should guarantee that
* @pwq has positive refcnt and be holding the matching pool->lock.
*/
static void get_pwq(struct pool_workqueue *pwq)
{
lockdep_assert_held(&pwq->pool->lock);
WARN_ON_ONCE(pwq->refcnt <= 0);
pwq->refcnt++;
}
/**
* put_pwq - put a pool_workqueue reference
* @pwq: pool_workqueue to put
*
* Drop a reference of @pwq. If its refcnt reaches zero, schedule its
* destruction. The caller should be holding the matching pool->lock.
*/
static void put_pwq(struct pool_workqueue *pwq)
{
lockdep_assert_held(&pwq->pool->lock);
if (likely(--pwq->refcnt))
return;
if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
return;
/*
* @pwq can't be released under pool->lock, bounce to
* pwq_unbound_release_workfn(). This never recurses on the same
* pool->lock as this path is taken only for unbound workqueues and
* the release work item is scheduled on a per-cpu workqueue. To
* avoid lockdep warning, unbound pool->locks are given lockdep
* subclass of 1 in get_unbound_pool().
*/
schedule_work(&pwq->unbound_release_work);
}
/**
* put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
* @pwq: pool_workqueue to put (can be %NULL)
*
* put_pwq() with locking. This function also allows %NULL @pwq.
*/
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
* As both pwqs and pools are sched-RCU protected, the
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
spin_unlock_irq(&pwq->pool->lock);
}
}
static void pwq_activate_delayed_work(struct work_struct *work)
{
struct pool_workqueue *pwq = get_work_pwq(work);
trace_workqueue_activate_work(work);
if (list_empty(&pwq->pool->worklist))
pwq->pool->watchdog_ts = jiffies;
move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
pwq->nr_active++;
}
static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
struct work_struct *work = list_first_entry(&pwq->delayed_works,
struct work_struct, entry);
pwq_activate_delayed_work(work);
}
/**
* pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
* @pwq: pwq of interest
* @color: color of work which left the queue
*
* A work item has either completed or been removed from its pending queue;
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
/* uncolored work items don't participate in flushing or nr_active */
if (color == WORK_NO_COLOR)
goto out_put;
pwq->nr_in_flight[color]--;
pwq->nr_active--;
if (!list_empty(&pwq->delayed_works)) {
/* one down, submit a delayed one */
if (pwq->nr_active < pwq->max_active)
pwq_activate_first_delayed(pwq);
}
/* is flush in progress and are we at the flushing tip? */
if (likely(pwq->flush_color != color))
goto out_put;
/* are there still in-flight works? */
if (pwq->nr_in_flight[color])
goto out_put;
/* this pwq is done, clear flush_color */
pwq->flush_color = -1;
/*
* If this was the last pwq, wake up the first flusher. It
* will handle the rest.
*/
if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
complete(&pwq->wq->first_flusher->done);
out_put:
put_pwq(pwq);
}
/**
* try_to_grab_pending - steal work item from worklist and disable irq
* @work: work item to steal
* @is_dwork: @work is a delayed_work
* @flags: place to store irq state
*
* Try to grab PENDING bit of @work. This function can handle @work in any
* stable state - idle, on timer or on worklist.
*
* Return:
* 1 if @work was pending and we successfully stole PENDING
* 0 if @work was idle and we claimed PENDING
* -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
* -ENOENT if someone else is canceling @work, this state may persist
* for arbitrarily long
*
* Note:
* On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
* interrupted while holding PENDING and @work off queue, irq must be
* disabled on entry. This, combined with delayed_work->timer being
* irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
*
* On successful return, >= 0, irq is disabled and the caller is
* responsible for releasing it using local_irq_restore(*@flags).
*
* This function is safe to call from any context including IRQ handler.
*/
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
unsigned long *flags)
{
struct worker_pool *pool;
struct pool_workqueue *pwq;
local_irq_save(*flags);
/* try to steal the timer if it exists */
if (is_dwork) {
struct delayed_work *dwork = to_delayed_work(work);
/*
* dwork->timer is irqsafe. If del_timer() fails, it's
* guaranteed that the timer is not queued anywhere and not
* running on the local CPU.
*/
if (likely(del_timer(&dwork->timer)))
return 1;
}
/* try to claim PENDING the normal way */
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
*/
pool = get_work_pool(work);
if (!pool)
goto fail;
spin_lock(&pool->lock);
/*
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
* to pwq on queueing and to pool on dequeueing are done under
* pwq->pool->lock. This in turn guarantees that, if work->data
* points to pwq which is associated with a locked pool, the work
* item is currently queued on that pool.
*/
pwq = get_work_pwq(work);
if (pwq && pwq->pool == pool) {
debug_work_deactivate(work);
/*
* A delayed work item cannot be grabbed directly because
* it might have linked NO_COLOR work items which, if left
* on the delayed_list, will confuse pwq->nr_active
* management later on and cause stall. Make sure the work
* item is activated before grabbing.
*/
if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
pwq_activate_delayed_work(work);
list_del_init(&work->entry);
pwq_dec_nr_in_flight(pwq, get_work_color(work));
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
return 1;
}
spin_unlock(&pool->lock);
fail:
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
/*
* The queueing is in progress in another context. If we keep
* taking the pool->lock in a busy loop, the other context may
* never get the lock. Give a 1 usec delay to avoid this contention.
*/
udelay(1);
return -EAGAIN;
}
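/*
 * A minimal sketch of the caller pattern (see mod_delayed_work_on()
 * below for an in-tree user): busy-retry on -EAGAIN, treat -ENOENT as
 * "someone else is canceling", and remember that a >= 0 return leaves
 * irqs disabled:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		... @work is owned with PENDING set ...
 *		local_irq_restore(flags);
 *	}
 */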
/**
* insert_work - insert a work into a pool
* @pwq: pwq @work belongs to
* @work: work to insert
* @head: insertion point
* @extra_flags: extra WORK_STRUCT_* flags to set
*
* Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
* work_struct flags.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
{
struct worker_pool *pool = pwq->pool;
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head);
get_pwq(pwq);
/*
* Ensure either wq_worker_sleeping() sees the above
* list_add_tail() or we see zero nr_running to avoid workers lying
* around lazily while there are works to be processed.
*/
smp_mb();
if (__need_more_worker(pool))
wake_up_worker(pool);
}
/*
* Test whether @work is being queued from another work executing on the
* same workqueue.
*/
static bool is_chained_work(struct workqueue_struct *wq)
{
struct worker *worker;
worker = current_wq_worker();
/*
* Return %true iff I'm a worker executing a work item on @wq. If
* I'm @worker, it's safe to dereference it without locking.
*/
return worker && worker->current_pwq->wq == wq;
}
/*
* When queueing an unbound work item to a wq, prefer local CPU if allowed
* by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
* avoid perturbing sensitive tasks.
*/
static int wq_select_unbound_cpu(int cpu)
{
static bool printed_dbg_warning;
int new_cpu;
if (likely(!wq_debug_force_rr_cpu)) {
if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
return cpu;
} else if (!printed_dbg_warning) {
pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
printed_dbg_warning = true;
}
if (cpumask_empty(wq_unbound_cpumask))
return cpu;
new_cpu = __this_cpu_read(wq_rr_cpu_last);
new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
if (unlikely(new_cpu >= nr_cpu_ids)) {
new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
if (unlikely(new_cpu >= nr_cpu_ids))
return cpu;
}
__this_cpu_write(wq_rr_cpu_last, new_cpu);
return new_cpu;
}
static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
struct pool_workqueue *pwq;
struct worker_pool *last_pool;
struct list_head *worklist;
unsigned int work_flags;
unsigned int req_cpu = cpu;
/*
* While a work item is PENDING && off queue, a task trying to
* steal the PENDING will busy-loop waiting for it to either get
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
lockdep_assert_irqs_disabled();
debug_work_activate(work);
/* if draining, only works from the same workqueue are allowed */
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
retry:
/* pwq which will be used unless @work is executing elsewhere */
if (wq->flags & WQ_UNBOUND) {
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
} else {
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
}
/*
* If @work was previously on a different pool, it might still be
* running there, in which case the work needs to be queued on that
* pool to guarantee non-reentrancy.
*/
last_pool = get_work_pool(work);
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;
spin_lock(&last_pool->lock);
worker = find_worker_executing_work(last_pool, work);
if (worker && worker->current_pwq->wq == wq) {
pwq = worker->current_pwq;
} else {
/* meh... not running there, queue here */
spin_unlock(&last_pool->lock);
spin_lock(&pwq->pool->lock);
}
} else {
spin_lock(&pwq->pool->lock);
}
/*
* pwq is determined and locked. For unbound pools, we could have
* raced with pwq release and it could already be dead. If its
* refcnt is zero, repeat pwq selection. Note that pwqs never die
* without another pwq replacing it in the numa_pwq_tbl or while
* work items are executing on it, so the retrying is guaranteed to
* make forward-progress.
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
spin_unlock(&pwq->pool->lock);
cpu_relax();
goto retry;
}
/* oops */
WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
wq->name, cpu);
}
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
if (WARN_ON(!list_empty(&work->entry))) {
spin_unlock(&pwq->pool->lock);
return;
}
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
if (likely(pwq->nr_active < pwq->max_active)) {
trace_workqueue_activate_work(work);
pwq->nr_active++;
worklist = &pwq->pool->worklist;
if (list_empty(worklist))
pwq->pool->watchdog_ts = jiffies;
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &pwq->delayed_works;
}
insert_work(pwq, work, worklist, work_flags);
spin_unlock(&pwq->pool->lock);
}
/**
* queue_work_on - queue work on specific cpu
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @work: work to queue
*
* We queue the work to a specific CPU; the caller must ensure it
* can't go away.
*
* Return: %false if @work was already on a queue, %true otherwise.
*/
bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
bool ret = false;
unsigned long flags;
local_irq_save(flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_work(cpu, wq, work);
ret = true;
}
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(queue_work_on);
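/*
 * Minimal usage sketch (my_handler/my_work are hypothetical names, not
 * part of this file):
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("deferred work running\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	if (!queue_work_on(1, system_wq, &my_work))
 *		pr_debug("my_work was already pending\n");
 *
 * DECLARE_WORK() and system_wq come from linux/workqueue.h; CPU 1 must
 * be kept online by the caller for as long as the work may be queued.
 */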
void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
/* should have been called from irqsafe timer with irq already off */
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
WARN_ON_ONCE(!wq);
#ifndef CONFIG_CFI_CLANG
WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
#endif
WARN_ON_ONCE(timer_pending(timer));
WARN_ON_ONCE(!list_empty(&work->entry));
/*
* If @delay is 0, queue @dwork->work immediately. This is for
* both optimization and correctness. The earliest @timer can
* expire is on the closest next tick; delayed_work users depend on
* there being no such delay when @delay is 0.
*/
if (!delay) {
__queue_work(cpu, wq, &dwork->work);
return;
}
dwork->wq = wq;
dwork->cpu = cpu;
timer->expires = jiffies + delay;
if (unlikely(cpu != WORK_CPU_UNBOUND))
add_timer_on(timer, cpu);
else
add_timer(timer);
}
/**
* queue_delayed_work_on - queue work on specific CPU after delay
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
* Return: %false if @work was already on a queue, %true otherwise. If
* @delay is zero and @dwork is idle, it will be scheduled for immediate
* execution.
*/
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct work_struct *work = &dwork->work;
bool ret = false;
unsigned long flags;
/* read the comment in __queue_work() */
local_irq_save(flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_delayed_work(cpu, wq, dwork, delay);
ret = true;
}
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
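/*
 * Minimal usage sketch (my_poll/my_poll_work are hypothetical names):
 * re-arming from the handler is the usual way to get a periodic poll,
 * and WORK_CPU_UNBOUND lets the workqueue pick the CPU:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		pr_info("polling\n");
 *		queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork,
 *				      msecs_to_jiffies(100));
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_poll_work,
 *			      msecs_to_jiffies(100));
 */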
/**
* mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
* If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
* modify @dwork's timer so that it expires after @delay. If @delay is
* zero, @work is guaranteed to be scheduled immediately regardless of its
* current state.
*
* Return: %false if @dwork was idle and queued, %true if @dwork was
* pending and its timer was modified.
*
* This function is safe to call from any context including IRQ handler.
* See try_to_grab_pending() for details.
*/
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(&dwork->work, true, &flags);
} while (unlikely(ret == -EAGAIN));
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
local_irq_restore(flags);
}
/* -ENOENT from try_to_grab_pending() becomes %true */
return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
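/*
 * Usage sketch (my_watchdog is a hypothetical delayed_work): because it
 * works whether @dwork is idle, on timer or already queued,
 * mod_delayed_work_on() is the natural primitive for "(re)arm this
 * timeout" semantics, e.g. pushing a watchdog out by another second
 * from any context:
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_watchdog,
 *			    msecs_to_jiffies(1000));
 */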
static void rcu_work_rcufn(struct rcu_head *rcu)
{
struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
/* read the comment in __queue_work() */
local_irq_disable();
__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
local_irq_enable();
}
/**
* queue_rcu_work - queue work after a RCU grace period
* @wq: workqueue to use
* @rwork: work to queue
*
* Return: %false if @rwork was already pending, %true otherwise. Note
* that a full RCU grace period is guaranteed only after a %true return.
* While @rwork is guaranteed to be executed after a %false return, the
* execution may happen before a full RCU grace period has passed.
*/
bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{
struct work_struct *work = &rwork->work;
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
rwork->wq = wq;
call_rcu(&rwork->rcu, rcu_work_rcufn);
return true;
}
return false;
}
EXPORT_SYMBOL(queue_rcu_work);
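/*
 * Minimal usage sketch (struct my_obj, free_rwork and my_free_fn are
 * hypothetical): queue_rcu_work() is typically used to tear down an
 * object once pre-existing RCU readers are done, from contexts that
 * must not block:
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_rwork);
 *
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */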
/**
* worker_enter_idle - enter idle state
* @worker: worker which is entering idle state
*
* @worker is entering idle state. Update stats and idle timer if
* necessary.
*
* LOCKING:
* spin_lock_irq(pool->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
WARN_ON_ONCE(!list_empty(&worker->entry) &&
(worker->hentry.next || worker->hentry.pprev)))
return;
/* can't use worker_set_flags(), also called from create_worker() */
worker->flags |= WORKER_IDLE;
pool->nr_idle++;
worker->last_active = jiffies;
/* idle_list is LIFO */
list_add(&worker->entry, &pool->idle_list);
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
/*
* Sanity check nr_running. Because unbind_workers() releases
* pool->lock between setting %WORKER_UNBOUND and zapping
* nr_running, the warning may trigger spuriously. Check iff
* unbind is not in progress.
*/
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
pool->nr_workers == pool->nr_idle &&
atomic_read(&pool->nr_running));
}
/**
* worker_leave_idle - leave idle state
* @worker: worker which is leaving idle state
*
* @worker is leaving idle state. Update stats.
*
* LOCKING:
* spin_lock_irq(pool->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
static struct worker *alloc_worker(int node)
{
struct worker *worker;
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
INIT_LIST_HEAD(&worker->node);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
return worker;
}
/**
* worker_attach_to_pool() - attach a worker to a pool
* @worker: worker to be attached
* @pool: the target pool
*
* Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
* cpu-binding of @worker are kept coordinated with the pool across
* cpu-[un]hotplugs.
*/
static void worker_attach_to_pool(struct worker *worker,
struct worker_pool *pool)
{
mutex_lock(&wq_pool_attach_mutex);
/*
* set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
* online CPUs. It'll be re-applied when any of the CPUs come up.
*/
set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
/*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
* definition for details.
*/
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
mutex_unlock(&wq_pool_attach_mutex);
}
/**
* worker_detach_from_pool() - detach a worker from its pool
* @worker: worker which is attached to its pool
*
* Undo the attaching which had been done in worker_attach_to_pool(). The
* caller worker shouldn't access the pool after detaching unless it holds
* another reference to the pool.
*/
static void worker_detach_from_pool(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
struct completion *detach_completion = NULL;
mutex_lock(&wq_pool_attach_mutex);
list_del(&worker->node);
worker->pool = NULL;
if (list_empty(&pool->workers))
detach_completion = pool->detach_completion;
mutex_unlock(&wq_pool_attach_mutex);
/* clear leftover flags without pool->lock after it is detached */
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
if (detach_completion)
complete(detach_completion);
}
/**
* create_worker - create a new workqueue worker
* @pool: pool the new worker will belong to
*
* Create and start a new worker which is attached to @pool.
*
* CONTEXT:
* Might sleep. Does GFP_KERNEL allocations.
*
* Return:
* Pointer to the newly created worker.
*/
static struct worker *create_worker(struct worker_pool *pool)
{
struct worker *worker = NULL;
int id = -1;
char id_buf[16];
/* ID is needed to determine kthread name */
id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
if (id < 0)
goto fail;
worker = alloc_worker(pool->node);
if (!worker)
goto fail;
worker->id = id;
if (pool->cpu >= 0)
snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
pool->attrs->nice < 0 ? "H" : "");
else
snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
"kworker/%s", id_buf);
if (IS_ERR(worker->task))
goto fail;
set_user_nice(worker->task, pool->attrs->nice);
kthread_bind_mask(worker->task, pool->attrs->cpumask);
/* successful, attach the worker to the pool */
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
spin_lock_irq(&pool->lock);
worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
spin_unlock_irq(&pool->lock);
return worker;
fail:
if (id >= 0)
ida_simple_remove(&pool->worker_ida, id);
kfree(worker);
return NULL;
}
/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
* Destroy @worker and adjust @pool stats accordingly. The worker should
* be idle.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void destroy_worker(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
lockdep_assert_held(&pool->lock);
/* sanity check frenzy */
if (WARN_ON(worker->current_work) ||
WARN_ON(!list_empty(&worker->scheduled)) ||
WARN_ON(!(worker->flags & WORKER_IDLE)))
return;
pool->nr_workers--;
pool->nr_idle--;
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
static void idle_worker_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);
spin_lock_irq(&pool->lock);
while (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
/* idle_list is kept in LIFO order, check the last one */
worker = list_entry(pool->idle_list.prev, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires)) {
mod_timer(&pool->idle_timer, expires);
break;
}
destroy_worker(worker);
}
spin_unlock_irq(&pool->lock);
}
static void send_mayday(struct work_struct *work)
{
struct pool_workqueue *pwq = get_work_pwq(work);
struct workqueue_struct *wq = pwq->wq;
lockdep_assert_held(&wq_mayday_lock);
if (!wq->rescuer)
return;
/* mayday mayday mayday */
if (list_empty(&pwq->mayday_node)) {
/*
* If @pwq is for an unbound wq, its base ref may be put at
* any time due to an attribute change. Pin @pwq until the
* rescuer is done with it.
*/
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
wake_up_process(wq->rescuer->task);
}
}
static void pool_mayday_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;
spin_lock_irq(&pool->lock);
spin_lock(&wq_mayday_lock); /* for wq->maydays */
if (need_to_create_worker(pool)) {
/*
* We've been trying to create a new worker but
* haven't been successful. We might be hitting an
* allocation deadlock. Send distress signals to
* rescuers.
*/
list_for_each_entry(work, &pool->worklist, entry)
send_mayday(work);
}
spin_unlock(&wq_mayday_lock);
spin_unlock_irq(&pool->lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
* maybe_create_worker - create a new worker if necessary
* @pool: pool to create a new worker for
*
* Create a new worker for @pool if necessary. @pool is guaranteed to
* have at least one idle worker on return from this function. If
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
* sent to all rescuers with works scheduled on @pool to resolve
* possible allocation deadlock.
*
* On return, need_to_create_worker() is guaranteed to be %false and
* may_start_working() %true.
*
* LOCKING:
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
spin_unlock_irq(&pool->lock);
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
while (true) {
if (create_worker(pool) || !need_to_create_worker(pool))
break;
schedule_timeout_interruptible(CREATE_COOLDOWN);
if (!need_to_create_worker(pool))
break;
}
del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&pool->lock);
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
* already become busy.
*/
if (need_to_create_worker(pool))
goto restart;
}
/**
* manage_workers - manage worker pool
* @worker: self
*
* Assume the manager role and manage the worker pool @worker belongs
* to. At any given time, there can be only zero or one manager per
* pool. The exclusion is handled automatically by this function.
*
* The caller can safely start processing works on false return. On
* true return, it's guaranteed that need_to_create_worker() is false
* and may_start_working() is true.
*
* CONTEXT:
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
* %false if the pool doesn't need management and the caller can safely
* start processing works, %true if management function was performed and
* the conditions that the caller verified before calling the function may
* no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;
maybe_create_worker(pool);
pool->manager = NULL;
pool->flags &= ~POOL_MANAGER_ACTIVE;
wake_up(&wq_manager_wait);
return true;
}
/**
* process_one_work - process single work
* @worker: self
* @work: work to process
*
* Process @work. This function contains all the logic necessary to
* process a single work including synchronization against and
* interaction with other workers on the same cpu, queueing and
* flushing. As long as context requirement is met, any worker can
* call this function to process a work.
*
* CONTEXT:
* spin_lock_irq(pool->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool;
bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
/*
* It is permissible to free the struct work_struct from
* inside the function that is called from it, this we need to
* take into account for lockdep too. To avoid bogus "held
* lock freed" warnings as well as problems when looking into
* work->lockdep_map, make a copy and use that here.
*/
struct lockdep_map lockdep_map;
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
/* ensure we're on the correct CPU */
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
raw_smp_processor_id() != pool->cpu);
/*
* A single work shouldn't be executed concurrently by
* multiple workers on a single cpu. Check whether anyone is
* already processing the work. If so, defer the work to the
* currently executing one.
*/
collision = find_worker_executing_work(pool, work);
if (unlikely(collision)) {
move_linked_works(work, &collision->scheduled, NULL);
return;
}
/* claim and dequeue */
debug_work_deactivate(work);
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
worker->current_work = work;
worker->current_func = work->func;
worker->current_pwq = pwq;
work_color = get_work_color(work);
/*
* Record wq name for cmdline and debug reporting, may get
* overridden through set_worker_desc().
*/
strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
list_del_init(&work->entry);
/*
* CPU intensive works don't participate in concurrency management.
* They're the scheduler's responsibility. This takes @worker out
* of concurrency management and the next code block will chain
* execution of the pending work items.
*/
if (unlikely(cpu_intensive))
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
/*
* Wake up another worker if necessary. The condition is always
* false for normal per-cpu workers since nr_running would always
* be >= 1 at this point. This is used to chain execution of the
* pending work items for WORKER_NOT_RUNNING workers such as the
* UNBOUND and CPU_INTENSIVE ones.
*/
if (need_more_worker(pool))
wake_up_worker(pool);
/*
* Record the last pool and clear PENDING which should be the last
* update to @work. Also, do this inside @pool->lock so that
* PENDING and queued state changes happen together while IRQ is
* disabled.
*/
set_work_pool_and_clear_pending(work, pool->id);
spin_unlock_irq(&pool->lock);
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
/*
* Strictly speaking we should mark the invariant state without holding
* any locks, that is, before these two lock_map_acquire()'s.
*
* However, that would result in:
*
* A(W1)
* WFC(C)
* A(W1)
* C(C)
*
* Which would create W1->C->W1 dependencies, even though there is no
* actual deadlock possible. There are two solutions, using a
* read-recursive acquire on the work(queue) 'locks', but this will then
* hit the lockdep limitation on recursive locks, or simply discard
* these locks.
*
* AFAICT there is no possible deadlock scenario between the
* flush_work() and complete() primitives (except for single-threaded
* workqueues), so hiding them isn't a problem.
*/
lockdep_invariant_state(true);
trace_workqueue_execute_start(work);
worker->current_func(work);
/*
* While we must be careful to not use "work" after this, the trace
* point will only record its address.
*/
trace_workqueue_execute_end(work);
lock_map_release(&lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
" last function: %pf\n",
current->comm, preempt_count(), task_pid_nr(current),
worker->current_func);
debug_show_held_locks(current);
dump_stack();
}
/*
* The following prevents a kworker from hogging CPU on !PREEMPT
* kernels, where a requeueing work item waiting for something to
* happen could deadlock with stop_machine as such work item could
* indefinitely requeue itself while all other CPUs are trapped in
* stop_machine. At the same time, report a quiescent RCU state so
* the same condition doesn't freeze RCU.
*/
cond_resched();
spin_lock_irq(&pool->lock);
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
/* tag the worker for identification in schedule() */
worker->last_func = worker->current_func;
/* we're done with it, release */
hash_del(&worker->hentry);
worker->current_work = NULL;
worker->current_func = NULL;
worker->current_pwq = NULL;
pwq_dec_nr_in_flight(pwq, work_color);
}
/**
* process_scheduled_works - process scheduled works
* @worker: self
*
* Process all scheduled works. Please note that the scheduled list
* may change while processing a work, so this function repeatedly
* fetches a work from the top and executes it.
*
* CONTEXT:
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
{
while (!list_empty(&worker->scheduled)) {
struct work_struct *work = list_first_entry(&worker->scheduled,
struct work_struct, entry);
process_one_work(worker, work);
}
}
static void set_pf_worker(bool val)
{
mutex_lock(&wq_pool_attach_mutex);
if (val)
current->flags |= PF_WQ_WORKER;
else
current->flags &= ~PF_WQ_WORKER;
mutex_unlock(&wq_pool_attach_mutex);
}
/**
* worker_thread - the worker thread function
* @__worker: self
*
* The worker thread function. All workers belong to a worker_pool -
* either a per-cpu one or dynamic unbound one. These workers process all
* work items regardless of their specific target workqueue. The only
* exception is work items which belong to workqueues with a rescuer which
* will be explained in rescuer_thread().
*
* Return: 0
*/
static int worker_thread(void *__worker)
{
struct worker *worker = __worker;
struct worker_pool *pool = worker->pool;
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
spin_lock_irq(&pool->lock);
/* am I supposed to die? */
if (unlikely(worker->flags & WORKER_DIE)) {
spin_unlock_irq(&pool->lock);
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);
set_task_comm(worker->task, "kworker/dying");
ida_simple_remove(&pool->worker_ida, worker->id);
worker_detach_from_pool(worker);
kfree(worker);
return 0;
}
worker_leave_idle(worker);
recheck:
/* no more worker necessary? */
if (!need_more_worker(pool))
goto sleep;
/* do we need to manage? */
if (unlikely(!may_start_working(pool)) && manage_workers(worker))
goto recheck;
/*
* ->scheduled list can only be filled while a worker is
* preparing to process a work or actually processing it.
* Make sure nobody diddled with it while I was sleeping.
*/
WARN_ON_ONCE(!list_empty(&worker->scheduled));
/*
* Finish PREP stage. We're guaranteed to have at least one idle
* worker or that someone else has already assumed the manager
* role. This is where @worker starts participating in concurrency
* management if applicable and concurrency management is restored
* after being rebound. See rebind_workers() for details.
*/
worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
do {
struct work_struct *work =
list_first_entry(&pool->worklist,
struct work_struct, entry);
pool->watchdog_ts = jiffies;
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
/* optimization path, not strictly necessary */
process_one_work(worker, work);
if (unlikely(!list_empty(&worker->scheduled)))
process_scheduled_works(worker);
} else {
move_linked_works(work, &worker->scheduled, NULL);
process_scheduled_works(worker);
}
} while (keep_working(pool));
worker_set_flags(worker, WORKER_PREP);
sleep:
/*
* pool->lock is held and there's no work to process and no need to
* manage, sleep. Workers are woken up only while holding
* pool->lock or from local cpu, so setting the current state
* before releasing pool->lock is enough to prevent losing any
* event.
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
spin_unlock_irq(&pool->lock);
schedule();
goto woke_up;
}
/**
* rescuer_thread - the rescuer thread function
* @__rescuer: self
*
* Workqueue rescuer thread function. There's one rescuer for each
* workqueue which has WQ_MEM_RECLAIM set.
*
* Regular work processing on a pool may block trying to create a new
* worker which uses GFP_KERNEL allocation which has slight chance of
* developing into deadlock if some works currently on the same queue
* need to be processed to satisfy the GFP_KERNEL allocation. This is
* the problem rescuer solves.
*
* When such condition is possible, the pool summons rescuers of all
* workqueues which have works queued on the pool and let them process
* those works so that forward progress can be guaranteed.
*
* This should happen rarely.
*
* Return: 0
*/
static int rescuer_thread(void *__rescuer)
{
struct worker *rescuer = __rescuer;
struct workqueue_struct *wq = rescuer->rescue_wq;
struct list_head *scheduled = &rescuer->scheduled;
bool should_stop;
set_user_nice(current, RESCUER_NICE_LEVEL);
/*
* Mark rescuer as worker too. As WORKER_PREP is never cleared, it
* doesn't participate in concurrency management.
*/
set_pf_worker(true);
repeat:
set_current_state(TASK_IDLE);
/*
* By the time the rescuer is requested to stop, the workqueue
* shouldn't have any work pending, but @wq->maydays may still have
* pwq(s) queued. This can happen by non-rescuer workers consuming
* all the work items before the rescuer got to them. Go through
* @wq->maydays processing before acting on should_stop so that the
* list is always empty on exit.
*/
should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
spin_lock_irq(&wq_mayday_lock);
while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
struct pool_workqueue, mayday_node);
struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n;
bool first = true;
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
spin_unlock_irq(&wq_mayday_lock);
worker_attach_to_pool(rescuer, pool);
spin_lock_irq(&pool->lock);
/*
* Slurp in all works issued via this workqueue and
* process'em.
*/
WARN_ON_ONCE(!list_empty(scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry) {
if (get_work_pwq(work) == pwq) {
if (first)
pool->watchdog_ts = jiffies;
move_linked_works(work, scheduled, &n);
}
first = false;
}
if (!list_empty(scheduled)) {
process_scheduled_works(rescuer);
/*
* The above execution of rescued work items could
* have created more to rescue through
* pwq_activate_first_delayed() or chained
* queueing. Let's put @pwq back on mayday list so
* that such back-to-back work items, which may be
* being used to relieve memory pressure, don't
* incur MAYDAY_INTERVAL delay in between.
*/
if (need_to_create_worker(pool)) {
spin_lock(&wq_mayday_lock);
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
*/
if (wq->rescuer && list_empty(&pwq->mayday_node)) {
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
spin_unlock(&wq_mayday_lock);
}
}
/*
* Put the reference grabbed by send_mayday(). @pool won't
* go away while we're still attached to it.
*/
put_pwq(pwq);
/*
* Leave this pool. If need_more_worker() is %true, notify a
* regular worker; otherwise, we end up with 0 concurrency
* and stalling the execution.
*/
if (need_more_worker(pool))
wake_up_worker(pool);
spin_unlock_irq(&pool->lock);
worker_detach_from_pool(rescuer);
spin_lock_irq(&wq_mayday_lock);
}
spin_unlock_irq(&wq_mayday_lock);
if (should_stop) {
__set_current_state(TASK_RUNNING);
set_pf_worker(false);
return 0;
}
/* rescuers should never participate in concurrency management */
WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
schedule();
goto repeat;
}
/**
* check_flush_dependency - check for flush dependency sanity
* @target_wq: workqueue being flushed
* @target_work: work item being flushed (NULL for workqueue flushes)
*
* %current is trying to flush the whole @target_wq or @target_work on it.
* If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
* reclaiming memory or running on a workqueue which doesn't have
* %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
* a deadlock.
*/
static void check_flush_dependency(struct workqueue_struct *target_wq,
struct work_struct *target_work)
{
work_func_t target_func = target_work ? target_work->func : NULL;
struct worker *worker;
if (target_wq->flags & WQ_MEM_RECLAIM)
return;
worker = current_wq_worker();
WARN_ONCE(current->flags & PF_MEMALLOC,
"workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
current->pid, current->comm, target_wq->name, target_func);
WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
(WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
"workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
worker->current_pwq->wq->name, worker->current_func,
target_wq->name, target_func);
}
struct wq_barrier {
struct work_struct work;
struct completion done;
struct task_struct *task; /* purely informational */
};
static void wq_barrier_func(struct work_struct *work)
{
struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
complete(&barr->done);
}
/**
* insert_wq_barrier - insert a barrier work
* @pwq: pwq to insert barrier into
* @barr: wq_barrier to insert
* @target: target work to attach @barr to
* @worker: worker currently executing @target, NULL if @target is not executing
*
* @barr is linked to @target such that @barr is completed only after
* @target finishes execution. Please note that the ordering
* guarantee is observed only with respect to @target and on the local
* cpu.
*
* Currently, a queued barrier can't be canceled. This is because
* try_to_grab_pending() can't determine whether the work to be
* grabbed is at the head of the queue and thus can't clear LINKED
* flag of the previous work while there must be a valid next work
* after a work with LINKED flag set.
*
* Note that when @worker is non-NULL, @target may be modified
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
struct work_struct *target, struct worker *worker)
{
struct list_head *head;
unsigned int linked = 0;
/*
* debugobject calls are safe here even with pool->lock locked
* as we know for sure that this will not trigger any of the
* checks and call back into the fixup functions where we
* might deadlock.
*/
INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
init_completion_map(&barr->done, &target->lockdep_map);
barr->task = current;
/*
* If @target is currently being executed, schedule the
* barrier to the worker; otherwise, put it after @target.
*/
if (worker)
head = worker->scheduled.next;
else {
unsigned long *bits = work_data_bits(target);
head = target->entry.next;
/* there can already be other linked works, inherit and set */
linked = *bits & WORK_STRUCT_LINKED;
__set_bit(WORK_STRUCT_LINKED_BIT, bits);
}
debug_work_activate(&barr->work);
insert_work(pwq, &barr->work, head,
work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
* flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
* @wq: workqueue being flushed
* @flush_color: new flush color, < 0 for no-op
* @work_color: new work color, < 0 for no-op
*
* Prepare pwqs for workqueue flushing.
*
* If @flush_color is non-negative, flush_color on all pwqs should be
* -1. If no pwq has in-flight commands at the specified color, all
* pwq->flush_color's stay at -1 and %false is returned. If any pwq
* has in flight commands, its pwq->flush_color is set to
* @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
* wakeup logic is armed and %true is returned.
*
* The caller should have initialized @wq->first_flusher prior to
* calling this function with non-negative @flush_color. If
* @flush_color is negative, no flush color update is done and %false
* is returned.
*
* If @work_color is non-negative, all pwqs should have the same
* work_color which is previous to @work_color and all will be
* advanced to @work_color.
*
* CONTEXT:
* mutex_lock(wq->mutex).
*
* Return:
* %true if @flush_color >= 0 and there's something to flush. %false
* otherwise.
*/
static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
int flush_color, int work_color)
{
bool wait = false;
struct pool_workqueue *pwq;
if (flush_color >= 0) {
WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
atomic_set(&wq->nr_pwqs_to_flush, 1);
}
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
if (pwq->nr_in_flight[flush_color]) {
pwq->flush_color = flush_color;
atomic_inc(&wq->nr_pwqs_to_flush);
wait = true;
}
}
if (work_color >= 0) {
WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
pwq->work_color = work_color;
}
spin_unlock_irq(&pool->lock);
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
complete(&wq->first_flusher->done);
return wait;
}
/**
* flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush
*
* This function sleeps until all work items which were queued on entry
* have finished execution, but it is not livelocked by new incoming ones.
*/
void flush_workqueue(struct workqueue_struct *wq)
{
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
};
int next_color;
if (WARN_ON(!wq_online))
return;
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
mutex_lock(&wq->mutex);
/*
* Start-to-wait phase
*/
next_color = work_next_color(wq->work_color);
if (next_color != wq->flush_color) {
/*
* Color space is not full. The current work_color
* becomes our flush_color and work_color is advanced
* by one.
*/
WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
this_flusher.flush_color = wq->work_color;
wq->work_color = next_color;
if (!wq->first_flusher) {
/* no flush in progress, become the first flusher */
WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
wq->first_flusher = &this_flusher;
if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
wq->work_color)) {
/* nothing to flush, done */
wq->flush_color = next_color;
wq->first_flusher = NULL;
goto out_unlock;
}
} else {
/* wait in queue */
WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
list_add_tail(&this_flusher.list, &wq->flusher_queue);
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
}
} else {
/*
* Oops, color space is full, wait on overflow queue.
* The next flush completion will assign us
* flush_color and transfer to flusher_queue.
*/
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
}
check_flush_dependency(wq, NULL);
mutex_unlock(&wq->mutex);
wait_for_completion(&this_flusher.done);
/*
* Wake-up-and-cascade phase
*
* First flushers are responsible for cascading flushes and
* handling overflow. Non-first flushers can simply return.
*/
if (wq->first_flusher != &this_flusher)
return;
mutex_lock(&wq->mutex);
/* we might have raced, check again with mutex held */
if (wq->first_flusher != &this_flusher)
goto out_unlock;
wq->first_flusher = NULL;
WARN_ON_ONCE(!list_empty(&this_flusher.list));
WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
while (true) {
struct wq_flusher *next, *tmp;
/* complete all the flushers sharing the current flush color */
list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
if (next->flush_color != wq->flush_color)
break;
list_del_init(&next->list);
complete(&next->done);
}
WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
wq->flush_color != work_next_color(wq->work_color));
/* this flush_color is finished, advance by one */
wq->flush_color = work_next_color(wq->flush_color);
/* one color has been freed, handle overflow queue */
if (!list_empty(&wq->flusher_overflow)) {
/*
* Assign the same color to all overflowed
* flushers, advance work_color and append to
* flusher_queue. This is the start-to-wait
* phase for these overflowed flushers.
*/
list_for_each_entry(tmp, &wq->flusher_overflow, list)
tmp->flush_color = wq->work_color;
wq->work_color = work_next_color(wq->work_color);
list_splice_tail_init(&wq->flusher_overflow,
&wq->flusher_queue);
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
}
if (list_empty(&wq->flusher_queue)) {
WARN_ON_ONCE(wq->flush_color != wq->work_color);
break;
}
/*
* Need to flush more colors. Make the next flusher
* the new first flusher and arm pwqs.
*/
WARN_ON_ONCE(wq->flush_color == wq->work_color);
WARN_ON_ONCE(wq->flush_color != next->flush_color);
list_del_init(&next->list);
wq->first_flusher = next;
if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
break;
/*
* Meh... this color is already done, clear first
* flusher and repeat cascading.
*/
wq->first_flusher = NULL;
}
out_unlock:
mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL(flush_workqueue);
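/*
 * Illustrative usage sketch (not referenced by the code above; "example_wq"
 * and "example_work" are hypothetical caller-side names):
 *
 *	example_wq = alloc_workqueue("example", 0, 0);
 *	queue_work(example_wq, &example_work);
 *	...
 *	flush_workqueue(example_wq);
 *
 * On return from flush_workqueue(), every work item that was queued on
 * example_wq before the call has finished executing.
 */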
/**
* drain_workqueue - drain a workqueue
* @wq: workqueue to drain
*
* Wait until the workqueue becomes empty. While draining is in progress,
* only chain queueing is allowed. IOW, only currently pending or running
* work items on @wq can queue further work items on it. @wq is flushed
* repeatedly until it becomes empty. The number of flushing is determined
* by the depth of chaining and should be relatively short. Whine if it
* takes too long.
*/
void drain_workqueue(struct workqueue_struct *wq)
{
unsigned int flush_cnt = 0;
struct pool_workqueue *pwq;
/*
 * __queue_work() needs to test whether there are drainers; it is much
 * hotter than drain_workqueue() and already looks at @wq->flags.
 * Use __WQ_DRAINING so that the queueing path doesn't have to check nr_drainers.
*/
mutex_lock(&wq->mutex);
if (!wq->nr_drainers++)
wq->flags |= __WQ_DRAINING;
mutex_unlock(&wq->mutex);
reflush:
flush_workqueue(wq);
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
bool drained;
spin_lock_irq(&pwq->pool->lock);
drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
spin_unlock_irq(&pwq->pool->lock);
if (drained)
continue;
if (++flush_cnt == 10 ||
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
wq->name, flush_cnt);
mutex_unlock(&wq->mutex);
goto reflush;
}
if (!--wq->nr_drainers)
wq->flags &= ~__WQ_DRAINING;
mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
bool from_cancel)
{
struct worker *worker = NULL;
struct worker_pool *pool;
struct pool_workqueue *pwq;
might_sleep();
local_irq_disable();
pool = get_work_pool(work);
if (!pool) {
local_irq_enable();
return false;
}
spin_lock(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
if (unlikely(pwq->pool != pool))
goto already_gone;
} else {
worker = find_worker_executing_work(pool, work);
if (!worker)
goto already_gone;
pwq = worker->current_pwq;
}
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
spin_unlock_irq(&pool->lock);
/*
* Force a lock recursion deadlock when using flush_work() inside a
* single-threaded or rescuer equipped workqueue.
*
 * For single threaded workqueues the deadlock happens when the flushed
 * work is queued after the work issuing the flush_work(). For rescuer equipped
* workqueues the deadlock happens when the rescuer stalls, blocking
* forward progress.
*/
if (!from_cancel &&
(pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
return true;
already_gone:
spin_unlock_irq(&pool->lock);
return false;
}
static bool __flush_work(struct work_struct *work, bool from_cancel)
{
struct wq_barrier barr;
if (WARN_ON(!wq_online))
return false;
if (WARN_ON(!work->func))
return false;
if (!from_cancel) {
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
}
if (start_flush_work(work, &barr, from_cancel)) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
return true;
} else {
return false;
}
}
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
*
* Wait until @work has finished execution. @work is guaranteed to be idle
* on return if it hasn't been requeued since flush started.
*
* Return:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_work(struct work_struct *work)
{
return __flush_work(work, false);
}
EXPORT_SYMBOL_GPL(flush_work);
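/*
 * Illustrative sketch of how this differs from flush_workqueue()
 * ("example_work" is a hypothetical work item):
 *
 *	schedule_work(&example_work);
 *	...
 *	flush_work(&example_work);
 *
 * Only the last queueing instance of example_work is waited for; other
 * work items on the same workqueue are not.
 */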
struct cwt_wait {
wait_queue_entry_t wait;
struct work_struct *work;
};
static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
if (cwait->work != key)
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(work, is_dwork, &flags);
/*
* If someone else is already canceling, wait for it to
* finish. flush_work() doesn't work for PREEMPT_NONE
* because we may get scheduled between @work's completion
* and the other canceling task resuming and clearing
* CANCELING - flush_work() will return false immediately
* as @work is no longer busy, try_to_grab_pending() will
* return -ENOENT as @work is still being canceled and the
* other canceling task won't be able to clear CANCELING as
* we're hogging the CPU.
*
* Let's wait for completion using a waitqueue. As this
* may lead to the thundering herd problem, use a custom
* wake function which matches @work along with exclusive
* wait and wakeup.
*/
if (unlikely(ret == -ENOENT)) {
struct cwt_wait cwait;
init_wait(&cwait.wait);
cwait.wait.func = cwt_wakefn;
cwait.work = work;
prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
TASK_UNINTERRUPTIBLE);
if (work_is_canceling(work))
schedule();
finish_wait(&cancel_waitq, &cwait.wait);
}
} while (unlikely(ret < 0));
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
local_irq_restore(flags);
/*
* This allows canceling during early boot. We know that @work
* isn't executing.
*/
if (wq_online)
__flush_work(work, true);
clear_work_data(work);
/*
* Paired with prepare_to_wait() above so that either
* waitqueue_active() is visible here or !work_is_canceling() is
* visible there.
*/
smp_mb();
if (waitqueue_active(&cancel_waitq))
__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
return ret;
}
/**
* cancel_work_sync - cancel a work and wait for it to finish
* @work: the work to cancel
*
* Cancel @work and wait for its execution to finish. This function
* can be used even if the work re-queues itself or migrates to
* another workqueue. On return from this function, @work is
* guaranteed to be not pending or executing on any CPU.
*
* cancel_work_sync(&delayed_work->work) must not be used for
* delayed_work's. Use cancel_delayed_work_sync() instead.
*
* The caller must ensure that the workqueue on which @work was last
* queued can't be destroyed before this function returns.
*
* Return:
* %true if @work was pending, %false otherwise.
*/
bool cancel_work_sync(struct work_struct *work)
{
return __cancel_work_timer(work, false);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
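/*
 * Illustrative sketch of the usual teardown pattern ("example_dev" and its
 * reset_work member are hypothetical):
 *
 *	static void example_remove(struct example_dev *dev)
 *	{
 *		cancel_work_sync(&dev->reset_work);
 *		...
 *	}
 *
 * After cancel_work_sync() returns, reset_work is neither pending nor
 * running anywhere, so the resources it uses may be torn down safely.
 */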
/**
* flush_delayed_work - wait for a dwork to finish executing the last queueing
* @dwork: the delayed work to flush
*
* Delayed timer is cancelled and the pending work is queued for
* immediate execution. Like flush_work(), this function only
* considers the last queueing instance of @dwork.
*
* Return:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
local_irq_disable();
if (del_timer_sync(&dwork->timer))
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
local_irq_enable();
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
* flush_rcu_work - wait for a rwork to finish executing the last queueing
* @rwork: the rcu work to flush
*
* Return:
* %true if flush_rcu_work() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_rcu_work(struct rcu_work *rwork)
{
if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
rcu_barrier();
flush_work(&rwork->work);
return true;
} else {
return flush_work(&rwork->work);
}
}
EXPORT_SYMBOL(flush_rcu_work);
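/*
 * Illustrative sketch ("ctx" and example_free_fn are hypothetical): an rcu
 * work runs after a grace period, and flush_rcu_work() waits for both the
 * grace period and the execution.
 *
 *	INIT_RCU_WORK(&ctx->rwork, example_free_fn);
 *	queue_rcu_work(system_wq, &ctx->rwork);
 *	...
 *	flush_rcu_work(&ctx->rwork);
 */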
static bool __cancel_work(struct work_struct *work, bool is_dwork)
{
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(work, is_dwork, &flags);
} while (unlikely(ret == -EAGAIN));
if (unlikely(ret < 0))
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
local_irq_restore(flags);
return ret;
}
/**
* cancel_delayed_work - cancel a delayed work
* @dwork: delayed_work to cancel
*
* Kill off a pending delayed_work.
*
* Return: %true if @dwork was pending and canceled; %false if it wasn't
* pending.
*
* Note:
* The work callback function may still be running on return, unless
* it returns %true and the work doesn't re-arm itself. Explicitly flush or
* use cancel_delayed_work_sync() to wait on it.
*
* This function is safe to call from any context including IRQ handler.
*/
bool cancel_delayed_work(struct delayed_work *dwork)
{
return __cancel_work(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work);
/**
* cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
* @dwork: the delayed work cancel
*
* This is cancel_work_sync() for delayed works.
*
* Return:
* %true if @dwork was pending, %false otherwise.
*/
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
return __cancel_work_timer(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
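/*
 * Illustrative sketch of a polling pattern ("example_dev" and its poll_work
 * member are hypothetical):
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, example_poll_fn);
 *	queue_delayed_work(system_wq, &dev->poll_work, HZ);
 *	...
 *	cancel_delayed_work_sync(&dev->poll_work);
 *
 * The timer and any queued instance are gone on return; if the poll
 * function re-arms itself, the sync variant is needed to stop it reliably.
 */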
/**
* schedule_on_each_cpu - execute a function synchronously on each online CPU
* @func: the function to call
*
* schedule_on_each_cpu() executes @func on each online CPU using the
* system workqueue and blocks until all CPUs have completed.
* schedule_on_each_cpu() is very slow.
*
* Return:
* 0 on success, -errno on failure.
*/
int schedule_on_each_cpu(work_func_t func)
{
int cpu;
struct work_struct __percpu *works;
works = alloc_percpu(struct work_struct);
if (!works)
return -ENOMEM;
get_online_cpus();
for_each_online_cpu(cpu) {
struct work_struct *work = per_cpu_ptr(works, cpu);
INIT_WORK(work, func);
schedule_work_on(cpu, work);
}
for_each_online_cpu(cpu)
flush_work(per_cpu_ptr(works, cpu));
put_online_cpus();
free_percpu(works);
return 0;
}
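/*
 * Illustrative sketch ("example_percpu_fn" is a hypothetical work function):
 *
 *	static void example_percpu_fn(struct work_struct *work)
 *	{
 *		...	runs once on every online CPU
 *	}
 *
 *	ret = schedule_on_each_cpu(example_percpu_fn);
 *
 * The call blocks until the function has completed on all online CPUs.
 */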
/**
* execute_in_process_context - reliably execute the routine with user context
* @fn: the function to execute
* @ew: guaranteed storage for the execute work structure (must
* be available when the work executes)
*
* Executes the function immediately if process context is available,
* otherwise schedules the function for delayed execution.
*
* Return: 0 - function was executed
* 1 - function was scheduled for execution
*/
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
if (!in_interrupt()) {
fn(&ew->work);
return 0;
}
INIT_WORK(&ew->work, fn);
schedule_work(&ew->work);
return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
/**
* free_workqueue_attrs - free a workqueue_attrs
* @attrs: workqueue_attrs to free
*
* Undo alloc_workqueue_attrs().
*/
void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
if (attrs) {
free_cpumask_var(attrs->cpumask);
kfree(attrs);
}
}
/**
* alloc_workqueue_attrs - allocate a workqueue_attrs
* @gfp_mask: allocation mask to use
*
* Allocate a new workqueue_attrs, initialize with default settings and
* return it.
*
 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
*/
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
{
struct workqueue_attrs *attrs;
attrs = kzalloc(sizeof(*attrs), gfp_mask);
if (!attrs)
goto fail;
if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
goto fail;
cpumask_copy(attrs->cpumask, cpu_possible_mask);
return attrs;
fail:
free_workqueue_attrs(attrs);
return NULL;
}
static void copy_workqueue_attrs(struct workqueue_attrs *to,
const struct workqueue_attrs *from)
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
/*
* Unlike hash and equality test, this function doesn't ignore
* ->no_numa as it is used for both pool and wq attrs. Instead,
* get_unbound_pool() explicitly clears ->no_numa after copying.
*/
to->no_numa = from->no_numa;
}
/* hash value of the content of @attr */
static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
{
u32 hash = 0;
hash = jhash_1word(attrs->nice, hash);
hash = jhash(cpumask_bits(attrs->cpumask),
BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
return hash;
}
/* content equality test */
static bool wqattrs_equal(const struct workqueue_attrs *a,
const struct workqueue_attrs *b)
{
if (a->nice != b->nice)
return false;
if (!cpumask_equal(a->cpumask, b->cpumask))
return false;
return true;
}
/**
* init_worker_pool - initialize a newly zalloc'd worker_pool
* @pool: worker_pool to initialize
*
* Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
*
* Return: 0 on success, -errno on failure. Even on failure, all fields
* inside @pool proper are initialized and put_unbound_pool() can be called
* on @pool safely to release it.
*/
static int init_worker_pool(struct worker_pool *pool)
{
spin_lock_init(&pool->lock);
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
pool->flags |= POOL_DISASSOCIATED;
pool->watchdog_ts = jiffies;
INIT_LIST_HEAD(&pool->worklist);
INIT_LIST_HEAD(&pool->idle_list);
hash_init(pool->busy_hash);
timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
INIT_LIST_HEAD(&pool->workers);
ida_init(&pool->worker_ida);
INIT_HLIST_NODE(&pool->hash_node);
pool->refcnt = 1;
/* shouldn't fail above this point */
pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (!pool->attrs)
return -ENOMEM;
return 0;
}
static void rcu_free_wq(struct rcu_head *rcu)
{
struct workqueue_struct *wq =
container_of(rcu, struct workqueue_struct, rcu);
if (!(wq->flags & WQ_UNBOUND))
free_percpu(wq->cpu_pwqs);
else
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq->rescuer);
kfree(wq);
}
static void rcu_free_pool(struct rcu_head *rcu)
{
struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
ida_destroy(&pool->worker_ida);
free_workqueue_attrs(pool->attrs);
kfree(pool);
}
/**
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
* Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
*
* Should be called with wq_pool_mutex held.
*/
static void put_unbound_pool(struct worker_pool *pool)
{
DECLARE_COMPLETION_ONSTACK(detach_completion);
struct worker *worker;
lockdep_assert_held(&wq_pool_mutex);
if (--pool->refcnt)
return;
/* sanity checks */
if (WARN_ON(!(pool->cpu < 0)) ||
WARN_ON(!list_empty(&pool->worklist)))
return;
/* release id and unhash */
if (pool->id >= 0)
idr_remove(&worker_pool_idr, pool->id);
hash_del(&pool->hash_node);
/*
* Become the manager and destroy all workers. This prevents
 * @pool's workers from blocking on wq_pool_attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
*/
spin_lock_irq(&pool->lock);
wait_event_lock_irq(wq_manager_wait,
!(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
pool->flags |= POOL_MANAGER_ACTIVE;
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
spin_unlock_irq(&pool->lock);
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
pool->detach_completion = &detach_completion;
mutex_unlock(&wq_pool_attach_mutex);
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
/* sched-RCU protected to allow dereferences from get_work_pool() */
call_rcu_sched(&pool->rcu, rcu_free_pool);
}
/**
* get_unbound_pool - get a worker_pool with the specified attributes
* @attrs: the attributes of the worker_pool to get
*
* Obtain a worker_pool which has the same attributes as @attrs, bump the
* reference count and return it. If there already is a matching
* worker_pool, it will be used; otherwise, this function attempts to
* create a new one.
*
* Should be called with wq_pool_mutex held.
*
* Return: On success, a worker_pool with the same attributes as @attrs.
* On failure, %NULL.
*/
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
int node;
int target_node = NUMA_NO_NODE;
lockdep_assert_held(&wq_pool_mutex);
/* do we already have a matching pool? */
hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
if (wqattrs_equal(pool->attrs, attrs)) {
pool->refcnt++;
return pool;
}
}
/* if cpumask is contained inside a NUMA node, we belong to that node */
if (wq_numa_enabled) {
for_each_node(node) {
if (cpumask_subset(attrs->cpumask,
wq_numa_possible_cpumask[node])) {
target_node = node;
break;
}
}
}
/* nope, create a new one */
pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
if (!pool || init_worker_pool(pool) < 0)
goto fail;
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
pool->node = target_node;
/*
* no_numa isn't a worker_pool attribute, always clear it. See
* 'struct workqueue_attrs' comments for detail.
*/
pool->attrs->no_numa = false;
if (worker_pool_assign_id(pool) < 0)
goto fail;
/* create and start the initial worker */
if (wq_online && !create_worker(pool))
goto fail;
/* install */
hash_add(unbound_pool_hash, &pool->hash_node, hash);
return pool;
fail:
if (pool)
put_unbound_pool(pool);
return NULL;
}
static void rcu_free_pwq(struct rcu_head *rcu)
{
kmem_cache_free(pwq_cache,
container_of(rcu, struct pool_workqueue, rcu));
}
/*
* Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
* and needs to be destroyed.
*/
static void pwq_unbound_release_workfn(struct work_struct *work)
{
struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
unbound_release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
bool is_last;
if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
return;
mutex_lock(&wq->mutex);
list_del_rcu(&pwq->pwqs_node);
is_last = list_empty(&wq->pwqs);
mutex_unlock(&wq->mutex);
mutex_lock(&wq_pool_mutex);
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
call_rcu_sched(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
call_rcu_sched(&wq->rcu, rcu_free_wq);
}
/**
* pwq_adjust_max_active - update a pwq's max_active to the current setting
* @pwq: target pool_workqueue
*
* If @pwq isn't freezing, set @pwq->max_active to the associated
* workqueue's saved_max_active and activate delayed work items
* accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
*/
static void pwq_adjust_max_active(struct pool_workqueue *pwq)
{
struct workqueue_struct *wq = pwq->wq;
bool freezable = wq->flags & WQ_FREEZABLE;
unsigned long flags;
/* for @wq->saved_max_active */
lockdep_assert_held(&wq->mutex);
/* fast exit for non-freezable wqs */
if (!freezable && pwq->max_active == wq->saved_max_active)
return;
/* this function can be called during early boot w/ irq disabled */
spin_lock_irqsave(&pwq->pool->lock, flags);
/*
* During [un]freezing, the caller is responsible for ensuring that
* this function is called at least once after @workqueue_freezing
* is updated and visible.
*/
if (!freezable || !workqueue_freezing) {
pwq->max_active = wq->saved_max_active;
while (!list_empty(&pwq->delayed_works) &&
pwq->nr_active < pwq->max_active)
pwq_activate_first_delayed(pwq);
/*
 * Need to kick a worker after the wq is thawed or an unbound wq's
* max_active is bumped. It's a slow path. Do it always.
*/
wake_up_worker(pwq->pool);
} else {
pwq->max_active = 0;
}
spin_unlock_irqrestore(&pwq->pool->lock, flags);
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
struct worker_pool *pool)
{
BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
memset(pwq, 0, sizeof(*pwq));
pwq->pool = pool;
pwq->wq = wq;
pwq->flush_color = -1;
pwq->refcnt = 1;
INIT_LIST_HEAD(&pwq->delayed_works);
INIT_LIST_HEAD(&pwq->pwqs_node);
INIT_LIST_HEAD(&pwq->mayday_node);
INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
}
/* sync @pwq with the current state of its associated wq and link it */
static void link_pwq(struct pool_workqueue *pwq)
{
struct workqueue_struct *wq = pwq->wq;
lockdep_assert_held(&wq->mutex);
/* may be called multiple times, ignore if already linked */
if (!list_empty(&pwq->pwqs_node))
return;
/* set the matching work_color */
pwq->work_color = wq->work_color;
/* sync max_active to the current setting */
pwq_adjust_max_active(pwq);
/* link in @pwq */
list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
}
/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct worker_pool *pool;
struct pool_workqueue *pwq;
lockdep_assert_held(&wq_pool_mutex);
pool = get_unbound_pool(attrs);
if (!pool)
return NULL;
pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
if (!pwq) {
put_unbound_pool(pool);
return NULL;
}
init_pwq(pwq, wq, pool);
return pwq;
}
/**
* wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
* @attrs: the wq_attrs of the default pwq of the target workqueue
* @node: the target NUMA node
* @cpu_going_down: if >= 0, the CPU to consider as offline
* @cpumask: outarg, the resulting cpumask
*
* Calculate the cpumask a workqueue with @attrs should use on @node. If
* @cpu_going_down is >= 0, that cpu is considered offline during
* calculation. The result is stored in @cpumask.
*
* If NUMA affinity is not enabled, @attrs->cpumask is always used. If
* enabled and @node has online CPUs requested by @attrs, the returned
* cpumask is the intersection of the possible CPUs of @node and
* @attrs->cpumask.
*
* The caller is responsible for ensuring that the cpumask of @node stays
* stable.
*
* Return: %true if the resulting @cpumask is different from @attrs->cpumask,
* %false if equal.
*/
static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
int cpu_going_down, cpumask_t *cpumask)
{
if (!wq_numa_enabled || attrs->no_numa)
goto use_dfl;
/* does @node have any online CPUs @attrs wants? */
cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
if (cpu_going_down >= 0)
cpumask_clear_cpu(cpu_going_down, cpumask);
if (cpumask_empty(cpumask))
goto use_dfl;
/* yeap, return possible CPUs in @node that @attrs wants */
cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
if (cpumask_empty(cpumask)) {
pr_warn_once("WARNING: workqueue cpumask: online intersect > "
"possible intersect\n");
return false;
}
return !cpumask_equal(cpumask, attrs->cpumask);
use_dfl:
cpumask_copy(cpumask, attrs->cpumask);
return false;
}
/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
int node,
struct pool_workqueue *pwq)
{
struct pool_workqueue *old_pwq;
lockdep_assert_held(&wq_pool_mutex);
lockdep_assert_held(&wq->mutex);
/* link_pwq() can handle duplicate calls */
link_pwq(pwq);
old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
return old_pwq;
}
/* context to store the prepared attrs & pwqs before applying */
struct apply_wqattrs_ctx {
struct workqueue_struct *wq; /* target workqueue */
struct workqueue_attrs *attrs; /* attrs to apply */
struct list_head list; /* queued for batching commit */
struct pool_workqueue *dfl_pwq;
struct pool_workqueue *pwq_tbl[];
};
/* free the resources after success or abort */
static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
{
if (ctx) {
int node;
for_each_node(node)
put_pwq_unlocked(ctx->pwq_tbl[node]);
put_pwq_unlocked(ctx->dfl_pwq);
free_workqueue_attrs(ctx->attrs);
kfree(ctx);
}
}
/* allocate the attrs and pwqs for later installation */
static struct apply_wqattrs_ctx *
apply_wqattrs_prepare(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct apply_wqattrs_ctx *ctx;
struct workqueue_attrs *new_attrs, *tmp_attrs;
int node;
lockdep_assert_held(&wq_pool_mutex);
ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (!ctx || !new_attrs || !tmp_attrs)
goto out_free;
/*
* Calculate the attrs of the default pwq.
* If the user configured cpumask doesn't overlap with the
 * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
*/
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
if (unlikely(cpumask_empty(new_attrs->cpumask)))
cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
/*
* We may create multiple pwqs with differing cpumasks. Make a
* copy of @new_attrs which will be modified and used to obtain
* pools.
*/
copy_workqueue_attrs(tmp_attrs, new_attrs);
/*
* If something goes wrong during CPU up/down, we'll fall back to
* the default pwq covering whole @attrs->cpumask. Always create
* it even if we don't use it immediately.
*/
ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
if (!ctx->dfl_pwq)
goto out_free;
for_each_node(node) {
if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
if (!ctx->pwq_tbl[node])
goto out_free;
} else {
ctx->dfl_pwq->refcnt++;
ctx->pwq_tbl[node] = ctx->dfl_pwq;
}
}
/* save the user configured attrs and sanitize it. */
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
ctx->attrs = new_attrs;
ctx->wq = wq;
free_workqueue_attrs(tmp_attrs);
return ctx;
out_free:
free_workqueue_attrs(tmp_attrs);
free_workqueue_attrs(new_attrs);
apply_wqattrs_cleanup(ctx);
return NULL;
}
/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
int node;
/* all pwqs have been created successfully, let's install'em */
mutex_lock(&ctx->wq->mutex);
copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
/* save the previous pwq and install the new one */
for_each_node(node)
ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
ctx->pwq_tbl[node]);
/* @dfl_pwq might not have been used, ensure it's linked */
link_pwq(ctx->dfl_pwq);
swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
mutex_unlock(&ctx->wq->mutex);
}
static void apply_wqattrs_lock(void)
{
/* CPUs should stay stable across pwq creations and installations */
get_online_cpus();
mutex_lock(&wq_pool_mutex);
}
static void apply_wqattrs_unlock(void)
{
mutex_unlock(&wq_pool_mutex);
put_online_cpus();
}
static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct apply_wqattrs_ctx *ctx;
/* only unbound workqueues can change attributes */
if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
return -EINVAL;
/* creating multiple pwqs breaks ordering guarantee */
if (!list_empty(&wq->pwqs)) {
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;
wq->flags &= ~__WQ_ORDERED;
}
ctx = apply_wqattrs_prepare(wq, attrs);
if (!ctx)
return -ENOMEM;
/* the ctx has been prepared successfully, let's commit it */
apply_wqattrs_commit(ctx);
apply_wqattrs_cleanup(ctx);
return 0;
}
/**
* apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
* @wq: the target workqueue
* @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
*
* Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
* machines, this function maps a separate pwq to each NUMA node with
 * possible CPUs in @attrs->cpumask so that work items are affine to the
* NUMA node it was issued on. Older pwqs are released as in-flight work
* items finish. Note that a work item which repeatedly requeues itself
* back-to-back will stay on its current pwq.
*
* Performs GFP_KERNEL allocations.
*
* Return: 0 on success and -errno on failure.
*/
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
int ret;
apply_wqattrs_lock();
ret = apply_workqueue_attrs_locked(wq, attrs);
apply_wqattrs_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
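/*
 * Illustrative sketch ("example_unbound_wq" is a hypothetical WQ_UNBOUND
 * workqueue): restrict an unbound workqueue to a given cpumask and nice
 * level.
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (attrs) {
 *		attrs->nice = -10;
 *		cpumask_copy(attrs->cpumask, cpumask_of(2));
 *		ret = apply_workqueue_attrs(example_unbound_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */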
/**
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
* @wq: the target workqueue
* @cpu: the CPU coming up or going down
* @online: whether @cpu is coming up or going down
*
* This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
* %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
* @wq accordingly.
*
* If NUMA affinity can't be adjusted due to memory allocation failure, it
* falls back to @wq->dfl_pwq which may not be optimal but is always
* correct.
*
* Note that when the last allowed CPU of a NUMA node goes offline for a
* workqueue with a cpumask spanning multiple nodes, the workers which were
* already executing the work items for the workqueue will lose their CPU
* affinity and may execute on any CPU. This is similar to how per-cpu
* workqueues behave on CPU_DOWN. If a workqueue user wants strict
* affinity, it's the user's responsibility to flush the work item from
* CPU_DOWN_PREPARE.
*/
static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
bool online)
{
int node = cpu_to_node(cpu);
int cpu_off = online ? -1 : cpu;
struct pool_workqueue *old_pwq = NULL, *pwq;
struct workqueue_attrs *target_attrs;
cpumask_t *cpumask;
lockdep_assert_held(&wq_pool_mutex);
if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
wq->unbound_attrs->no_numa)
return;
/*
* We don't wanna alloc/free wq_attrs for each wq for each CPU.
* Let's use a preallocated one. The following buf is protected by
* CPU hotplug exclusion.
*/
target_attrs = wq_update_unbound_numa_attrs_buf;
cpumask = target_attrs->cpumask;
copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
pwq = unbound_pwq_by_node(wq, node);
/*
* Let's determine what needs to be done. If the target cpumask is
* different from the default pwq's, we need to compare it to @pwq's
* and create a new one if they don't match. If the target cpumask
* equals the default pwq's, the default pwq should be used.
*/
if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
return;
} else {
goto use_dfl_pwq;
}
/* create a new pwq */
pwq = alloc_unbound_pwq(wq, target_attrs);
if (!pwq) {
pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
wq->name);
goto use_dfl_pwq;
}
/* Install the new pwq. */
mutex_lock(&wq->mutex);
old_pwq = numa_pwq_tbl_install(wq, node, pwq);
goto out_unlock;
use_dfl_pwq:
mutex_lock(&wq->mutex);
spin_lock_irq(&wq->dfl_pwq->pool->lock);
get_pwq(wq->dfl_pwq);
spin_unlock_irq(&wq->dfl_pwq->pool->lock);
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
put_pwq_unlocked(old_pwq);
}
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
bool highpri = wq->flags & WQ_HIGHPRI;
int cpu, ret;
if (!(wq->flags & WQ_UNBOUND)) {
wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
if (!wq->cpu_pwqs)
return -ENOMEM;
for_each_possible_cpu(cpu) {
struct pool_workqueue *pwq =
per_cpu_ptr(wq->cpu_pwqs, cpu);
struct worker_pool *cpu_pools =
per_cpu(cpu_worker_pools, cpu);
init_pwq(pwq, wq, &cpu_pools[highpri]);
mutex_lock(&wq->mutex);
link_pwq(pwq);
mutex_unlock(&wq->mutex);
}
return 0;
} else if (wq->flags & __WQ_ORDERED) {
ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
		/* there should only be a single pwq for ordering guarantee */
WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
"ordering guarantee broken for workqueue %s\n", wq->name);
return ret;
} else {
return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
const char *name)
{
int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
if (max_active < 1 || max_active > lim)
pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
max_active, name, 1, lim);
return clamp_val(max_active, 1, lim);
}
/*
* Workqueues which may be used during memory reclaim should have a rescuer
* to guarantee forward progress.
*/
static int init_rescuer(struct workqueue_struct *wq)
{
struct worker *rescuer;
int ret;
if (!(wq->flags & WQ_MEM_RECLAIM))
return 0;
rescuer = alloc_worker(NUMA_NO_NODE);
if (!rescuer)
return -ENOMEM;
rescuer->rescue_wq = wq;
rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
ret = PTR_ERR_OR_ZERO(rescuer->task);
if (ret) {
kfree(rescuer);
return ret;
}
wq->rescuer = rescuer;
kthread_bind_mask(rescuer->task, cpu_possible_mask);
wake_up_process(rescuer->task);
return 0;
}
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
struct lock_class_key *key,
const char *lock_name, ...)
{
size_t tbl_size = 0;
va_list args;
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
/*
* Unbound && max_active == 1 used to imply ordered, which is no
* longer the case on NUMA machines due to per-node pools. While
* alloc_ordered_workqueue() is the right way to create an ordered
* workqueue, keep the previous behavior to avoid subtle breakages
* on NUMA.
*/
if ((flags & WQ_UNBOUND) && max_active == 1)
flags |= __WQ_ORDERED;
/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
/* allocate wq and format name */
if (flags & WQ_UNBOUND)
tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
if (!wq)
return NULL;
if (flags & WQ_UNBOUND) {
wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (!wq->unbound_attrs)
goto err_free_wq;
}
va_start(args, lock_name);
vsnprintf(wq->name, sizeof(wq->name), fmt, args);
va_end(args);
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, wq->name);
/* init wq */
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->mutex);
atomic_set(&wq->nr_pwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->pwqs);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
INIT_LIST_HEAD(&wq->maydays);
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
if (alloc_and_link_pwqs(wq) < 0)
goto err_free_wq;
if (wq_online && init_rescuer(wq) < 0)
goto err_destroy;
if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
goto err_destroy;
/*
* wq_pool_mutex protects global freeze state and workqueues list.
* Grab it, adjust max_active and add the new @wq to workqueues
* list.
*/
mutex_lock(&wq_pool_mutex);
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq)
pwq_adjust_max_active(pwq);
mutex_unlock(&wq->mutex);
list_add_tail_rcu(&wq->list, &workqueues);
mutex_unlock(&wq_pool_mutex);
return wq;
err_free_wq:
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
return NULL;
err_destroy:
destroy_workqueue(wq);
return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
/**
* destroy_workqueue - safely terminate a workqueue
* @wq: target workqueue
*
* Safely destroy a workqueue. All work currently pending will be done first.
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
struct pool_workqueue *pwq;
int node;
/*
* Remove it from sysfs first so that sanity check failure doesn't
* lead to sysfs name conflicts.
*/
workqueue_sysfs_unregister(wq);
/* drain it before proceeding with destruction */
drain_workqueue(wq);
/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
if (wq->rescuer) {
struct worker *rescuer = wq->rescuer;
/* this prevents new queueing */
spin_lock_irq(&wq_mayday_lock);
wq->rescuer = NULL;
spin_unlock_irq(&wq_mayday_lock);
/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
kfree(rescuer);
}
/* sanity checks */
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
int i;
for (i = 0; i < WORK_NR_COLORS; i++) {
if (WARN_ON(pwq->nr_in_flight[i])) {
mutex_unlock(&wq->mutex);
show_workqueue_state();
return;
}
}
if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
WARN_ON(pwq->nr_active) ||
WARN_ON(!list_empty(&pwq->delayed_works))) {
mutex_unlock(&wq->mutex);
show_workqueue_state();
return;
}
}
mutex_unlock(&wq->mutex);
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
mutex_lock(&wq_pool_mutex);
list_del_rcu(&wq->list);
mutex_unlock(&wq_pool_mutex);
if (!(wq->flags & WQ_UNBOUND)) {
/*
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
call_rcu_sched(&wq->rcu, rcu_free_wq);
} else {
/*
* We're the sole accessor of @wq at this point. Directly
* access numa_pwq_tbl[] and dfl_pwq to put the base refs.
* @wq will be freed when the last pwq is released.
*/
for_each_node(node) {
pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
put_pwq_unlocked(pwq);
}
/*
* Put dfl_pwq. @wq may be freed any time after dfl_pwq is
* put. Don't access it afterwards.
*/
pwq = wq->dfl_pwq;
wq->dfl_pwq = NULL;
put_pwq_unlocked(pwq);
}
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
* workqueue_set_max_active - adjust max_active of a workqueue
* @wq: target workqueue
* @max_active: new max_active value.
*
* Set max_active of @wq to @max_active.
*
* CONTEXT:
* Don't call from IRQ context.
*/
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
struct pool_workqueue *pwq;
/* disallow meddling with max_active for ordered workqueues */
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
mutex_lock(&wq->mutex);
wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;
for_each_pwq(pwq, wq)
pwq_adjust_max_active(pwq);
mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
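/*
 * Illustrative sketch ("example_wq" is hypothetical): raise the concurrency
 * limit of a non-ordered workqueue at runtime.
 *
 *	example_wq = alloc_workqueue("example", WQ_UNBOUND, 4);
 *	...
 *	workqueue_set_max_active(example_wq, 16);
 */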
/**
* current_work - retrieve %current task's work struct
*
* Determine if %current task is a workqueue worker and what it's working on.
* Useful to find out the context that the %current task is running in.
*
* Return: work struct if %current task is a workqueue worker, %NULL otherwise.
*/
struct work_struct *current_work(void)
{
struct worker *worker = current_wq_worker();
return worker ? worker->current_work : NULL;
}
EXPORT_SYMBOL(current_work);
/**
* current_is_workqueue_rescuer - is %current workqueue rescuer?
*
* Determine whether %current is a workqueue rescuer. Can be used from
* work functions to determine whether it's being run off the rescuer task.
*
* Return: %true if %current is a workqueue rescuer. %false otherwise.
*/
bool current_is_workqueue_rescuer(void)
{
struct worker *worker = current_wq_worker();
return worker && worker->rescue_wq;
}
/**
* workqueue_congested - test whether a workqueue is congested
* @cpu: CPU in question
* @wq: target workqueue
*
* Test whether @wq's cpu workqueue for @cpu is congested. There is
* no synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
* If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
* Note that both per-cpu and unbound workqueues may be associated with
* multiple pool_workqueues which have separate congested states. A
* workqueue being congested on one CPU doesn't mean the workqueue is also
 * congested on other CPUs / NUMA nodes.
*
* Return:
* %true if congested, %false otherwise.
*/
bool workqueue_congested(int cpu, struct workqueue_struct *wq)
{
struct pool_workqueue *pwq;
bool ret;
rcu_read_lock_sched();
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
if (!(wq->flags & WQ_UNBOUND))
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
else
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
rcu_read_unlock_sched();
return ret;
}
EXPORT_SYMBOL_GPL(workqueue_congested);
/**
* work_busy - test whether a work is currently pending or running
* @work: the work to be tested
*
* Test whether @work is currently pending or running. There is no
* synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
* Return:
* OR'd bitmask of WORK_BUSY_* bits.
*/
unsigned int work_busy(struct work_struct *work)
{
struct worker_pool *pool;
unsigned long flags;
unsigned int ret = 0;
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
local_irq_save(flags);
pool = get_work_pool(work);
if (pool) {
spin_lock(&pool->lock);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
spin_unlock(&pool->lock);
}
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
/**
* set_worker_desc - set description for the current work item
* @fmt: printf-style format string
* @...: arguments for the format string
*
* This function can be called by a running work function to describe what
* the work item is about. If the worker task gets dumped, this
* information will be printed out together to help debugging. The
* description can be at most WORKER_DESC_LEN including the trailing '\0'.
*/
void set_worker_desc(const char *fmt, ...)
{
struct worker *worker = current_wq_worker();
va_list args;
if (worker) {
va_start(args, fmt);
vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
va_end(args);
}
}
EXPORT_SYMBOL_GPL(set_worker_desc);
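/*
 * Illustrative sketch ("example_req" and its fields are hypothetical): a
 * work function can describe what it is working on so that task dumps are
 * more informative.
 *
 *	static void example_io_work_fn(struct work_struct *work)
 *	{
 *		struct example_req *req = container_of(work, struct example_req,
 *						       work);
 *
 *		set_worker_desc("example-io/%d", req->id);
 *		...
 *	}
 */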
/**
* print_worker_info - print out worker information and description
* @log_lvl: the log level to use when printing
* @task: target task
*
* If @task is a worker and currently executing a work item, print out the
* name of the workqueue being serviced and worker description set with
* set_worker_desc() by the currently executing work item.
*
* This function can be safely called on any task as long as the
* task_struct itself is accessible. While safe, this function isn't
 * synchronized and may print out mixed-up or garbled information of
 * limited length.
*/
void print_worker_info(const char *log_lvl, struct task_struct *task)
{
work_func_t *fn = NULL;
char name[WQ_NAME_LEN] = { };
char desc[WORKER_DESC_LEN] = { };
struct pool_workqueue *pwq = NULL;
struct workqueue_struct *wq = NULL;
struct worker *worker;
if (!(task->flags & PF_WQ_WORKER))
return;
/*
* This function is called without any synchronization and @task
* could be in any state. Be careful with dereferences.
*/
worker = kthread_probe_data(task);
/*
* Carefully copy the associated workqueue's workfn, name and desc.
* Keep the original last '\0' in case the original is garbage.
*/
probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
probe_kernel_read(name, wq->name, sizeof(name) - 1);
probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
if (fn || name[0] || desc[0]) {
printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
if (strcmp(name, desc))
pr_cont(" (%s)", desc);
pr_cont("\n");
}
}
static void pr_cont_pool_info(struct worker_pool *pool)
{
pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
if (pool->node != NUMA_NO_NODE)
pr_cont(" node=%d", pool->node);
pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
}
static void pr_cont_work(bool comma, struct work_struct *work)
{
if (work->func == wq_barrier_func) {
struct wq_barrier *barr;
barr = container_of(work, struct wq_barrier, work);
pr_cont("%s BAR(%d)", comma ? "," : "",
task_pid_nr(barr->task));
} else {
pr_cont("%s %pf", comma ? "," : "", work->func);
}
}
static void show_pwq(struct pool_workqueue *pwq)
{
struct worker_pool *pool = pwq->pool;
struct work_struct *work;
struct worker *worker;
bool has_in_flight = false, has_pending = false;
int bkt;
pr_info(" pwq %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" active=%d/%d refcnt=%d%s\n",
pwq->nr_active, pwq->max_active, pwq->refcnt,
!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
if (worker->current_pwq == pwq) {
has_in_flight = true;
break;
}
}
if (has_in_flight) {
bool comma = false;
pr_info(" in-flight:");
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
if (worker->current_pwq != pwq)
continue;
pr_cont("%s %d%s:%pf", comma ? "," : "",
task_pid_nr(worker->task),
worker == pwq->wq->rescuer ? "(RESCUER)" : "",
worker->current_func);
list_for_each_entry(work, &worker->scheduled, entry)
pr_cont_work(false, work);
comma = true;
}
pr_cont("\n");
}
list_for_each_entry(work, &pool->worklist, entry) {
if (get_work_pwq(work) == pwq) {
has_pending = true;
break;
}
}
if (has_pending) {
bool comma = false;
pr_info(" pending:");
list_for_each_entry(work, &pool->worklist, entry) {
if (get_work_pwq(work) != pwq)
continue;
pr_cont_work(comma, work);
comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
}
pr_cont("\n");
}
if (!list_empty(&pwq->delayed_works)) {
bool comma = false;
pr_info(" delayed:");
list_for_each_entry(work, &pwq->delayed_works, entry) {
pr_cont_work(comma, work);
comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
}
pr_cont("\n");
}
}
/**
* show_workqueue_state - dump workqueue state
*
* Called from a sysrq handler or try_to_freeze_tasks() and prints out
* all busy workqueues and pools.
*/
void show_workqueue_state(void)
{
struct workqueue_struct *wq;
struct worker_pool *pool;
unsigned long flags;
int pi;
rcu_read_lock_sched();
pr_info("Showing busy workqueues and worker pools:\n");
list_for_each_entry_rcu(wq, &workqueues, list) {
struct pool_workqueue *pwq;
bool idle = true;
for_each_pwq(pwq, wq) {
if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
idle = false;
break;
}
}
if (idle)
continue;
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
spin_lock_irqsave(&pwq->pool->lock, flags);
if (pwq->nr_active || !list_empty(&pwq->delayed_works))
show_pwq(pwq);
spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
* hard lockup.
*/
touch_nmi_watchdog();
}
}
for_each_pool(pool, pi) {
struct worker *worker;
bool first = true;
spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" hung=%us workers=%d",
jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
pool->nr_workers);
if (pool->manager)
pr_cont(" manager: %d",
task_pid_nr(pool->manager->task));
list_for_each_entry(worker, &pool->idle_list, entry) {
pr_cont(" %s%d", first ? "idle: " : "",
task_pid_nr(worker->task));
first = false;
}
pr_cont("\n");
next_pool:
spin_unlock_irqrestore(&pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
* hard lockup.
*/
touch_nmi_watchdog();
}
rcu_read_unlock_sched();
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
{
int off;
/* always show the actual comm */
off = strscpy(buf, task->comm, size);
if (off < 0)
return;
/* stabilize PF_WQ_WORKER and worker pool association */
mutex_lock(&wq_pool_attach_mutex);
if (task->flags & PF_WQ_WORKER) {
struct worker *worker = kthread_data(task);
struct worker_pool *pool = worker->pool;
if (pool) {
spin_lock_irq(&pool->lock);
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
* current, prepend '+', otherwise '-'.
*/
if (worker->desc[0] != '\0') {
if (worker->current_work)
scnprintf(buf + off, size - off, "+%s",
worker->desc);
else
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
spin_unlock_irq(&pool->lock);
}
}
mutex_unlock(&wq_pool_attach_mutex);
}
#ifdef CONFIG_SMP
/*
* CPU hotplug.
*
* There are two challenges in supporting CPU hotplug. Firstly, there
* are a lot of assumptions on strong associations among work, pwq and
* pool which make migrating pending and scheduled works very
* difficult to implement without impacting hot paths. Secondly,
 * worker pools serve a mix of short, long and very long running works,
 * making blocked draining impractical.
*
 * This is solved by allowing the pools to be disassociated from the CPU,
 * running as unbound ones, and allowing them to be reattached later if the
 * CPU comes back online.
*/
static void unbind_workers(int cpu)
{
struct worker_pool *pool;
struct worker *worker;
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
spin_lock_irq(&pool->lock);
/*
* We've blocked all attach/detach operations. Make all workers
* unbound and set DISASSOCIATED. Before this, all workers
* except for the ones which are still executing works from
* before the last CPU down must be on the cpu. After
* this, they may become diasporas.
*/
for_each_pool_worker(worker, pool)
worker->flags |= WORKER_UNBOUND;
pool->flags |= POOL_DISASSOCIATED;
spin_unlock_irq(&pool->lock);
mutex_unlock(&wq_pool_attach_mutex);
/*
* Call schedule() so that we cross rq->lock and thus can
* guarantee sched callbacks see the %WORKER_UNBOUND flag.
* This is necessary as scheduler callbacks may be invoked
* from other cpus.
*/
schedule();
/*
* Sched callbacks are disabled now. Zap nr_running.
* After this, nr_running stays zero and need_more_worker()
* and keep_working() are always true as long as the
* worklist is not empty. This pool now behaves as an
* unbound (in terms of concurrency management) pool which
 * is served by workers tied to the pool.
*/
atomic_set(&pool->nr_running, 0);
/*
* With concurrency management just turned off, a busy
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
spin_lock_irq(&pool->lock);
wake_up_worker(pool);
spin_unlock_irq(&pool->lock);
}
}
/**
* rebind_workers - rebind all workers of a pool to the associated CPU
* @pool: pool of interest
*
* @pool->cpu is coming online. Rebind all workers to the CPU.
*/
static void rebind_workers(struct worker_pool *pool)
{
struct worker *worker;
lockdep_assert_held(&wq_pool_attach_mutex);
/*
* Restore CPU affinity of all workers. As all idle workers should
* be on the run-queue of the associated CPU before any local
* wake-ups for concurrency management happen, restore CPU affinity
* of all workers first and then clear UNBOUND. As we're called
* from CPU_ONLINE, the following shouldn't fail.
*/
for_each_pool_worker(worker, pool)
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
spin_lock_irq(&pool->lock);
pool->flags &= ~POOL_DISASSOCIATED;
for_each_pool_worker(worker, pool) {
unsigned int worker_flags = worker->flags;
/*
* A bound idle worker should actually be on the runqueue
* of the associated CPU for local wake-ups targeting it to
* work. Kick all idle workers so that they migrate to the
* associated CPU. Doing this in the same loop as
* replacing UNBOUND with REBOUND is safe as no worker will
* be bound before @pool->lock is released.
*/
if (worker_flags & WORKER_IDLE)
wake_up_process(worker->task);
/*
* We want to clear UNBOUND but can't directly call
* worker_clr_flags() or adjust nr_running. Atomically
* replace UNBOUND with another NOT_RUNNING flag REBOUND.
* @worker will clear REBOUND using worker_clr_flags() when
* it initiates the next execution cycle thus restoring
* concurrency management. Note that when or whether
* @worker clears REBOUND doesn't affect correctness.
*
* WRITE_ONCE() is necessary because @worker->flags may be
* tested without holding any lock in
* wq_worker_waking_up(). Without it, NOT_RUNNING test may
* fail incorrectly leading to premature concurrency
* management operations.
*/
WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
worker_flags |= WORKER_REBOUND;
worker_flags &= ~WORKER_UNBOUND;
WRITE_ONCE(worker->flags, worker_flags);
}
spin_unlock_irq(&pool->lock);
}
/**
* restore_unbound_workers_cpumask - restore cpumask of unbound workers
* @pool: unbound pool of interest
* @cpu: the CPU which is coming up
*
* An unbound pool may end up with a cpumask which doesn't have any online
 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
* its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
* online CPU before, cpus_allowed of all its workers should be restored.
*/
static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
{
static cpumask_t cpumask;
struct worker *worker;
lockdep_assert_held(&wq_pool_attach_mutex);
/* is @cpu allowed for @pool? */
if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
return;
cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
/* as we're called from CPU_ONLINE, the following shouldn't fail */
for_each_pool_worker(worker, pool)
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
}
int workqueue_prepare_cpu(unsigned int cpu)
{
struct worker_pool *pool;
for_each_cpu_worker_pool(pool, cpu) {
if (pool->nr_workers)
continue;
if (!create_worker(pool))
return -ENOMEM;
}
return 0;
}
int workqueue_online_cpu(unsigned int cpu)
{
struct worker_pool *pool;
struct workqueue_struct *wq;
int pi;
mutex_lock(&wq_pool_mutex);
for_each_pool(pool, pi) {
mutex_lock(&wq_pool_attach_mutex);
if (pool->cpu == cpu)
rebind_workers(pool);
else if (pool->cpu < 0)
restore_unbound_workers_cpumask(pool, cpu);
mutex_unlock(&wq_pool_attach_mutex);
}
/* update NUMA affinity of unbound workqueues */
list_for_each_entry(wq, &workqueues, list)
wq_update_unbound_numa(wq, cpu, true);
mutex_unlock(&wq_pool_mutex);
return 0;
}
int workqueue_offline_cpu(unsigned int cpu)
{
struct workqueue_struct *wq;
/* unbinding per-cpu workers should happen on the local CPU */
if (WARN_ON(cpu != smp_processor_id()))
return -1;
unbind_workers(cpu);
/* update NUMA affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
list_for_each_entry(wq, &workqueues, list)
wq_update_unbound_numa(wq, cpu, false);
mutex_unlock(&wq_pool_mutex);
return 0;
}
struct work_for_cpu {
struct work_struct work;
long (*fn)(void *);
void *arg;
long ret;
};
static void work_for_cpu_fn(struct work_struct *work)
{
struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
wfc->ret = wfc->fn(wfc->arg);
}
/**
* work_on_cpu - run a function in thread context on a particular cpu
* @cpu: the cpu to run on
* @fn: the function to run
* @arg: the function arg
*
* It is up to the caller to ensure that the cpu doesn't go offline.
* The caller must not hold any locks which would prevent @fn from completing.
*
* Return: The value @fn returns.
*/
long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
struct work_for_cpu wfc = { .fn = fn, .arg = arg };
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
flush_work(&wfc.work);
destroy_work_on_stack(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
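/*
* Illustrative caller sketch (do_probe(), struct probe_args and
* probe_device() are hypothetical, not kernel APIs):
*
*	static long do_probe(void *arg)
*	{
*		struct probe_args *p = arg;
*		return probe_device(p->dev);
*	}
*
*	ret = work_on_cpu(target_cpu, do_probe, &args);
*
* do_probe() runs in a kworker bound to target_cpu; the caller sleeps in
* flush_work() until it completes and then finds its return value in ret.
*/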
/**
* work_on_cpu_safe - run a function in thread context on a particular cpu
* @cpu: the cpu to run on
* @fn: the function to run
* @arg: the function argument
*
* Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
* any locks which would prevent @fn from completing.
*
* Return: The value @fn returns.
*/
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
long ret = -ENODEV;
get_online_cpus();
if (cpu_online(cpu))
ret = work_on_cpu(cpu, fn, arg);
put_online_cpus();
return ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu_safe);
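/*
* Compared with work_on_cpu(), work_on_cpu_safe() pins CPU hotplug itself,
* so a hypothetical caller (reusing do_probe() from the sketch above) can
* simply do
*
*	ret = work_on_cpu_safe(cpu, do_probe, &args);
*
* and gets -ENODEV back rather than racing against @cpu going offline.
*/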
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
/**
* freeze_workqueues_begin - begin freezing workqueues
*
* Start freezing workqueues. After this function returns, all freezable
* workqueues will queue new works to their delayed_works list instead of
* pool->worklist.
*
* CONTEXT:
* Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
*/
void freeze_workqueues_begin(void)
{
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
WARN_ON_ONCE(workqueue_freezing);
workqueue_freezing = true;
list_for_each_entry(wq, &workqueues, list) {
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq)
pwq_adjust_max_active(pwq);
mutex_unlock(&wq->mutex);
}
mutex_unlock(&wq_pool_mutex);
}
/**
* freeze_workqueues_busy - are freezable workqueues still busy?
*
* Check whether freezing is complete. This function must be called
* between freeze_workqueues_begin() and thaw_workqueues().
*
* CONTEXT:
* Grabs and releases wq_pool_mutex.
*
* Return:
* %true if some freezable workqueues are still busy. %false if freezing
* is complete.
*/
bool freeze_workqueues_busy(void)
{
bool busy = false;
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
WARN_ON_ONCE(!workqueue_freezing);
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_FREEZABLE))
continue;
/*
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
rcu_read_lock_sched();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
rcu_read_unlock_sched();
goto out_unlock;
}
}
rcu_read_unlock_sched();
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
return busy;
}
/**
* thaw_workqueues - thaw workqueues
*
* Thaw workqueues. Normal queueing is restored and all collected
* frozen works are transferred to their respective pool worklists.
*
* CONTEXT:
* Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
*/
void thaw_workqueues(void)
{
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
if (!workqueue_freezing)
goto out_unlock;
workqueue_freezing = false;
/* restore max_active and repopulate worklist */
list_for_each_entry(wq, &workqueues, list) {
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq)
pwq_adjust_max_active(pwq);
mutex_unlock(&wq->mutex);
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
}
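/*
* Rough sketch of how these helpers are sequenced by the PM freezer (not a
* literal copy of the kernel/power code; the polling interval is made up):
*
*	freeze_workqueues_begin();
*	while (freeze_workqueues_busy())
*		msleep(10);
*	... suspend/hibernate work runs while everything is frozen ...
*	thaw_workqueues();
*/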
#endif /* CONFIG_FREEZER */
static int workqueue_apply_unbound_cpumask(void)
{
LIST_HEAD(ctxs);
int ret = 0;
struct workqueue_struct *wq;
struct apply_wqattrs_ctx *ctx, *n;
lockdep_assert_held(&wq_pool_mutex);
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_UNBOUND))
continue;
/* creating multiple pwqs breaks ordering guarantee */
if (wq->flags & __WQ_ORDERED)
continue;
ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
if (!ctx) {
ret = -ENOMEM;
break;
}
list_add_tail(&ctx->list, &ctxs);
}
list_for_each_entry_safe(ctx, n, &ctxs, list) {
if (!ret)
apply_wqattrs_commit(ctx);
apply_wqattrs_cleanup(ctx);
}
return ret;
}
/**
* workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
* @cpumask: the cpumask to set
*
* The low-level workqueues cpumask is a global cpumask that limits
* the affinity of all unbound workqueues. This function checks @cpumask,
* applies it to all unbound workqueues and updates all their pwqs.
*
* Return: 0 - Success
* -EINVAL - Invalid @cpumask
* -ENOMEM - Failed to allocate memory for attrs or pwqs.
*/
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
{
int ret = -EINVAL;
cpumask_var_t saved_cpumask;
if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
return -ENOMEM;
/*
* Not excluding isolated cpus on purpose.
* If the user wishes to include them, we allow that.
*/
cpumask_and(cpumask, cpumask, cpu_possible_mask);
if (!cpumask_empty(cpumask)) {
apply_wqattrs_lock();
/* save the old wq_unbound_cpumask. */
cpumask_copy(saved_cpumask, wq_unbound_cpumask);
/* update wq_unbound_cpumask at first and apply it to wqs. */
cpumask_copy(wq_unbound_cpumask, cpumask);
ret = workqueue_apply_unbound_cpumask();
/* restore the wq_unbound_cpumask when failed. */
if (ret < 0)
cpumask_copy(wq_unbound_cpumask, saved_cpumask);
apply_wqattrs_unlock();
}
free_cpumask_var(saved_cpumask);
return ret;
}
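/*
* Illustrative in-kernel caller (a sketch; the chosen mask is arbitrary).
* The sysfs 'cpumask' store handler below follows the same pattern:
*
*	cpumask_var_t mask;
*
*	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
*		return -ENOMEM;
*	cpumask_copy(mask, cpumask_of(0));
*	ret = workqueue_set_unbound_cpumask(mask);
*	free_cpumask_var(mask);
*
* which would confine all unbound work items to CPU0.
*/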
#ifdef CONFIG_SYSFS
/*
* Workqueues with the WQ_SYSFS flag set are visible to userland via
* /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
* following attributes.
*
* per_cpu RO bool : whether the workqueue is per-cpu or unbound
* max_active RW int : maximum number of in-flight work items
*
* Unbound workqueues have the following extra attributes.
*
* pool_ids RO int : the associated pool IDs for each node
* nice RW int : nice value of the workers
* cpumask RW mask : bitmask of allowed CPUs for the workers
* numa RW bool : whether to enable NUMA affinity
*/
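/*
* For example, assuming a WQ_SYSFS workqueue named "writeback" exists on
* the running kernel (paths and values are illustrative only):
*
*	cat /sys/bus/workqueue/devices/writeback/max_active
*	echo 4 > /sys/bus/workqueue/devices/writeback/max_active
*	echo f > /sys/bus/workqueue/devices/writeback/cpumask
*/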
struct wq_device {
struct workqueue_struct *wq;
struct device dev;
};
static struct workqueue_struct *dev_to_wq(struct device *dev)
{
struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
return wq_dev->wq;
}
static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
}
static DEVICE_ATTR_RO(per_cpu);
static ssize_t max_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
}
static ssize_t max_active_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int val;
if (sscanf(buf, "%d", &val) != 1 || val <= 0)
return -EINVAL;
workqueue_set_max_active(wq, val);
return count;
}
static DEVICE_ATTR_RW(max_active);
static struct attribute *wq_sysfs_attrs[] = {
&dev_attr_per_cpu.attr,
&dev_attr_max_active.attr,
NULL,
};
ATTRIBUTE_GROUPS(wq_sysfs);
static ssize_t wq_pool_ids_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
const char *delim = "";
int node, written = 0;
rcu_read_lock_sched();
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
unbound_pwq_by_node(wq, node)->pool->id);
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
rcu_read_unlock_sched();
return written;
}
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int written;
mutex_lock(&wq->mutex);
written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
mutex_unlock(&wq->mutex);
return written;
}
/* prepare workqueue_attrs for sysfs store operations */
static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
{
struct workqueue_attrs *attrs;
lockdep_assert_held(&wq_pool_mutex);
attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (!attrs)
return NULL;
copy_workqueue_attrs(attrs, wq->unbound_attrs);
return attrs;
}
static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
struct workqueue_attrs *attrs;
int ret = -ENOMEM;
apply_wqattrs_lock();
attrs = wq_sysfs_prep_attrs(wq);
if (!attrs)
goto out_unlock;
if (sscanf(buf, "%d", &attrs->nice) == 1 &&
attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
ret = apply_workqueue_attrs_locked(wq, attrs);
else
ret = -EINVAL;
out_unlock:
apply_wqattrs_unlock();
free_workqueue_attrs(attrs);
return ret ?: count;
}
static ssize_t wq_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int written;
mutex_lock(&wq->mutex);
written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
cpumask_pr_args(wq->unbound_attrs->cpumask));
mutex_unlock(&wq->mutex);
return written;
}
static ssize_t wq_cpumask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
struct workqueue_attrs *attrs;
int ret = -ENOMEM;
apply_wqattrs_lock();
attrs = wq_sysfs_prep_attrs(wq);
if (!attrs)
goto out_unlock;
ret = cpumask_parse(buf, attrs->cpumask);
if (!ret)
ret = apply_workqueue_attrs_locked(wq, attrs);
out_unlock:
apply_wqattrs_unlock();
free_workqueue_attrs(attrs);
return ret ?: count;
}
static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int written;
mutex_lock(&wq->mutex);
written = scnprintf(buf, PAGE_SIZE, "%d\n",
!wq->unbound_attrs->no_numa);
mutex_unlock(&wq->mutex);
return written;
}
static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
struct workqueue_attrs *attrs;
int v, ret = -ENOMEM;
apply_wqattrs_lock();
attrs = wq_sysfs_prep_attrs(wq);
if (!attrs)
goto out_unlock;
ret = -EINVAL;
if (sscanf(buf, "%d", &v) == 1) {
attrs->no_numa = !v;
ret = apply_workqueue_attrs_locked(wq, attrs);
}
out_unlock:
apply_wqattrs_unlock();
free_workqueue_attrs(attrs);
return ret ?: count;
}
static struct device_attribute wq_sysfs_unbound_attrs[] = {
__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
__ATTR_NULL,
};
static struct bus_type wq_subsys = {
.name = "workqueue",
.dev_groups = wq_sysfs_groups,
};
static ssize_t wq_unbound_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int written;
mutex_lock(&wq_pool_mutex);
written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
cpumask_pr_args(wq_unbound_cpumask));
mutex_unlock(&wq_pool_mutex);
return written;
}
static ssize_t wq_unbound_cpumask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
cpumask_var_t cpumask;
int ret;
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
ret = cpumask_parse(buf, cpumask);
if (!ret)
ret = workqueue_set_unbound_cpumask(cpumask);
free_cpumask_var(cpumask);
return ret ? ret : count;
}
static struct device_attribute wq_sysfs_cpumask_attr =
__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
wq_unbound_cpumask_store);
static int __init wq_sysfs_init(void)
{
int err;
err = subsys_virtual_register(&wq_subsys, NULL);
if (err)
return err;
return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
}
core_initcall(wq_sysfs_init);
static void wq_device_release(struct device *dev)
{
struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
kfree(wq_dev);
}
/**
* workqueue_sysfs_register - make a workqueue visible in sysfs
* @wq: the workqueue to register
*
* Expose @wq in sysfs under /sys/bus/workqueue/devices.
* alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
* which is the preferred method.
*
* A workqueue user should use this function directly iff it wants to apply
* workqueue_attrs before making the workqueue visible in sysfs; otherwise,
* apply_workqueue_attrs() may race against userland updating the
* attributes.
*
* Return: 0 on success, -errno on failure.
*/
int workqueue_sysfs_register(struct workqueue_struct *wq)
{
struct wq_device *wq_dev;
int ret;
/*
* Adjusting max_active or creating new pwqs by applying
* attributes breaks ordering guarantee. Disallow exposing ordered
* workqueues.
*/
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;
wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
if (!wq_dev)
return -ENOMEM;
wq_dev->wq = wq;
wq_dev->dev.bus = &wq_subsys;
wq_dev->dev.release = wq_device_release;
dev_set_name(&wq_dev->dev, "%s", wq->name);
/*
* unbound_attrs are created separately. Suppress uevent until
* everything is ready.
*/
dev_set_uevent_suppress(&wq_dev->dev, true);
ret = device_register(&wq_dev->dev);
if (ret) {
put_device(&wq_dev->dev);
wq->wq_dev = NULL;
return ret;
}
if (wq->flags & WQ_UNBOUND) {
struct device_attribute *attr;
for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
ret = device_create_file(&wq_dev->dev, attr);
if (ret) {
device_unregister(&wq_dev->dev);
wq->wq_dev = NULL;
return ret;
}
}
}
dev_set_uevent_suppress(&wq_dev->dev, false);
kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
return 0;
}
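/*
* Sketch of the direct-registration pattern described above (the workqueue
* name and attribute values are hypothetical and error handling is
* omitted). WQ_SYSFS is deliberately not passed to alloc_workqueue():
*
*	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
*	attrs = alloc_workqueue_attrs(GFP_KERNEL);
*	attrs->nice = -10;
*	apply_workqueue_attrs(wq, attrs);
*	free_workqueue_attrs(attrs);
*	workqueue_sysfs_register(wq);
*
* so userland only sees the workqueue after the attributes are in place.
*/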
/**
* workqueue_sysfs_unregister - undo workqueue_sysfs_register()
* @wq: the workqueue to unregister
*
* If @wq is registered to sysfs by workqueue_sysfs_register(), unregister it.
*/
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
{
struct wq_device *wq_dev = wq->wq_dev;
if (!wq->wq_dev)
return;
wq->wq_dev = NULL;
device_unregister(&wq_dev->dev);
}
#else /* CONFIG_SYSFS */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
#endif /* CONFIG_SYSFS */
/*
* Workqueue watchdog.
*
* A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
* illegal flush dependency, or a concurrency-managed work item which stays
* RUNNING indefinitely. Workqueue stalls can be very difficult to debug as the
* usual warning mechanisms don't trigger and internal workqueue state is
* largely opaque.
*
* The workqueue watchdog monitors all worker pools periodically and dumps
* their state if some pools fail to make forward progress for a while, where
* forward progress is defined as the first item on ->worklist changing.
*
* This mechanism is controlled through the kernel parameter
* "workqueue.watchdog_thresh" which can be updated at runtime through the
* corresponding sysfs parameter file.
*/
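/*
* For example, a 60 second threshold could be requested either on the
* kernel command line with
*
*	workqueue.watchdog_thresh=60
*
* or at runtime with
*
*	echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
*
* (the value 60 is purely illustrative).
*/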
#ifdef CONFIG_WQ_WATCHDOG
static unsigned long wq_watchdog_thresh = 30;
static struct timer_list wq_watchdog_timer;
static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
static void wq_watchdog_reset_touched(void)
{
int cpu;
wq_watchdog_touched = jiffies;
for_each_possible_cpu(cpu)
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
}
static void wq_watchdog_timer_fn(struct timer_list *unused)
{
unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
bool lockup_detected = false;
struct worker_pool *pool;
int pi;
if (!thresh)
return;
rcu_read_lock();
for_each_pool(pool, pi) {
unsigned long pool_ts, touched, ts;
if (list_empty(&pool->worklist))
continue;
/* get the latest of pool and touched timestamps */
pool_ts = READ_ONCE(pool->watchdog_ts);
touched = READ_ONCE(wq_watchdog_touched);
if (time_after(pool_ts, touched))
ts = pool_ts;
else
ts = touched;
if (pool->cpu >= 0) {
unsigned long cpu_touched =
READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
pool->cpu));
if (time_after(cpu_touched, ts))
ts = cpu_touched;
}
/* did we stall? */
if (time_after(jiffies, ts + thresh)) {
lockup_detected = true;
pr_emerg("BUG: workqueue lockup - pool");
pr_cont_pool_info(pool);
pr_cont(" stuck for %us!\n",
jiffies_to_msecs(jiffies - pool_ts) / 1000);
}
}
rcu_read_unlock();
if (lockup_detected)
show_workqueue_state();
wq_watchdog_reset_touched();
mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
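/*
* wq_watchdog_touch() is called from the softlockup paths (e.g.
* touch_softlockup_watchdog() in kernel/watchdog.c, with cpu == -1 meaning
* "all CPUs") so that code which legitimately stalls a CPU keeps the
* workqueue watchdog quiet along with the softlockup detector.
*/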
notrace void wq_watchdog_touch(int cpu)
{
if (cpu >= 0)
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
else
wq_watchdog_touched = jiffies;
}
static void wq_watchdog_set_thresh(unsigned long thresh)
{
wq_watchdog_thresh = 0;
del_timer_sync(&wq_watchdog_timer);
if (thresh) {
wq_watchdog_thresh = thresh;
wq_watchdog_reset_touched();
mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
}
}
static int wq_watchdog_param_set_thresh(const char *val,
const struct kernel_param *kp)
{
unsigned long thresh;
int ret;
ret = kstrtoul(val, 0, &thresh);
if (ret)
return ret;
if (system_wq)
wq_watchdog_set_thresh(thresh);
else
wq_watchdog_thresh = thresh;
return 0;
}
static const struct kernel_param_ops wq_watchdog_thresh_ops = {
.set = wq_watchdog_param_set_thresh,
.get = param_get_ulong,
};
module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
0644);
static void wq_watchdog_init(void)
{
timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
wq_watchdog_set_thresh(wq_watchdog_thresh);
}
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_init(void) { }
#endif /* CONFIG_WQ_WATCHDOG */
static void __init wq_numa_init(void)
{
cpumask_var_t *tbl;
int node, cpu;
if (num_possible_nodes() <= 1)
return;
if (wq_disable_numa) {
pr_info("workqueue: NUMA affinity support disabled\n");
return;
}
wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
BUG_ON(!wq_update_unbound_numa_attrs_buf);
/*
* We want masks of possible CPUs of each node which isn't readily
* available. Build one from cpu_to_node() which should have been
* fully initialized by now.
*/
tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
BUG_ON(!tbl);
for_each_node(node)
BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
node_online(node) ? node : NUMA_NO_NODE));
for_each_possible_cpu(cpu) {
node = cpu_to_node(cpu);
if (WARN_ON(node == NUMA_NO_NODE)) {
pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
/* happens iff arch is bonkers, let's just proceed */
return;
}
cpumask_set_cpu(cpu, tbl[node]);
}
wq_numa_possible_cpumask = tbl;
wq_numa_enabled = true;
}
/**
* workqueue_init_early - early init for workqueue subsystem
*
* This is the first half of two-staged workqueue subsystem initialization
* and invoked as soon as the bare basics - memory allocation, cpumasks and
* idr are up. It sets up all the data structures and system workqueues
* and allows early boot code to create workqueues and queue/cancel work
* items. Actual work item execution starts only after kthreads can be
* created and scheduled right before early initcalls.
*/
int __init workqueue_init_early(void)
{
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
int i, cpu;
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
/* initialize CPU pools */
for_each_possible_cpu(cpu) {
struct worker_pool *pool;
i = 0;
for_each_cpu_worker_pool(pool, cpu) {
BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
pool->attrs->nice = std_nice[i++];
pool->node = cpu_to_node(cpu);
/* alloc pool ID */
mutex_lock(&wq_pool_mutex);
BUG_ON(worker_pool_assign_id(pool));
mutex_unlock(&wq_pool_mutex);
}
}
/* create default unbound and ordered wq attrs */
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
struct workqueue_attrs *attrs;
BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
attrs->nice = std_nice[i];
unbound_std_wq_attrs[i] = attrs;
/*
* An ordered wq should have only one pwq as ordering is
* guaranteed by max_active which is enforced by pwqs.
* Turn off NUMA so that dfl_pwq is used for all nodes.
*/
BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
attrs->nice = std_nice[i];
attrs->no_numa = true;
ordered_wq_attrs[i] = attrs;
}
system_wq = alloc_workqueue("events", 0, 0);
system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
system_long_wq = alloc_workqueue("events_long", 0, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
system_freezable_wq = alloc_workqueue("events_freezable",
WQ_FREEZABLE, 0);
system_power_efficient_wq = alloc_workqueue("events_power_efficient",
WQ_POWER_EFFICIENT, 0);
system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
WQ_FREEZABLE | WQ_POWER_EFFICIENT,
0);
BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
!system_unbound_wq || !system_freezable_wq ||
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq);
return 0;
}
/**
* workqueue_init - bring workqueue subsystem fully online
*
* This is the latter half of two-staged workqueue subsystem initialization
* and invoked as soon as kthreads can be created and scheduled.
* Workqueues have been created and work items queued on them, but there
* are no kworkers executing the work items yet. Populate the worker pools
* with the initial workers and enable future kworker creations.
*/
int __init workqueue_init(void)
{
struct workqueue_struct *wq;
struct worker_pool *pool;
int cpu, bkt;
/*
* It'd be simpler to initialize NUMA in workqueue_init_early() but
* CPU to node mapping may not be available that early on some
* archs such as power and arm64. As the per-cpu pools created
* previously could be missing their node hint, and the unbound pools
* their NUMA affinity, fix them up here.
*
* Also, while iterating workqueues, create rescuers if requested.
*/
wq_numa_init();
mutex_lock(&wq_pool_mutex);
for_each_possible_cpu(cpu) {
for_each_cpu_worker_pool(pool, cpu) {
pool->node = cpu_to_node(cpu);
}
}
list_for_each_entry(wq, &workqueues, list) {
wq_update_unbound_numa(wq, smp_processor_id(), true);
WARN(init_rescuer(wq),
"workqueue: failed to create early rescuer for %s",
wq->name);
}
mutex_unlock(&wq_pool_mutex);
/* create the initial workers */
for_each_online_cpu(cpu) {
for_each_cpu_worker_pool(pool, cpu) {
pool->flags &= ~POOL_DISASSOCIATED;
BUG_ON(!create_worker(pool));
}
}
hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
BUG_ON(!create_worker(pool));
wq_online = true;
wq_watchdog_init();
return 0;
}