kernel-fxtec-pro1x/kernel/sched/deadline.c
Srinivasarao P 0cc34620e8 Merge android-4.19-stable.136 (204dd19) into msm-4.19
* refs/heads/tmp-204dd19:
  UPSTREAM: driver core: Avoid deferred probe due to fw_devlink_pause/resume()
  UPSTREAM: driver core: Rename dev_links_info.defer_sync to defer_hook
  UPSTREAM: driver core: Don't do deferred probe in parallel with kernel_init thread
  Restore sdcardfs feature
  Revert rpmh and usb changes
  Linux 4.19.136
  regmap: debugfs: check count when read regmap file
  rtnetlink: Fix memory(net_device) leak when ->newlink fails
  udp: Improve load balancing for SO_REUSEPORT.
  udp: Copy has_conns in reuseport_grow().
  sctp: shrink stream outq when fails to do addstream reconf
  sctp: shrink stream outq only when new outcnt < old outcnt
  AX.25: Prevent integer overflows in connect and sendmsg
  tcp: allow at most one TLP probe per flight
  rxrpc: Fix sendmsg() returning EPIPE due to recvmsg() returning ENODATA
  qrtr: orphan socket in qrtr_release()
  net: udp: Fix wrong clean up for IS_UDPLITE macro
  net-sysfs: add a newline when printing 'tx_timeout' by sysfs
  ip6_gre: fix null-ptr-deref in ip6gre_init_net()
  drivers/net/wan/x25_asy: Fix to make it work
  dev: Defer free of skbs in flush_backlog
  AX.25: Prevent out-of-bounds read in ax25_sendmsg()
  AX.25: Fix out-of-bounds read in ax25_connect()
  Linux 4.19.135
  ath9k: Fix regression with Atheros 9271
  ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
  dm integrity: fix integrity recalculation that is improperly skipped
  ASoC: qcom: Drop HAS_DMA dependency to fix link failure
  ASoC: rt5670: Add new gpio1_is_ext_spk_en quirk and enable it on the Lenovo Miix 2 10
  x86, vmlinux.lds: Page-align end of ..page_aligned sections
  parisc: Add atomic64_set_release() define to avoid CPU soft lockups
  drm/amd/powerplay: fix a crash when overclocking Vega M
  drm/amdgpu: Fix NULL dereference in dpm sysfs handlers
  io-mapping: indicate mapping failure
  mm: memcg/slab: fix memory leak at non-root kmem_cache destroy
  mm: memcg/slab: synchronize access to kmem_cache dying flag using a spinlock
  mm/memcg: fix refcount error while moving and swapping
  Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation
  vt: Reject zero-sized screen buffer size.
  fbdev: Detect integer underflow at "struct fbcon_ops"->clear_margins.
  serial: 8250_mtk: Fix high-speed baud rates clamping
  serial: 8250: fix null-ptr-deref in serial8250_start_tx()
  staging: comedi: addi_apci_1564: check INSN_CONFIG_DIGITAL_TRIG shift
  staging: comedi: addi_apci_1500: check INSN_CONFIG_DIGITAL_TRIG shift
  staging: comedi: ni_6527: fix INSN_CONFIG_DIGITAL_TRIG support
  staging: comedi: addi_apci_1032: check INSN_CONFIG_DIGITAL_TRIG shift
  staging: wlan-ng: properly check endpoint types
  Revert "cifs: Fix the target file was deleted when rename failed."
  usb: xhci: Fix ASM2142/ASM3142 DMA addressing
  usb: xhci-mtk: fix the failure of bandwidth allocation
  binder: Don't use mmput() from shrinker function.
  RISC-V: Upgrade smp_mb__after_spinlock() to iorw,iorw
  x86: math-emu: Fix up 'cmp' insn for clang ias
  arm64: Use test_tsk_thread_flag() for checking TIF_SINGLESTEP
  hwmon: (scmi) Fix potential buffer overflow in scmi_hwmon_probe()
  hwmon: (adm1275) Make sure we are reading enough data for different chips
  usb: gadget: udc: gr_udc: fix memleak on error handling path in gr_ep_init()
  Input: synaptics - enable InterTouch for ThinkPad X1E 1st gen
  dmaengine: ioat setting ioat timeout as module parameter
  hwmon: (aspeed-pwm-tacho) Avoid possible buffer overflow
  regmap: dev_get_regmap_match(): fix string comparison
  spi: mediatek: use correct SPI_CFG2_REG MACRO
  Input: add `SW_MACHINE_COVER`
  dmaengine: tegra210-adma: Fix runtime PM imbalance on error
  HID: apple: Disable Fn-key key-re-mapping on clone keyboards
  HID: steam: fixes race in handling device list.
  HID: alps: support devices with report id 2
  HID: i2c-hid: add Mediacom FlexBook edge13 to descriptor override
  scripts/gdb: fix lx-symbols 'gdb.error' while loading modules
  scripts/decode_stacktrace: strip basepath from all paths
  serial: exar: Fix GPIO configuration for Sealevel cards based on XR17V35X
  bonding: check return value of register_netdevice() in bond_newlink()
  i2c: rcar: always clear ICSAR to avoid side effects
  net: ethernet: ave: Fix error returns in ave_init
  ipvs: fix the connection sync failed in some cases
  qed: suppress "don't support RoCE & iWARP" flooding on HW init
  mlxsw: destroy workqueue when trap_register in mlxsw_emad_init
  bonding: check error value of register_netdevice() immediately
  net: smc91x: Fix possible memory leak in smc_drv_probe()
  drm: sun4i: hdmi: Fix inverted HPD result
  ieee802154: fix one possible memleak in adf7242_probe
  net: dp83640: fix SIOCSHWTSTAMP to update the struct with actual configuration
  ax88172a: fix ax88172a_unbind() failures
  hippi: Fix a size used in a 'pci_free_consistent()' in an error handling path
  fpga: dfl: fix bug in port reset handshake
  bnxt_en: Fix race when modifying pause settings.
  btrfs: fix page leaks after failure to lock page for delalloc
  btrfs: fix mount failure caused by race with umount
  btrfs: fix double free on ulist after backref resolution failure
  ASoC: rt5670: Correct RT5670_LDO_SEL_MASK
  ALSA: info: Drop WARN_ON() from buffer NULL sanity check
  uprobes: Change handle_swbp() to send SIGTRAP with si_code=SI_KERNEL, to fix GDB regression
  IB/umem: fix reference count leak in ib_umem_odp_get()
  tipc: clean up skb list lock handling on send path
  spi: spi-fsl-dspi: Exit the ISR with IRQ_NONE when it's not ours
  SUNRPC reverting d03727b248d0 ("NFSv4 fix CLOSE not waiting for direct IO completion")
  irqdomain/treewide: Keep firmware node unconditionally allocated
  fuse: fix weird page warning
  drivers/firmware/psci: Fix memory leakage in alloc_init_cpu_groups()
  drm/nouveau/i2c/g94-: increase NV_PMGR_DP_AUXCTL_TRANSACTREQ timeout
  net: sky2: initialize return of gm_phy_read
  drivers/net/wan/lapbether: Fixed the value of hard_header_len
  xtensa: update *pos in cpuinfo_op.next
  xtensa: fix __sync_fetch_and_{and,or}_4 declarations
  scsi: scsi_transport_spi: Fix function pointer check
  mac80211: allow rx of mesh eapol frames with default rx key
  pinctrl: amd: fix npins for uart0 in kerncz_groups
  gpio: arizona: put pm_runtime in case of failure
  gpio: arizona: handle pm_runtime_get_sync failure case
  soc: qcom: rpmh: Dirt can only make you dirtier, not cleaner
  ANDROID: build: update ABI definitions
  ANDROID: update the kernel release format for GKI
  ANDROID: Incremental fs: magic number compatible 32-bit
  ANDROID: kbuild: don't merge .*..compoundliteral in modules
  ANDROID: GKI: preserve ABI for struct sock_cgroup_data
  Revert "genetlink: remove genl_bind"
  Revert "arm64/alternatives: use subsections for replacement sequences"
  Linux 4.19.134
  spi: sprd: switch the sequence of setting WDG_LOAD_LOW and _HIGH
  rxrpc: Fix trace string
  libceph: don't omit recovery_deletes in target_copy()
  printk: queue wake_up_klogd irq_work only if per-CPU areas are ready
  genirq/affinity: Handle affinity setting on inactive interrupts correctly
  sched/fair: handle case of task_h_load() returning 0
  sched: Fix unreliable rseq cpu_id for new tasks
  arm64: compat: Ensure upper 32 bits of x0 are zero on syscall return
  arm64: ptrace: Consistently use pseudo-singlestep exceptions
  arm64: ptrace: Override SPSR.SS when single-stepping is enabled
  thermal/drivers/cpufreq_cooling: Fix wrong frequency converted from power
  misc: atmel-ssc: lock with mutex instead of spinlock
  dmaengine: fsl-edma: Fix NULL pointer exception in fsl_edma_tx_handler
  intel_th: Fix a NULL dereference when hub driver is not loaded
  intel_th: pci: Add Emmitsburg PCH support
  intel_th: pci: Add Tiger Lake PCH-H support
  intel_th: pci: Add Jasper Lake CPU support
  powerpc/book3s64/pkeys: Fix pkey_access_permitted() for execute disable pkey
  hwmon: (emc2103) fix unable to change fan pwm1_enable attribute
  riscv: use 16KB kernel stack on 64-bit
  MIPS: Fix build for LTS kernel caused by backporting lpj adjustment
  timer: Fix wheel index calculation on last level
  timer: Prevent base->clk from moving backward
  uio_pdrv_genirq: fix use without device tree and no interrupt
  Input: i8042 - add Lenovo XiaoXin Air 12 to i8042 nomux list
  mei: bus: don't clean driver pointer
  Revert "zram: convert remaining CLASS_ATTR() to CLASS_ATTR_RO()"
  fuse: Fix parameter for FS_IOC_{GET,SET}FLAGS
  ovl: fix unneeded call to ovl_change_flags()
  ovl: relax WARN_ON() when decoding lower directory file handle
  ovl: fix inode reference leak in ovl_is_inuse true case.
  serial: mxs-auart: add missed iounmap() in probe failure and remove
  virtio: virtio_console: add missing MODULE_DEVICE_TABLE() for rproc serial
  virt: vbox: Fix guest capabilities mask check
  virt: vbox: Fix VBGL_IOCTL_VMMDEV_REQUEST_BIG and _LOG req numbers to match upstream
  USB: serial: option: add Quectel EG95 LTE modem
  USB: serial: option: add GosunCn GM500 series
  USB: serial: ch341: add new Product ID for CH340
  USB: serial: cypress_m8: enable Simply Automated UPB PIM
  USB: serial: iuu_phoenix: fix memory corruption
  usb: gadget: function: fix missing spinlock in f_uac1_legacy
  usb: chipidea: core: add wakeup support for extcon
  usb: dwc2: Fix shutdown callback in platform
  USB: c67x00: fix use after free in c67x00_giveback_urb
  ALSA: hda/realtek - Enable Speaker for ASUS UX533 and UX534
  ALSA: hda/realtek - change to suitable link model for ASUS platform
  ALSA: usb-audio: Fix race against the error recovery URB submission
  ALSA: line6: Sync the pending work cancel at disconnection
  ALSA: line6: Perform sanity check for each URB creation
  HID: quirks: Ignore Simply Automated UPB PIM
  HID: quirks: Always poll Obins Anne Pro 2 keyboard
  HID: magicmouse: do not set up autorepeat
  slimbus: core: Fix mismatch in of_node_get/put
  mtd: rawnand: oxnas: Release all devices in the _remove() path
  mtd: rawnand: oxnas: Unregister all devices on error
  mtd: rawnand: oxnas: Keep track of registered devices
  mtd: rawnand: brcmnand: fix CS0 layout
  mtd: rawnand: timings: Fix default tR_max and tCCS_min timings
  mtd: rawnand: marvell: Fix probe error path
  mtd: rawnand: marvell: Use nand_cleanup() when the device is not yet registered
  soc: qcom: rpmh-rsc: Allow using free WAKE TCS for active request
  soc: qcom: rpmh-rsc: Clear active mode configuration for wake TCS
  soc: qcom: rpmh: Invalidate SLEEP and WAKE TCSes before flushing new data
  soc: qcom: rpmh: Update dirty flag only when data changes
  perf stat: Zero all the 'ena' and 'run' array slot stats for interval mode
  apparmor: ensure that dfa state tables have entries
  copy_xstate_to_kernel: Fix typo which caused GDB regression
  regmap: debugfs: Don't sleep while atomic for fast_io regmaps
  ARM: dts: socfpga: Align L2 cache-controller nodename with dtschema
  Revert "thermal: mediatek: fix register index error"
  staging: comedi: verify array index is correct before using it
  usb: gadget: udc: atmel: fix uninitialized read in debug printk
  spi: spi-sun6i: sun6i_spi_transfer_one(): fix setting of clock rate
  arm64: dts: meson: add missing gxl rng clock
  phy: sun4i-usb: fix dereference of pointer phy0 before it is null checked
  iio:health:afe4404 Fix timestamp alignment and prevent data leak.
  ALSA: usb-audio: Add registration quirk for Kingston HyperX Cloud Flight S
  ACPI: video: Use native backlight on Acer TravelMate 5735Z
  Input: mms114 - add extra compatible for mms345l
  ALSA: usb-audio: Add registration quirk for Kingston HyperX Cloud Alpha S
  ACPI: video: Use native backlight on Acer Aspire 5783z
  ALSA: usb-audio: Rewrite registration quirk handling
  mmc: sdhci: do not enable card detect interrupt for gpio cd type
  doc: dt: bindings: usb: dwc3: Update entries for disabling SS instances in park mode
  ALSA: usb-audio: Create a registration quirk for Kingston HyperX Amp (0951:16d8)
  scsi: sr: remove references to BLK_DEV_SR_VENDOR, leave it enabled
  ARM: at91: pm: add quirk for sam9x60's ulp1
  HID: quirks: Remove ITE 8595 entry from hid_have_special_driver
  net: sfp: add some quirks for GPON modules
  net: sfp: add support for module quirks
  Revert "usb/ehci-platform: Set PM runtime as active on resume"
  Revert "usb/xhci-plat: Set PM runtime as active on resume"
  Revert "usb/ohci-platform: Fix a warning when hibernating"
  of: of_mdio: Correct loop scanning logic
  net: dsa: bcm_sf2: Fix node reference count
  spi: spi-fsl-dspi: Fix lockup if device is shutdown during SPI transfer
  spi: fix initial SPI_SR value in spi-fsl-dspi
  iio:health:afe4403 Fix timestamp alignment and prevent data leak.
  iio:pressure:ms5611 Fix buffer element alignment
  iio:humidity:hts221 Fix alignment and data leak issues
  iio: pressure: zpa2326: handle pm_runtime_get_sync failure
  iio: mma8452: Add missed iio_device_unregister() call in mma8452_probe()
  iio: magnetometer: ak8974: Fix runtime PM imbalance on error
  iio:humidity:hdc100x Fix alignment and data leak issues
  iio:magnetometer:ak8974: Fix alignment and data leak issues
  arm64/alternatives: don't patch up internal branches
  i2c: eg20t: Load module automatically if ID matches
  gfs2: read-only mounts should grab the sd_freeze_gl glock
  tpm_tis: extra chip->ops check on error path in tpm_tis_core_init
  arm64/alternatives: use subsections for replacement sequences
  m68k: mm: fix node memblock init
  m68k: nommu: register start of the memory with memblock
  drm/exynos: fix ref count leak in mic_pre_enable
  drm/msm: fix potential memleak in error branch
  vlan: consolidate VLAN parsing code and limit max parsing depth
  sched: consistently handle layer3 header accesses in the presence of VLANs
  cgroup: Fix sock_cgroup_data on big-endian.
  cgroup: fix cgroup_sk_alloc() for sk_clone_lock()
  tcp: md5: allow changing MD5 keys in all socket states
  tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers
  tcp: md5: do not send silly options in SYNCOOKIES
  tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()
  tcp: make sure listeners don't initialize congestion-control state
  tcp: fix SO_RCVLOWAT possible hangs under high mem pressure
  net: usb: qmi_wwan: add support for Quectel EG95 LTE modem
  net_sched: fix a memory leak in atm_tc_init()
  net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb
  llc: make sure applications use ARPHRD_ETHER
  l2tp: remove skb_dst_set() from l2tp_xmit_skb()
  ipv4: fill fl4_icmp_{type,code} in ping_v4_sendmsg
  genetlink: remove genl_bind
  net: rmnet: fix lower interface leak
  perf: Make perf able to build with latest libbfd
  UPSTREAM: media: v4l2-ctrl: Add H264 profile and levels
  UPSTREAM: media: v4l2-ctrl: Add control for h.264 chroma qp offset
  ANDROID: GKI: ASoC: compress: revert some code to avoid race condition
  ANDROID: GKI: Update the ABI xml representation.
  ANDROID: GKI: kernel: tick-sched: Add an API for wakeup callbacks
  ANDROID: ASoC: Compress: Check and set pcm_new driver op
  Revert "ANDROID: GKI: arm64: gki_defconfig: Disable CONFIG_ARM64_TAGGED_ADDR_ABI"
  ANDROID: arm64: configs: enable CONFIG_TMPFS
  Revert "ALSA: compress: fix partial_drain completion state"
  ANDROID: GKI: enable CONFIG_EXT4_FS_POSIX_ACL.
  ANDROID: GKI: set CONFIG_STATIC_USERMODEHELPER_PATH
  Linux 4.19.133
  s390/mm: fix huge pte soft dirty copying
  ARC: elf: use right ELF_ARCH
  ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE
  dm: use noio when sending kobject event
  drm/radeon: fix double free
  btrfs: fix fatal extent_buffer readahead vs releasepage race
  Revert "ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb"
  bpf: Check correct cred for CAP_SYSLOG in bpf_dump_raw_ok()
  kprobes: Do not expose probe addresses to non-CAP_SYSLOG
  module: Do not expose section addresses to non-CAP_SYSLOG
  module: Refactor section attr into bin attribute
  kernel: module: Use struct_size() helper
  kallsyms: Refactor kallsyms_show_value() to take cred
  KVM: x86: Mark CR4.TSD as being possibly owned by the guest
  KVM: x86: Inject #GP if guest attempts to toggle CR4.LA57 in 64-bit mode
  KVM: x86: bit 8 of non-leaf PDPEs is not reserved
  KVM: arm64: Stop clobbering x0 for HVC_SOFT_RESTART
  KVM: arm64: Fix definition of PAGE_HYP_DEVICE
  ALSA: usb-audio: add quirk for MacroSilicon MS2109
  ALSA: hda - let hs_mic be picked ahead of hp_mic
  ALSA: opl3: fix infoleak in opl3
  mlxsw: spectrum_router: Remove inappropriate usage of WARN_ON()
  net: macb: mark device wake capable when "magic-packet" property present
  bnxt_en: fix NULL dereference in case SR-IOV configuration fails
  cxgb4: fix all-mask IP address comparison
  nbd: Fix memory leak in nbd_add_socket
  arm64: kgdb: Fix single-step exception handling oops
  ALSA: compress: fix partial_drain completion state
  net: hns3: fix use-after-free when doing self test
  smsc95xx: avoid memory leak in smsc95xx_bind
  smsc95xx: check return value of smsc95xx_reset
  net: cxgb4: fix return error value in t4_prep_fw
  drm/mediatek: Check plane visibility in atomic_update
  net: qrtr: Fix an out of bounds read in qrtr_endpoint_post()
  x86/entry: Increase entry_stack size to a full page
  nvme-rdma: assign completion vector correctly
  block: release bip in a right way in error path
  usb: dwc3: pci: Fix reference count leak in dwc3_pci_resume_work
  scsi: mptscsih: Fix read sense data size
  ARM: imx6: add missing put_device() call in imx6q_suspend_init()
  cifs: update ctime and mtime during truncate
  s390/kasan: fix early pgm check handler execution
  drm: panel-orientation-quirks: Use generic orientation-data for Acer S1003
  drm: panel-orientation-quirks: Add quirk for Asus T101HA panel
  i40e: protect ring accesses with READ- and WRITE_ONCE
  ixgbe: protect ring accesses with READ- and WRITE_ONCE
  spi: spidev: fix a potential use-after-free in spidev_release()
  spi: spidev: fix a race between spidev_release and spidev_remove
  gpu: host1x: Detach driver on unregister
  drm/tegra: hub: Do not enable orphaned window group
  ARM: dts: omap4-droid4: Fix spi configuration and increase rate
  regmap: fix alignment issue
  spi: spi-fsl-dspi: Fix external abort on interrupt in resume or exit paths
  spi: spi-fsl-dspi: use IRQF_SHARED mode to request IRQ
  spi: spi-fsl-dspi: Fix lockup if device is removed during SPI transfer
  spi: spi-fsl-dspi: Adding shutdown hook
  KVM: s390: reduce number of IO pins to 1
  ANDROID: GKI: update abi based on padding fields being added
  ANDROID: GKI: USB: Gadget: add Android ABI padding to struct usb_gadget
  ANDROID: GKI: sound/usb/card.h: add Android ABI padding to struct snd_usb_endpoint
  ANDROID: fscrypt: fix DUN contiguity with inline encryption + IV_INO_LBLK_32 policies
  ANDROID: f2fs: add back compress inode check
  Linux 4.19.132
  efi: Make it possible to disable efivar_ssdt entirely
  dm zoned: assign max_io_len correctly
  irqchip/gic: Atomically update affinity
  MIPS: Add missing EHB in mtc0 -> mfc0 sequence for DSPen
  cifs: Fix the target file was deleted when rename failed.
  SMB3: Honor lease disabling for multiuser mounts
  SMB3: Honor persistent/resilient handle flags for multiuser mounts
  SMB3: Honor 'seal' flag for multiuser mounts
  Revert "ALSA: usb-audio: Improve frames size computation"
  nfsd: apply umask on fs without ACL support
  i2c: mlxcpld: check correct size of maximum RECV_LEN packet
  i2c: algo-pca: Add 0x78 as SCL stuck low status for PCA9665
  nvme: fix a crash in nvme_mpath_add_disk
  SMB3: Honor 'posix' flag for multiuser mounts
  virtio-blk: free vblk-vqs in error path of virtblk_probe()
  drm: sun4i: hdmi: Remove extra HPD polling
  hwmon: (acpi_power_meter) Fix potential memory leak in acpi_power_meter_add()
  hwmon: (max6697) Make sure the OVERT mask is set correctly
  cxgb4: fix SGE queue dump destination buffer context
  cxgb4: use correct type for all-mask IP address comparison
  cxgb4: parse TC-U32 key values and masks natively
  cxgb4: use unaligned conversion for fetching timestamp
  drm/msm/dpu: fix error return code in dpu_encoder_init
  crypto: af_alg - fix use-after-free in af_alg_accept() due to bh_lock_sock()
  kgdb: Avoid suspicious RCU usage warning
  nvme-multipath: fix deadlock between ana_work and scan_work
  nvme-multipath: set bdi capabilities once
  s390/debug: avoid kernel warning on too large number of pages
  usb: usbtest: fix missing kfree(dev->buf) in usbtest_disconnect
  mm/slub: fix stack overruns with SLUB_STATS
  mm/slub.c: fix corrupted freechain in deactivate_slab()
  usbnet: smsc95xx: Fix use-after-free after removal
  EDAC/amd64: Read back the scrub rate PCI register on F15h
  mm: fix swap cache node allocation mask
  btrfs: fix a block group ref counter leak after failure to remove block group
  ANDROID: Update ABI representation for libabigail update
  ANDROID: Update the ABI representation
  ANDROID: Update the ABI xml representation
  ANDROID: GKI: fix ABI diffs caused by GPU heap and pool vmstat additions
  ANDROID: sched: consider stune boost margin when computing energy
  ANDROID: GKI: move abi files to android/
  ANDROID: GKI: drop unneeded "_whitelist" off of symbol filenames
  UPSTREAM: binder: fix null deref of proc->context
  ANDROID: cpufreq: schedutil: maintain raw cache when next_f is not changed
  UPSTREAM: net: bpf: Make bpf_ktime_get_ns() available to non GPL programs
  UPSTREAM: usb: musb: mediatek: add reset FADDR to zero in reset interrupt handle
  ANDROID: GKI: scripts: Makefile: update the lz4 command (#2)
  ANDROID: Update the ABI xml representation
  Revert "drm/dsi: Fix byte order of DCS set/get brightness"
  Linux 4.19.131
  Revert "tty: hvc: Fix data abort due to race in hvc_open"
  xfs: add agf freeblocks verify in xfs_agf_verify
  dm writecache: add cond_resched to loop in persistent_memory_claim()
  dm writecache: correct uncommitted_block when discarding uncommitted entry
  NFSv4 fix CLOSE not waiting for direct IO completion
  pNFS/flexfiles: Fix list corruption if the mirror count changes
  SUNRPC: Properly set the @subbuf parameter of xdr_buf_subsegment()
  sunrpc: fixed rollback in rpc_gssd_dummy_populate()
  Staging: rtl8723bs: prevent buffer overflow in update_sta_support_rate()
  drm/radeon: fix fb_div check in ni_init_smc_spll_table()
  drm: rcar-du: Fix build error
  ring-buffer: Zero out time extend if it is nested and not absolute
  tracing: Fix event trigger to accept redundant spaces
  arm64: perf: Report the PC value in REGS_ABI_32 mode
  ocfs2: fix panic on nfs server over ocfs2
  ocfs2: fix value of OCFS2_INVALID_SLOT
  ocfs2: load global_inode_alloc
  ocfs2: avoid inode removal while nfsd is accessing it
  mm/slab: use memzero_explicit() in kzfree()
  btrfs: fix failure of RWF_NOWAIT write into prealloc extent beyond eof
  btrfs: fix data block group relocation failure due to concurrent scrub
  x86/asm/64: Align start of __clear_user() loop to 16-bytes
  KVM: nVMX: Plumb L2 GPA through to PML emulation
  KVM: X86: Fix MSR range of APIC registers in X2APIC mode
  erofs: fix partially uninitialized misuse in z_erofs_onlinepage_fixup
  ACPI: sysfs: Fix pm_profile_attr type
  ALSA: hda/realtek - Add quirk for MSI GE63 laptop
  ALSA: hda: Add NVIDIA codec IDs 9a & 9d through a0 to patch table
  RISC-V: Don't allow write+exec only page mapping request in mmap
  blktrace: break out of blktrace setup on concurrent calls
  kbuild: improve cc-option to clean up all temporary files
  arm64: sve: Fix build failure when ARM64_SVE=y and SYSCTL=n
  s390/vdso: fix vDSO clock_getres()
  s390/ptrace: fix setting syscall number
  net: alx: fix race condition in alx_remove
  ibmvnic: Harden device login requests
  hwrng: ks-sa - Fix runtime PM imbalance on error
  riscv/atomic: Fix sign extension for RV64I
  drm/amd/display: Use kfree() to free rgb_user in calculate_user_regamma_ramp()
  ata/libata: Fix usage of page address by page_address in ata_scsi_mode_select_xlat function
  sata_rcar: handle pm_runtime_get_sync failure cases
  sched/core: Fix PI boosting between RT and DEADLINE tasks
  sched/deadline: Initialize ->dl_boosted
  i2c: core: check returned size of emulated smbus block read
  i2c: fsi: Fix the port number field in status register
  net: bcmgenet: use hardware padding of runt frames
  netfilter: ipset: fix unaligned atomic access
  usb: gadget: udc: Potential Oops in error handling code
  ARM: imx5: add missing put_device() call in imx_suspend_alloc_ocram()
  cxgb4: move handling L2T ARP failures to caller
  net: qed: fix excessive QM ILT lines consumption
  net: qed: fix NVMe login fails over VFs
  net: qed: fix left elements count calculation
  RDMA/mad: Fix possible memory leak in ib_mad_post_receive_mads()
  ASoC: rockchip: Fix a reference count leak.
  RDMA/cma: Protect bind_list and listen_list while finding matching cm id
  RDMA/qedr: Fix KASAN: use-after-free in ucma_event_handler+0x532
  rxrpc: Fix handling of rwind from an ACK packet
  ARM: dts: NSP: Correct FA2 mailbox node
  regmap: Fix memory leak from regmap_register_patch
  x86/resctrl: Fix a NULL vs IS_ERR() static checker warning in rdt_cdp_peer_get()
  ARM: dts: Fix duovero smsc interrupt for suspend
  ASoC: fsl_ssi: Fix bclk calculation for mono channel
  regulator: pfuze100: correct sw1a/sw2 on pfuze3000
  efi/esrt: Fix reference count leak in esre_create_sysfs_entry.
  ASoC: q6asm: handle EOS correctly
  xfrm: Fix double ESP trailer insertion in IPsec crypto offload.
  cifs/smb3: Fix data inconsistent when zero file range
  cifs/smb3: Fix data inconsistent when punch hole
  IB/mad: Fix use after free when destroying MAD agent
  loop: replace kill_bdev with invalidate_bdev
  cdc-acm: Add DISABLE_ECHO quirk for Microchip/SMSC chip
  xhci: Return if xHCI doesn't support LPM
  xhci: Fix enumeration issue when setting max packet size for FS devices.
  xhci: Fix incorrect EP_STATE_MASK
  scsi: zfcp: Fix panic on ERP timeout for previously dismissed ERP action
  ALSA: usb-audio: Fix OOB access of mixer element list
  ALSA: usb-audio: add quirk for Samsung USBC Headset (AKG)
  ALSA: usb-audio: add quirk for Denon DCD-1500RE
  usb: typec: tcpci_rt1711h: avoid screaming irq causing boot hangs
  usb: host: ehci-exynos: Fix error check in exynos_ehci_probe()
  xhci: Poll for U0 after disabling USB2 LPM
  usb: host: xhci-mtk: avoid runtime suspend when removing hcd
  USB: ehci: reopen solution for Synopsys HC bug
  usb: add USB_QUIRK_DELAY_INIT for Logitech C922
  usb: dwc2: Postponed gadget registration to the udc class driver
  USB: ohci-sm501: Add missed iounmap() in remove
  net: core: reduce recursion limit value
  net: Do not clear the sock TX queue in sk_set_socket()
  net: Fix the arp error in some cases
  sch_cake: don't call diffserv parsing code when it is not needed
  tcp_cubic: fix spurious HYSTART_DELAY exit upon drop in min RTT
  sch_cake: fix a few style nits
  sch_cake: don't try to reallocate or unshare skb unconditionally
  ip_tunnel: fix use-after-free in ip_tunnel_lookup()
  net: phy: Check harder for errors in get_phy_id()
  ip6_gre: fix use-after-free in ip6gre_tunnel_lookup()
  tg3: driver sleeps indefinitely when EEH errors exceed eeh_max_freezes
  tcp: grow window for OOO packets only for SACK flows
  tcp: don't ignore ECN CWR on pure ACK
  sctp: Don't advertise IPv4 addresses if ipv6only is set on the socket
  rxrpc: Fix notification call on completion of discarded calls
  rocker: fix incorrect error handling in dma_rings_init
  net: usb: ax88179_178a: fix packet alignment padding
  net: increment xmit_recursion level in dev_direct_xmit()
  net: use correct this_cpu primitive in dev_recursion_level
  net: place xmit recursion in softnet data
  net: fix memleak in register_netdevice()
  net: bridge: enforce alignment for ethernet address
  mld: fix memory leak in ipv6_mc_destroy_dev()
  ibmveth: Fix max MTU limit
  apparmor: don't try to replace stale label in ptraceme check
  ALSA: hda/realtek - Enable micmute LED on an HP system
  ALSA: hda/realtek: Enable mute LED on an HP system
  ALSA: hda/realtek - Enable the headset of ASUS B9450FA with ALC294
  fix a braino in "sparc32: fix register window handling in genregs32_[gs]et()"
  i2c: tegra: Fix Maximum transfer size
  i2c: tegra: Add missing kerneldoc for some fields
  i2c: tegra: Cleanup kerneldoc comments
  EDAC/amd64: Add Family 17h Model 30h PCI IDs
  net: sched: export __netdev_watchdog_up()
  net: bcmgenet: remove HFB_CTRL access
  mtd: rawnand: marvell: Fix the condition on a return code
  fanotify: fix ignore mask logic for events on child and on dir
  block/bio-integrity: don't free 'buf' if bio_integrity_add_page() failed
  net: be more gentle about silly gso requests coming from user
  ANDROID: lib/vdso: do not update timespec if clock_getres() fails
  Revert "ANDROID: fscrypt: add key removal notifier chain"
  ANDROID: update the ABI xml and qcom whitelist
  ANDROID: fs: export vfs_{read|write}
  ANDROID: GKI: update abi definitions now that sdcardfs is gone
  Revert "ANDROID: sdcardfs: Enable modular sdcardfs"
  Revert "ANDROID: vfs: Add setattr2 for filesystems with per mount permissions"
  Revert "ANDROID: vfs: fix export symbol type"
  Revert "ANDROID: vfs: Add permission2 for filesystems with per mount permissions"
  Revert "ANDROID: vfs: fix export symbol types"
  Revert "ANDROID: vfs: add d_canonical_path for stacked filesystem support"
  Revert "ANDROID: fs: Restore vfs_path_lookup() export"
  ANDROID: sdcardfs: remove sdcardfs from system
  Revert "ALSA: usb-audio: Improve frames size computation"
  ANDROID: Makefile: append BUILD_NUMBER to version string when defined
  ANDROID: GKI: Update ABI for incremental fs
  ANDROID: GKI: Update cuttlefish whitelist
  ANDROID: GKI: Disable INCREMENTAL_FS on x86 too
  ANDROID: cpufreq: schedutil: drop cache when update skipped due to rate limit
  Linux 4.19.130
  KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated
  kvm: x86: Fix reserved bits related calculation errors caused by MKTME
  kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c
  md: add feature flag MD_FEATURE_RAID0_LAYOUT
  Revert "dpaa_eth: fix usage as DSA master, try 3"
  net: core: device_rename: Use rwsem instead of a seqcount
  sched/rt, net: Use CONFIG_PREEMPTION
  kretprobe: Prevent triggering kretprobe from within kprobe_flush_task
  net: octeon: mgmt: Repair filling of RX ring
  e1000e: Do not wake up the system via WOL if device wakeup is disabled
  kprobes: Fix to protect kick_kprobe_optimizer() by kprobe_mutex
  crypto: algboss - don't wait during notifier callback
  crypto: algif_skcipher - Cap recv SG list at ctx->used
  drm/i915/icl+: Fix hotplug interrupt disabling after storm detection
  drm/i915: Whitelist context-local timestamp in the gen9 cmdparser
  s390: fix syscall_get_error for compat processes
  mtd: rawnand: tmio: Fix the probe error path
  mtd: rawnand: mtk: Fix the probe error path
  mtd: rawnand: plat_nand: Fix the probe error path
  mtd: rawnand: socrates: Fix the probe error path
  mtd: rawnand: oxnas: Fix the probe error path
  mtd: rawnand: oxnas: Add of_node_put()
  mtd: rawnand: orion: Fix the probe error path
  mtd: rawnand: xway: Fix the probe error path
  mtd: rawnand: sharpsl: Fix the probe error path
  mtd: rawnand: diskonchip: Fix the probe error path
  mtd: rawnand: Pass a nand_chip object to nand_release()
  mtd: rawnand: Pass a nand_chip object to nand_scan()
  block: nr_sects_write(): Disable preemption on seqcount write
  x86/boot/compressed: Relax sed symbol type regex for LLVM ld.lld
  drm/dp_mst: Increase ACT retry timeout to 3s
  ext4: avoid race conditions when remounting with options that change dax
  ext4: fix partial cluster initialization when splitting extent
  selinux: fix double free
  drm/amdgpu: Replace invalid device ID with a valid device ID
  drm/qxl: Use correct notify port address when creating cursor ring
  drm/dp_mst: Reformat drm_dp_check_act_status() a bit
  drm: encoder_slave: fix refcouting error for modules
  libata: Use per port sync for detach
  arm64: hw_breakpoint: Don't invoke overflow handler on uaccess watchpoints
  block: Fix use-after-free in blkdev_get()
  afs: afs_write_end() should change i_size under the right lock
  afs: Fix non-setting of mtime when writing into mmap
  bcache: fix potential deadlock problem in btree_gc_coalesce
  ext4: stop overwrite the errcode in ext4_setup_super
  perf report: Fix NULL pointer dereference in hists__fprintf_nr_sample_events()
  usb/ehci-platform: Set PM runtime as active on resume
  usb: host: ehci-platform: add a quirk to avoid stuck
  usb/xhci-plat: Set PM runtime as active on resume
  xdp: Fix xsk_generic_xmit errno
  net/filter: Permit reading NET in load_bytes_relative when MAC not set
  x86/idt: Keep spurious entries unset in system_vectors
  scsi: acornscsi: Fix an error handling path in acornscsi_probe()
  drm/sun4i: hdmi ddc clk: Fix size of m divider
  ASoC: rt5645: Add platform-data for Asus T101HA
  ASoC: Intel: bytcr_rt5640: Add quirk for Toshiba Encore WT10-A tablet
  ASoC: core: only convert non DPCM link to DPCM link
  afs: Fix memory leak in afs_put_sysnames()
  selftests/net: in timestamping, strncpy needs to preserve null byte
  drivers/perf: hisi: Fix wrong value for all counters enable
  NTB: ntb_test: Fix bug when counting remote files
  NTB: perf: Fix race condition when run with ntb_test
  NTB: perf: Fix support for hardware that doesn't have port numbers
  NTB: perf: Don't require one more memory window than number of peers
  NTB: Revert the change to use the NTB device dev for DMA allocations
  NTB: ntb_tool: reading the link file should not end in a NULL byte
  ntb_tool: pass correct struct device to dma_alloc_coherent
  ntb_perf: pass correct struct device to dma_alloc_coherent
  gfs2: fix use-after-free on transaction ail lists
  blktrace: fix endianness for blk_log_remap()
  blktrace: fix endianness in get_pdu_int()
  blktrace: use errno instead of bi_status
  selftests/vm/pkeys: fix alloc_random_pkey() to make it really random
  elfnote: mark all .note sections SHF_ALLOC
  include/linux/bitops.h: avoid clang shift-count-overflow warnings
  lib/zlib: remove outdated and incorrect pre-increment optimization
  geneve: change from tx_error to tx_dropped on missing metadata
  crypto: omap-sham - add proper load balancing support for multicore
  pinctrl: freescale: imx: Fix an error handling path in 'imx_pinctrl_probe()'
  pinctrl: imxl: Fix an error handling path in 'imx1_pinctrl_core_probe()'
  scsi: ufs: Don't update urgent bkops level when toggling auto bkops
  scsi: iscsi: Fix reference count leak in iscsi_boot_create_kobj
  gfs2: Allow lock_nolock mount to specify jid=X
  openrisc: Fix issue with argument clobbering for clone/fork
  rxrpc: Adjust /proc/net/rxrpc/calls to display call->debug_id not user_ID
  vfio/mdev: Fix reference count leak in add_mdev_supported_type
  ASoC: fsl_asrc_dma: Fix dma_chan leak when config DMA channel failed
  extcon: adc-jack: Fix an error handling path in 'adc_jack_probe()'
  powerpc/4xx: Don't unmap NULL mbase
  of: Fix a refcounting bug in __of_attach_node_sysfs()
  NFSv4.1 fix rpc_call_done assignment for BIND_CONN_TO_SESSION
  net: sunrpc: Fix off-by-one issues in 'rpc_ntop6'
  clk: sprd: return correct type of value for _sprd_pll_recalc_rate
  KVM: PPC: Book3S HV: Ignore kmemleak false positives
  scsi: ufs-qcom: Fix scheduling while atomic issue
  clk: bcm2835: Fix return type of bcm2835_register_gate
  scsi: target: tcmu: Fix a use after free in tcmu_check_expired_queue_cmd()
  ASoC: fix incomplete error-handling in img_i2s_in_probe.
  x86/apic: Make TSC deadline timer detection message visible
  RDMA/iw_cxgb4: cleanup device debugfs entries on ULD remove
  usb: gadget: Fix issue with config_ep_by_speed function
  usb: gadget: fix potential double-free in m66592_probe.
  usb: gadget: lpc32xx_udc: don't dereference ep pointer before null check
  USB: gadget: udc: s3c2410_udc: Remove pointless NULL check in s3c2410_udc_nuke
  usb: dwc2: gadget: move gadget resume after the core is in L0 state
  watchdog: da9062: No need to ping manually before setting timeout
  IB/cma: Fix ports memory leak in cma_configfs
  PCI: dwc: Fix inner MSI IRQ domain registration
  PCI/PTM: Inherit Switch Downstream Port PTM settings from Upstream Port
  dm zoned: return NULL if dmz_get_zone_for_reclaim() fails to find a zone
  powerpc/64s/pgtable: fix an undefined behaviour
  arm64: tegra: Fix ethernet phy-mode for Jetson Xavier
  scsi: target: tcmu: Userspace must not complete queued commands
  clk: samsung: exynos5433: Add IGNORE_UNUSED flag to sclk_i2s1
  fpga: dfl: afu: Corrected error handling levels
  tty: n_gsm: Fix bogus i++ in gsm_data_kick
  USB: host: ehci-mxc: Add error handling in ehci_mxc_drv_probe()
  ASoC: Intel: bytcr_rt5640: Add quirk for Toshiba Encore WT8-A tablet
  drm/msm/mdp5: Fix mdp5_init error path for failed mdp5_kms allocation
  usb/ohci-platform: Fix a warning when hibernating
  vfio-pci: Mask cap zero
  powerpc/ps3: Fix kexec shutdown hang
  powerpc/pseries/ras: Fix FWNMI_VALID off by one
  ipmi: use vzalloc instead of kmalloc for user creation
  HID: Add quirks for Trust Panora Graphic Tablet
  tty: n_gsm: Fix waking up upper tty layer when room available
  tty: n_gsm: Fix SOF skipping
  powerpc/64: Don't initialise init_task->thread.regs
  PCI: Fix pci_register_host_bridge() device_register() error handling
  clk: ti: composite: fix memory leak
  dlm: remove BUG() before panic()
  pinctrl: rockchip: fix memleak in rockchip_dt_node_to_map
  scsi: mpt3sas: Fix double free warnings
  power: supply: smb347-charger: IRQSTAT_D is volatile
  power: supply: lp8788: Fix an error handling path in 'lp8788_charger_probe()'
  scsi: qla2xxx: Fix warning after FC target reset
  PCI/ASPM: Allow ASPM on links to PCIe-to-PCI/PCI-X Bridges
  PCI: rcar: Fix incorrect programming of OB windows
  drivers: base: Fix NULL pointer exception in __platform_driver_probe() if a driver developer is foolish
  serial: amba-pl011: Make sure we initialize the port.lock spinlock
  i2c: pxa: fix i2c_pxa_scream_blue_murder() debug output
  PCI: v3-semi: Fix a memory leak in v3_pci_probe() error handling paths
  staging: sm750fb: add missing case while setting FB_VISUAL
  usb: dwc3: gadget: Properly handle failed kick_transfer
  thermal/drivers/ti-soc-thermal: Avoid dereferencing ERR_PTR
  slimbus: ngd: get drvdata from correct device
  tty: hvc: Fix data abort due to race in hvc_open
  s390/qdio: put thinint indicator after early error
  ALSA: usb-audio: Fix racy list management in output queue
  ALSA: usb-audio: Improve frames size computation
  staging: gasket: Fix mapping refcnt leak when register/store fails
  staging: gasket: Fix mapping refcnt leak when put attribute fails
  firmware: qcom_scm: fix bogus abuse of dma-direct internals
  pinctrl: rza1: Fix wrong array assignment of rza1l_swio_entries
  scsi: qedf: Fix crash when MFW calls for protocol stats while function is still probing
  gpio: dwapb: Append MODULE_ALIAS for platform driver
  ARM: dts: sun8i-h2-plus-bananapi-m2-zero: Fix led polarity
  scsi: qedi: Do not flush offload work if ARP not resolved
  arm64: dts: mt8173: fix unit name warnings
  staging: greybus: fix a missing-check bug in gb_lights_light_config()
  x86/purgatory: Disable various profiling and sanitizing options
  apparmor: fix nnp subset test for unconfined
  scsi: ibmvscsi: Don't send host info in adapter info MAD after LPM
  scsi: sr: Fix sr_probe() missing deallocate of device minor
  ASoC: meson: add missing free_irq() in error path
  apparmor: check/put label on apparmor_sk_clone_security()
  apparmor: fix introspection of task mode for unconfined tasks
  mksysmap: Fix the mismatch of '.L' symbols in System.map
  NTB: Fix the default port and peer numbers for legacy drivers
  NTB: ntb_pingpong: Choose doorbells based on port number
  yam: fix possible memory leak in yam_init_driver
  pwm: img: Call pm_runtime_put() in pm_runtime_get_sync() failed case
  powerpc/crashkernel: Take "mem=" option into account
  PCI: vmd: Filter resource type bits from shadow register
  nfsd: Fix svc_xprt refcnt leak when setup callback client failed
  powerpc/perf/hv-24x7: Fix inconsistent output values incase multiple hv-24x7 events run
  clk: clk-flexgen: fix clock-critical handling
  scsi: lpfc: Fix lpfc_nodelist leak when processing unsolicited event
  mfd: wm8994: Fix driver operation if loaded as modules
  gpio: dwapb: Call acpi_gpiochip_free_interrupts() on GPIO chip de-registration
  m68k/PCI: Fix a memory leak in an error handling path
  RDMA/mlx5: Add init2init as a modify command
  vfio/pci: fix memory leaks in alloc_perm_bits()
  ps3disk: use the default segment boundary
  PCI: aardvark: Don't blindly enable ASPM L0s and don't write to read-only register
  dm mpath: switch paths in dm_blk_ioctl() code path
  serial: 8250: Fix max baud limit in generic 8250 port
  usblp: poison URBs upon disconnect
  clk: samsung: Mark top ISP and CAM clocks on Exynos542x as critical
  i2c: pxa: clear all master action bits in i2c_pxa_stop_message()
  f2fs: report delalloc reserve as non-free in statfs for project quota
  iio: bmp280: fix compensation of humidity
  scsi: qla2xxx: Fix issue with adapter's stopping state
  PCI: Allow pci_resize_resource() for devices on root bus
  ALSA: isa/wavefront: prevent out of bounds write in ioctl
  ALSA: hda/realtek - Introduce polarity for micmute LED GPIO
  scsi: qedi: Check for buffer overflow in qedi_set_path()
  ARM: integrator: Add some Kconfig selections
  ASoC: davinci-mcasp: Fix dma_chan refcnt leak when getting dma type
  backlight: lp855x: Ensure regulators are disabled on probe failure
  clk: qcom: msm8916: Fix the address location of pll->config_reg
  remoteproc: Fix IDR initialisation in rproc_alloc()
  iio: pressure: bmp280: Tolerate IRQ before registering
  i2c: piix4: Detect secondary SMBus controller on AMD AM4 chipsets
  ASoC: tegra: tegra_wm8903: Support nvidia,headset property
  clk: sunxi: Fix incorrect usage of round_down()
  power: supply: bq24257_charger: Replace depends on REGMAP_I2C with select
  ANDROID: ext4: Optimize match for casefolded encrypted dirs
  ANDROID: ext4: Handle casefolding with encryption
  ANDROID: extcon: Remove redundant EXPORT_SYMBOL_GPL
  ANDROID: update the ABI xml representation
  ANDROID: GKI: cfg80211: add ABI changes for CONFIG_NL80211_TESTMODE
  ANDROID: gki_defconfig: x86: Enable KERNEL_LZ4
  ANDROID: GKI: scripts: Makefile: update the lz4 command
  FROMLIST: f2fs: fix use-after-free when accessing bio->bi_crypt_context
  UPSTREAM: fdt: Update CRC check for rng-seed
  ANDROID: GKI: Update ABI for incremental fs
  ANDROID: GKI: Update whitelist and defconfig for incfs
  ANDROID: Use depmod from the hermetic toolchain
  Linux 4.19.129
  perf symbols: Fix debuginfo search for Ubuntu
  perf probe: Check address correctness by map instead of _etext
  perf probe: Fix to check blacklist address correctly
  perf probe: Do not show the skipped events
  w1: omap-hdq: cleanup to add missing newline for some dev_dbg
  mtd: rawnand: pasemi: Fix the probe error path
  mtd: rawnand: brcmnand: fix hamming oob layout
  sunrpc: clean up properly in gss_mech_unregister()
  sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations.
  kbuild: force to build vmlinux if CONFIG_MODVERSIONS=y
  powerpc/64s: Save FSCR to init_task.thread.fscr after feature init
  powerpc/64s: Don't let DT CPU features set FSCR_DSCR
  drivers/macintosh: Fix memleak in windfarm_pm112 driver
  ARM: dts: s5pv210: Set keep-power-in-suspend for SDHCI1 on Aries
  ARM: dts: at91: sama5d2_ptc_ek: fix vbus pin
  ARM: dts: exynos: Fix GPIO polarity for the GalaxyS3 CM36651 sensor's bus
  ARM: tegra: Correct PL310 Auxiliary Control Register initialization
  kernel/cpu_pm: Fix uninitted local in cpu_pm
  alpha: fix memory barriers so that they conform to the specification
  dm crypt: avoid truncating the logical block size
  sparc64: fix misuses of access_process_vm() in genregs32_[sg]et()
  sparc32: fix register window handling in genregs32_[gs]et()
  gnss: sirf: fix error return code in sirf_probe()
  pinctrl: samsung: Save/restore eint_mask over suspend for EINT_TYPE GPIOs
  pinctrl: samsung: Correct setting of eint wakeup mask on s5pv210
  power: vexpress: add suppress_bind_attrs to true
  igb: Report speed and duplex as unknown when device is runtime suspended
  media: ov5640: fix use of destroyed mutex
  b43_legacy: Fix connection problem with WPA3
  b43: Fix connection problem with WPA3
  b43legacy: Fix case where channel status is corrupted
  Bluetooth: hci_bcm: fix freeing not-requested IRQ
  media: go7007: fix a miss of snd_card_free
  carl9170: remove P2P_GO support
  e1000e: Relax condition to trigger reset for ME workaround
  e1000e: Disable TSO for buffer overrun workaround
  PCI: Program MPS for RCiEP devices
  ima: Call ima_calc_boot_aggregate() in ima_eventdigest_init()
  btrfs: fix wrong file range cleanup after an error filling dealloc range
  btrfs: fix error handling when submitting direct I/O bio
  PCI: Generalize multi-function power dependency device links
  PCI: Unify ACS quirk desired vs provided checking
  PCI: Make ACS quirk implementations more uniform
  serial: 8250_pci: Move Pericom IDs to pci_ids.h
  PCI: Add Loongson vendor ID
  x86/amd_nb: Add Family 19h PCI IDs
  PCI: vmd: Add device id for VMD device 8086:9A0B
  PCI: Add Amazon's Annapurna Labs vendor ID
  PCI: Add Genesys Logic, Inc. Vendor ID
  ALSA: lx6464es - add support for LX6464ESe pci express variant
  x86/amd_nb: Add PCI device IDs for family 17h, model 70h
  PCI: mediatek: Add controller support for MT7629
  PCI: Enable NVIDIA HDA controllers
  PCI: Add NVIDIA GPU multi-function power dependencies
  PCI: Add Synopsys endpoint EDDA Device ID
  misc: pci_endpoint_test: Add support to test PCI EP in AM654x
  misc: pci_endpoint_test: Add the layerscape EP device support
  PCI: Move Rohm Vendor ID to generic list
  PCI: Move Synopsys HAPS platform device IDs
  PCI: add USR vendor id and use it in r8169 and w6692 driver
  x86/amd_nb: Add PCI device IDs for family 17h, model 30h
  hwmon/k10temp, x86/amd_nb: Consolidate shared device IDs
  pci:ipmi: Move IPMI PCI class id defines to pci_ids.h
  PCI: Remove unused NFP32xx IDs
  PCI: Add ACS quirk for Intel Root Complex Integrated Endpoints
  PCI: Add ACS quirk for iProc PAXB
  PCI: Avoid FLR for AMD Starship USB 3.0
  PCI: Avoid FLR for AMD Matisse HD Audio & USB 3.0
  PCI: Avoid Pericom USB controller OHCI/EHCI PME# defect
  ext4: fix race between ext4_sync_parent() and rename()
  ext4: fix error pointer dereference
  ext4: fix EXT_MAX_EXTENT/INDEX to check for zeroed eh_max
  evm: Fix possible memory leak in evm_calc_hmac_or_hash()
  ima: Directly assign the ima_default_policy pointer to ima_rules
  ima: Fix ima digest hash table key calculation
  mm: initialize deferred pages with interrupts enabled
  mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()
  btrfs: send: emit file capabilities after chown
  btrfs: include non-missing as a qualifier for the latest_bdev
  string.h: fix incompatibility between FORTIFY_SOURCE and KASAN
  platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chassis-type
  platform/x86: intel-hid: Add a quirk to support HP Spectre X2 (2015)
  platform/x86: hp-wmi: Convert simple_strtoul() to kstrtou32()
  cpuidle: Fix three reference count leaks
  spi: dw: Return any value retrieved from the dma_transfer callback
  mmc: sdhci-esdhc-imx: fix the mask for tuning start point
  ixgbe: fix signed-integer-overflow warning
  mmc: via-sdmmc: Respect the cmd->busy_timeout from the mmc core
  staging: greybus: sdio: Respect the cmd->busy_timeout from the mmc core
  mmc: sdhci-msm: Set SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 quirk
  bcache: fix refcount underflow in bcache_device_free()
  MIPS: Fix IRQ tracing when call handle_fpe() and handle_msa_fpe()
  PCI: Don't disable decoding when mmio_always_on is set
  macvlan: Skip loopback packets in RX handler
  btrfs: qgroup: mark qgroup inconsistent if we're inheriting snapshot to a new qgroup
  m68k: mac: Don't call via_flush_cache() on Mac IIfx
  x86/mm: Stop printing BRK addresses
  crypto: stm32/crc32 - fix multi-instance
  crypto: stm32/crc32 - fix run-time self test issue.
  crypto: stm32/crc32 - fix ext4 chksum BUG_ON()
  mips: Add udelay lpj numbers adjustment
  mips: MAAR: Use more precise address mask
  x86/boot: Correct relocation destination on old linkers
  mwifiex: Fix memory corruption in dump_station
  rtlwifi: Fix a double free in _rtl_usb_tx_urb_setup()
  net/mlx5e: IPoIB, Drop multicast packets that this interface sent
  veth: Adjust hard_start offset on redirect XDP frames
  md: don't flush workqueue unconditionally in md_open
  mt76: avoid rx reorder buffer overflow
  net: qed*: Reduce RX and TX default ring count when running inside kdump kernel
  wcn36xx: Fix error handling path in 'wcn36xx_probe()'
  ath10k: Remove msdu from idr when management pkt send fails
  nvme: refine the Qemu Identify CNS quirk
  platform/x86: intel-vbtn: Also handle tablet-mode switch on "Detachable" and "Portable" chassis-types
  platform/x86: intel-vbtn: Do not advertise switches to userspace if they are not there
  platform/x86: intel-vbtn: Split keymap into buttons and switches parts
  platform/x86: intel-vbtn: Use acpi_evaluate_integer()
  xfs: fix duplicate verification from xfs_qm_dqflush()
  xfs: reset buffer write failure state on successful completion
  kgdb: Fix spurious true from in_dbg_master()
  mips: cm: Fix an invalid error code of INTVN_*_ERR
  MIPS: Truncate link address into 32bit for 32bit kernel
  Crypto/chcr: fix for ccm(aes) failed test
  xfs: clean up the error handling in xfs_swap_extents
  powerpc/spufs: fix copy_to_user while atomic
  net: allwinner: Use correct return type for ndo_start_xmit()
  media: cec: silence shift wrapping warning in __cec_s_log_addrs()
  net: lpc-enet: fix error return code in lpc_mii_init()
  drivers/perf: hisi: Fix typo in events attribute array
  sched/core: Fix illegal RCU from offline CPUs
  exit: Move preemption fixup up, move blocking operations down
  lib/mpi: Fix 64-bit MIPS build with Clang
  net: bcmgenet: set Rx mode before starting netif
  selftests/bpf: Fix memory leak in extract_build_id()
  netfilter: nft_nat: return EOPNOTSUPP if type or flags are not supported
  audit: fix a net reference leak in audit_list_rules_send()
  Bluetooth: btbcm: Add 2 missing models to subver tables
  MIPS: Make sparse_init() using top-down allocation
  media: platform: fcp: Set appropriate DMA parameters
  media: dvb: return -EREMOTEIO on i2c transfer failure.
  audit: fix a net reference leak in audit_send_reply()
  dt-bindings: display: mediatek: control dpi pins mode to avoid leakage
  e1000: Distribute switch variables for initialization
  tools api fs: Make xxx__mountpoint() more scalable
  brcmfmac: fix wrong location to get firmware feature
  staging: android: ion: use vmap instead of vm_map_ram
  net: vmxnet3: fix possible buffer overflow caused by bad DMA value in vmxnet3_get_rss()
  x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
  spi: dw: Fix Rx-only DMA transfers
  mmc: meson-mx-sdio: trigger a soft reset after a timeout or CRC error
  batman-adv: Revert "disable ethtool link speed detection when auto negotiation off"
  ARM: 8978/1: mm: make act_mm() respect THREAD_SIZE
  btrfs: do not ignore error from btrfs_next_leaf() when inserting checksums
  clocksource: dw_apb_timer_of: Fix missing clockevent timers
  clocksource: dw_apb_timer: Make CPU-affiliation being optional
  spi: dw: Enable interrupts in accordance with DMA xfer mode
  kgdb: Prevent infinite recursive entries to the debugger
  kgdb: Disable WARN_CONSOLE_UNLOCKED for all kgdb
  Bluetooth: Add SCO fallback for invalid LMP parameters error
  MIPS: Loongson: Build ATI Radeon GPU driver as module
  ixgbe: Fix XDP redirect on archs with PAGE_SIZE above 4K
  arm64: insn: Fix two bugs in encoding 32-bit logical immediates
  spi: dw: Zero DMA Tx and Rx configurations on stack
  arm64: cacheflush: Fix KGDB trap detection
  efi/libstub/x86: Work around LLVM ELF quirk build regression
  net: ena: fix error returning in ena_com_get_hash_function()
  net: atlantic: make hw_get_regs optional
  spi: pxa2xx: Apply CS clk quirk to BXT
  objtool: Ignore empty alternatives
  media: si2157: Better check for running tuner in init
  crypto: ccp -- don't "select" CONFIG_DMADEVICES
  drm: bridge: adv7511: Extend list of audio sample rates
  ACPI: GED: use correct trigger type field in _Exx / _Lxx handling
  KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
  xen/pvcalls-back: test for errors when calling backend_connect()
  mmc: sdio: Fix potential NULL pointer error in mmc_sdio_init_card()
  ARM: dts: at91: sama5d2_ptc_ek: fix sdmmc0 node description
  mmc: sdhci-msm: Clear tuning done flag while hs400 tuning
  agp/intel: Reinforce the barrier after GTT updates
  perf: Add cond_resched() to task_function_call()
  fat: don't allow to mount if the FAT length == 0
  mm/slub: fix a memory leak in sysfs_slab_add()
  drm/vkms: Hold gem object while still in-use
  Smack: slab-out-of-bounds in vsscanf
  ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
  ath9k: Fix stack-out-of-bounds Write in ath9k_hif_usb_rx_cb
  ath9k: Fix use-after-free Write in ath9k_htc_rx_msg
  ath9k: Fix use-after-free Read in ath9k_wmi_ctrl_rx
  scsi: megaraid_sas: TM command refire leads to controller firmware crash
  KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
  KVM: MIPS: Fix VPN2_MASK definition for variable cpu_vmbits
  KVM: MIPS: Define KVM_ENTRYHI_ASID to cpu_asid_mask(&boot_cpu_data)
  KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
  KVM: nSVM: leave ASID aside in copy_vmcb_control_area
  KVM: nSVM: fix condition for filtering async PF
  video: fbdev: w100fb: Fix a potential double free.
  proc: Use new_inode not new_inode_pseudo
  ovl: initialize error in ovl_copy_xattr
  selftests/net: in rxtimestamp getopt_long needs terminating null entry
  crypto: virtio: Fix dest length calculation in __virtio_crypto_skcipher_do_req()
  crypto: virtio: Fix src/dst scatterlist calculation in __virtio_crypto_skcipher_do_req()
  crypto: virtio: Fix use-after-free in virtio_crypto_skcipher_finalize_req()
  spi: pxa2xx: Fix runtime PM ref imbalance on probe error
  spi: pxa2xx: Balance runtime PM enable/disable on error
  spi: bcm2835: Fix controller unregister order
  spi: pxa2xx: Fix controller unregister order
  spi: Fix controller unregister order
  spi: No need to assign dummy value in spi_unregister_controller()
  x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
  x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
  x86/speculation: Add support for STIBP always-on preferred mode
  x86/speculation: Change misspelled STIPB to STIBP
  KVM: x86: only do L1TF workaround on affected processors
  KVM: x86/mmu: Consolidate "is MMIO SPTE" code
  kvm: x86: Fix L1TF mitigation for shadow MMU
  KVM: x86: Fix APIC page invalidation race
  x86/{mce,mm}: Unmap the entire page if the whole page is affected and poisoned
  ALSA: pcm: disallow linking stream to itself
  crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated
  PM: runtime: clk: Fix clk_pm_runtime_get() error path
  spi: bcm-qspi: when tx/rx buffer is NULL set to 0
  spi: bcm2835aux: Fix controller unregister order
  spi: dw: Fix controller unregister order
  nilfs2: fix null pointer dereference at nilfs_segctor_do_construct()
  cgroup, blkcg: Prepare some symbols for module and !CONFIG_CGROUP usages
  ACPI: PM: Avoid using power resources if there are none for D0
  ACPI: GED: add support for _Exx / _Lxx handler methods
  ACPI: CPPC: Fix reference count leak in acpi_cppc_processor_probe()
  ACPI: sysfs: Fix reference count leak in acpi_sysfs_add_hotplug_profile()
  ALSA: usb-audio: Add vendor, product and profile name for HP Thunderbolt Dock
  ALSA: usb-audio: Fix inconsistent card PM state after resume
  ALSA: hda/realtek - add a pintbl quirk for several Lenovo machines
  ALSA: es1688: Add the missed snd_card_free()
  efi/efivars: Add missing kobject_put() in sysfs entry creation error path
  x86/reboot/quirks: Add MacBook6,1 reboot quirk
  x86/speculation: Prevent rogue cross-process SSBD shutdown
  x86/PCI: Mark Intel C620 MROMs as having non-compliant BARs
  x86_64: Fix jiffies ODR violation
  btrfs: tree-checker: Check level for leaves and nodes
  aio: fix async fsync creds
  mm: add kvfree_sensitive() for freeing sensitive data objects
  perf probe: Accept the instance number of kretprobe event
  x86/cpu/amd: Make erratum #1054 a legacy erratum
  RDMA/uverbs: Make the event_queue fds return POLLERR when disassociated
  ath9k_htc: Silence undersized packet warnings
  powerpc/xive: Clear the page tables for the ESB IO mapping
  drivers/net/ibmvnic: Update VNIC protocol version reporting
  Input: synaptics - add a second working PNP_ID for Lenovo T470s
  sched/fair: Don't NUMA balance for kthreads
  ARM: 8977/1: ptrace: Fix mask for thumb breakpoint hook
  Input: mms114 - fix handling of mms345l
  crypto: talitos - fix ECB and CBC algs ivsize
  btrfs: Detect unbalanced tree with empty leaf before crashing btree operations
  btrfs: merge btrfs_find_device and find_device
  lib: Reduce user_access_begin() boundaries in strncpy_from_user() and strnlen_user()
  x86: uaccess: Inhibit speculation past access_ok() in user_access_begin()
  arch/openrisc: Fix issues with access_ok()
  Fix 'acccess_ok()' on alpha and SH
  make 'user_access_begin()' do 'access_ok()'
  selftests: bpf: fix use of undeclared RET_IF macro
  tun: correct header offsets in napi frags mode
  vxlan: Avoid infinite loop when suppressing NS messages with invalid options
  bridge: Avoid infinite loop when suppressing NS messages with invalid options
  net_failover: fixed rollback in net_failover_open()
  ipv6: fix IPV6_ADDRFORM operation logic
  writeback: Drop I_DIRTY_TIME_EXPIRE
  writeback: Fix sync livelock due to b_dirty_time processing
  writeback: Avoid skipping inode writeback
  writeback: Protect inode->i_io_list with inode->i_lock
  Revert "writeback: Avoid skipping inode writeback"
  ANDROID: gki_defconfig: increase vbus_draw to 500mA
  fscrypt: remove stale definition
  fs-verity: remove unnecessary extern keywords
  fs-verity: fix all kerneldoc warnings
  fscrypt: add support for IV_INO_LBLK_32 policies
  fscrypt: make test_dummy_encryption use v2 by default
  fscrypt: support test_dummy_encryption=v2
  fscrypt: add fscrypt_add_test_dummy_key()
  linux/parser.h: add include guards
  fscrypt: remove unnecessary extern keywords
  fscrypt: name all function parameters
  fscrypt: fix all kerneldoc warnings
  ANDROID: Update the ABI
  ANDROID: GKI: power: power-supply: Add POWER_SUPPLY_PROP_CHARGER_STATUS property
  ANDROID: GKI: add dev to usb_gsi_request
  ANDROID: GKI: dma-buf: add dent_count to dma_buf
  ANDROID: Update the ABI xml and whitelist
  ANDROID: GKI: update whitelist
  ANDROID: extcon: Export symbol of `extcon_get_edev_name`
  ANDROID: kbuild: merge more sections with LTO
  UPSTREAM: timekeeping/vsyscall: Update VDSO data unconditionally
  ANDROID: GKI: Revert "genetlink: disallow subscribing to unknown mcast groups"
  BACKPORT: usb: musb: Add support for MediaTek musb controller
  UPSTREAM: usb: musb: Add musb_clearb/w() interface
  UPSTREAM: usb: musb: Add noirq type of dma create interface
  UPSTREAM: usb: musb: Add get/set toggle hooks
  UPSTREAM: dt-bindings: usb: musb: Add support for MediaTek musb controller
  FROMGIT: driver core: Remove unnecessary is_fwnode_dev variable in device_add()
  FROMGIT: driver core: Remove check in driver_deferred_probe_force_trigger()
  FROMGIT: of: platform: Batch fwnode parsing when adding all top level devices
  FROMGIT: BACKPORT: driver core: fw_devlink: Add support for batching fwnode parsing
  BACKPORT: driver core: Look for waiting consumers only for a fwnode's primary device
  BACKPORT: driver core: Add device links from fwnode only for the primary device
  Linux 4.19.128
  Revert "net/mlx5: Annotate mutex destroy for root ns"
  uprobes: ensure that uprobe->offset and ->ref_ctr_offset are properly aligned
  x86/speculation: Add Ivy Bridge to affected list
  x86/speculation: Add SRBDS vulnerability and mitigation documentation
  x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation
  x86/cpu: Add 'table' argument to cpu_matches()
  x86/cpu: Add a steppings field to struct x86_cpu_id
  nvmem: qfprom: remove incorrect write support
  CDC-ACM: heed quirk also in error handling
  staging: rtl8712: Fix IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK
  tty: hvc_console, fix crashes on parallel open/close
  vt: keyboard: avoid signed integer overflow in k_ascii
  usb: musb: Fix runtime PM imbalance on error
  usb: musb: start session in resume for host port
  iio: vcnl4000: Fix i2c swapped word reading.
  USB: serial: option: add Telit LE910C1-EUX compositions
  USB: serial: usb_wwan: do not resubmit rx urb on fatal errors
  USB: serial: qcserial: add DW5816e QDL support
  net: check untrusted gso_size at kernel entry
  vsock: fix timeout in vsock_accept()
  NFC: st21nfca: add missed kfree_skb() in an error path
  net: usb: qmi_wwan: add Telit LE910C1-EUX composition
  l2tp: do not use inet_hash()/inet_unhash()
  l2tp: add sk_family checks to l2tp_validate_socket
  devinet: fix memleak in inetdev_init()
  Revert "ANDROID: Remove default y on BRIDGE_IGMP_SNOOPING"
  ANDROID: Update the ABI xml and whitelist
  ANDROID: GKI: update whitelist
  ANDROID: arch: arm64: vdso: export the symbols for time()
  ANDROID: Incremental fs: Remove dependency on PKCS7_MESSAGE_PARSER
  ANDROID: dm-bow: Add block_size option
  f2fs: attach IO flags to the missing cases
  f2fs: add node_io_flag for bio flags likewise data_io_flag
  f2fs: remove unused parameter of f2fs_put_rpages_mapping()
  f2fs: handle readonly filesystem in f2fs_ioc_shutdown()
  f2fs: avoid utf8_strncasecmp() with unstable name
  f2fs: don't return vmalloc() memory from f2fs_kmalloc()
  ANDROID: GKI: set CONFIG_BLK_DEV_LOOP_MIN_COUNT to 16
  ANDROID: Incremental fs: Cache successful hash calculations
  ANDROID: Incremental fs: Fix four error-path bugs
  Linux 4.19.127
  net: smsc911x: Fix runtime PM imbalance on error
  net: ethernet: stmmac: Enable interface clocks on probe for IPQ806x
  net/ethernet/freescale: rework quiesce/activate for ucc_geth
  null_blk: return error for invalid zone size
  s390/mm: fix set_huge_pte_at() for empty ptes
  drm/edid: Add Oculus Rift S to non-desktop list
  net: bmac: Fix read of MAC address from ROM
  x86/mmiotrace: Use cpumask_available() for cpumask_var_t variables
  i2c: altera: Fix race between xfer_msg and isr thread
  evm: Fix RCU list related warnings
  ARC: [plat-eznps]: Restrict to CONFIG_ISA_ARCOMPACT
  ARC: Fix ICCM & DCCM runtime size checks
  s390/ftrace: save traced function caller
  spi: dw: use "smp_mb()" to avoid sending spi data error
  powerpc/powernv: Avoid re-registration of imc debugfs directory
  scsi: hisi_sas: Check sas_port before using it
  drm/i915: fix port checks for MST support on gen >= 11
  airo: Fix read overflows sending packets
  net: dsa: mt7530: set CPU port to fallback mode
  scsi: ufs: Release clock if DMA map fails
  mmc: fix compilation of user API
  kernel/relay.c: handle alloc_percpu returning NULL in relay_open
  p54usb: add AirVasT USB stick device-id
  HID: i2c-hid: add Schneider SCL142ALM to descriptor override
  HID: sony: Fix for broken buttons on DS3 USB dongles
  mm: Fix mremap not considering huge pmd devmap
  libnvdimm: Fix endian conversion issues 
  Revert "cgroup: Add memory barriers to plug cgroup_rstat_updated() race window"
  f2fs: fix retry logic in f2fs_write_cache_pages()
  ANDROID: Update ABI representation
  Linux 4.19.126
  mm/vmalloc.c: don't dereference possible NULL pointer in __vunmap()
  netfilter: nf_conntrack_pptp: fix compilation warning with W=1 build
  bonding: Fix reference count leak in bond_sysfs_slave_add.
  crypto: chelsio/chtls: properly set tp->lsndtime
  qlcnic: fix missing release in qlcnic_83xx_interrupt_test.
  xsk: Add overflow check for u64 division, stored into u32
  bnxt_en: Fix accumulation of bp->net_stats_prev.
  esp6: get the right proto for transport mode in esp6_gso_encap
  netfilter: nf_conntrack_pptp: prevent buffer overflows in debug code
  netfilter: nfnetlink_cthelper: unbreak userspace helper support
  netfilter: ipset: Fix subcounter update skip
  netfilter: nft_reject_bridge: enable reject with bridge vlan
  ip_vti: receive ipip packet by calling ip_tunnel_rcv
  vti4: eliminated some duplicate code.
  xfrm: fix error in comment
  xfrm: fix a NULL-ptr deref in xfrm_local_error
  xfrm: fix a warning in xfrm_policy_insert_list
  xfrm interface: fix oops when deleting a x-netns interface
  xfrm: call xfrm_output_gso when inner_protocol is set in xfrm_output
  xfrm: allow to accept packets with ipv6 NEXTHDR_HOP in xfrm_input
  copy_xstate_to_kernel(): don't leave parts of destination uninitialized
  x86/dma: Fix max PFN arithmetic overflow on 32 bit systems
  mac80211: mesh: fix discovery timer re-arming issue / crash
  RDMA/core: Fix double destruction of uobject
  mmc: core: Fix recursive locking issue in CQE recovery path
  parisc: Fix kernel panic in mem_init()
  iommu: Fix reference count leak in iommu_group_alloc.
  include/asm-generic/topology.h: guard cpumask_of_node() macro argument
  fs/binfmt_elf.c: allocate initialized memory in fill_thread_core_info()
  mm: remove VM_BUG_ON(PageSlab()) from page_mapcount()
  IB/ipoib: Fix double free of skb in case of multicast traffic in CM mode
  libceph: ignore pool overlay and cache logic on redirects
  ALSA: hda/realtek - Add new codec supported for ALC287
  ALSA: usb-audio: Quirks for Gigabyte TRX40 Aorus Master onboard audio
  exec: Always set cap_ambient in cap_bprm_set_creds
  ALSA: usb-audio: mixer: volume quirk for ESS Technology Asus USB DAC
  ALSA: hda/realtek - Add a model for Thinkpad T570 without DAC workaround
  ALSA: hwdep: fix a left shifting 1 by 31 UB bug
  RDMA/pvrdma: Fix missing pci disable in pvrdma_pci_probe()
  mmc: block: Fix use-after-free issue for rpmb
  ARM: dts: bcm: HR2: Fix PPI interrupt types
  ARM: dts: bcm2835-rpi-zero-w: Fix led polarity
  ARM: dts/imx6q-bx50v3: Set display interface clock parents
  IB/qib: Call kobject_put() when kobject_init_and_add() fails
  gpio: exar: Fix bad handling for ida_simple_get error path
  ARM: uaccess: fix DACR mismatch with nested exceptions
  ARM: uaccess: integrate uaccess_save and uaccess_restore
  ARM: uaccess: consolidate uaccess asm to asm/uaccess-asm.h
  ARM: 8843/1: use unified assembler in headers
  ARM: 8970/1: decompressor: increase tag size
  Input: synaptics-rmi4 - fix error return code in rmi_driver_probe()
  Input: synaptics-rmi4 - really fix attn_data use-after-free
  Input: i8042 - add ThinkPad S230u to i8042 reset list
  Input: dlink-dir685-touchkeys - fix a typo in driver name
  Input: xpad - add custom init packet for Xbox One S controllers
  Input: evdev - call input_flush_device() on release(), not flush()
  Input: usbtouchscreen - add support for BonXeon TP
  samples: bpf: Fix build error
  cifs: Fix null pointer check in cifs_read
  riscv: stacktrace: Fix undefined reference to `walk_stackframe'
  IB/i40iw: Remove bogus call to netdev_master_upper_dev_get()
  net: freescale: select CONFIG_FIXED_PHY where needed
  usb: gadget: legacy: fix redundant initialization warnings
  usb: dwc3: pci: Enable extcon driver for Intel Merrifield
  cachefiles: Fix race between read_waiter and read_copier involving op->to_do
  gfs2: move privileged user check to gfs2_quota_lock_check
  net: microchip: encx24j600: add missed kthread_stop
  ALSA: usb-audio: add mapping for ASRock TRX40 Creator
  gpio: tegra: mask GPIO IRQs during IRQ shutdown
  ARM: dts: rockchip: fix pinctrl sub nodename for spi in rk322x.dtsi
  ARM: dts: rockchip: swap clock-names of gpu nodes
  arm64: dts: rockchip: swap interrupts interrupt-names rk3399 gpu node
  arm64: dts: rockchip: fix status for &gmac2phy in rk3328-evb.dts
  ARM: dts: rockchip: fix phy nodename for rk3228-evb
  mlxsw: spectrum: Fix use-after-free of split/unsplit/type_set in case reload fails
  net/mlx4_core: fix a memory leak bug.
  net: sun: fix missing release regions in cas_init_one().
  net/mlx5: Annotate mutex destroy for root ns
  net/mlx5e: Update netdev txq on completions during closure
  sctp: Start shutdown on association restart if in SHUTDOWN-SENT state and socket is closed
  sctp: Don't add the shutdown timer if its already been added
  r8152: support additional Microsoft Surface Ethernet Adapter variant
  net sched: fix reporting the first-time use timestamp
  net: revert "net: get rid of an signed integer overflow in ip_idents_reserve()"
  net: qrtr: Fix passing invalid reference to qrtr_local_enqueue()
  net/mlx5: Add command entry handling completion
  net: ipip: fix wrong address family in init error path
  net: inet_csk: Fix so_reuseport bind-address cache in tb->fast*
  __netif_receive_skb_core: pass skb by reference
  net: dsa: mt7530: fix roaming from DSA user ports
  dpaa_eth: fix usage as DSA master, try 3
  ax25: fix setsockopt(SO_BINDTODEVICE)
  ANDROID: modules: fix lockprove warning
  FROMGIT: USB: dummy-hcd: use configurable endpoint naming scheme
  UPSTREAM: usb: raw-gadget: fix null-ptr-deref when reenabling endpoints
  UPSTREAM: usb: raw-gadget: documentation updates
  UPSTREAM: usb: raw-gadget: support stalling/halting/wedging endpoints
  UPSTREAM: usb: raw-gadget: fix gadget endpoint selection
  UPSTREAM: usb: raw-gadget: improve uapi headers comments
  UPSTREAM: usb: raw-gadget: fix return value of ep read ioctls
  UPSTREAM: usb: raw-gadget: fix raw_event_queue_fetch locking
  UPSTREAM: usb: raw-gadget: Fix copy_to/from_user() checks
  f2fs: fix wrong discard space
  f2fs: compress: don't compress any datas after cp stop
  f2fs: remove unneeded return value of __insert_discard_tree()
  f2fs: fix wrong value of tracepoint parameter
  f2fs: protect new segment allocation in expand_inode_data
  f2fs: code cleanup by removing ifdef macro surrounding
  writeback: Avoid skipping inode writeback
  ANDROID: GKI: Update the ABI
  ANDROID: GKI: update whitelist
  ANDROID: GKI: support mm_event for FS/IO/UFS path
  ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
  FROMGIT: driver core: Update device link status correctly for SYNC_STATE_ONLY links
  UPSTREAM: driver core: Fix handling of SYNC_STATE_ONLY + STATELESS device links
  BACKPORT: driver core: Fix SYNC_STATE_ONLY device link implementation
  ANDROID: Bulk update the ABI xml and qcom whitelist
  Revert "ANDROID: Incremental fs: Avoid continually recalculating hashes"
  f2fs: avoid inifinite loop to wait for flushing node pages at cp_error
  f2fs: compress: fix zstd data corruption
  f2fs: add compressed/gc data read IO stat
  f2fs: fix potential use-after-free issue
  f2fs: compress: don't handle non-compressed data in workqueue
  f2fs: remove redundant assignment to variable err
  f2fs: refactor resize_fs to avoid meta updates in progress
  f2fs: use round_up to enhance calculation
  f2fs: introduce F2FS_IOC_RESERVE_COMPRESS_BLOCKS
  f2fs: Avoid double lock for cp_rwsem during checkpoint
  f2fs: report delalloc reserve as non-free in statfs for project quota
  f2fs: Fix wrong stub helper update_sit_info
  f2fs: compress: let lz4 compressor handle output buffer budget properly
  f2fs: remove blk_plugging in block_operations
  f2fs: introduce F2FS_IOC_RELEASE_COMPRESS_BLOCKS
  f2fs: shrink spinlock coverage
  f2fs: correctly fix the parent inode number during fsync()
  f2fs: introduce mempool for {,de}compress intermediate page allocation
  f2fs: introduce f2fs_bmap_compress()
  f2fs: support fiemap on compressed inode
  f2fs: support partial truncation on compressed inode
  f2fs: remove redundant compress inode check
  f2fs: use strcmp() in parse_options()
  f2fs: Use the correct style for SPDX License Identifier

 Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
	Documentation/devicetree/bindings/usb/dwc3.txt
	drivers/media/v4l2-core/v4l2-ctrls.c
	drivers/mmc/core/queue.c
	drivers/mmc/host/sdhci-msm.c
	drivers/scsi/ufs/ufs-qcom.c
	drivers/slimbus/qcom-ngd-ctrl.c
	drivers/usb/gadget/composite.c
	fs/crypto/keyring.c
	fs/f2fs/data.c
	include/linux/fs.h
	include/linux/usb/gadget.h
	include/uapi/linux/v4l2-controls.h
	kernel/sched/cpufreq_schedutil.c
	kernel/sched/fair.c
	kernel/time/tick-sched.c
	mm/vmalloc.c
	net/netlink/genetlink.c
	net/qrtr/qrtr.c
	sound/core/compress_offload.c
	sound/soc/soc-compress.c

 Fixed errors:
	drivers/scsi/ufs/ufshcd.c
	drivers/soc/qcom/rq_stats.c

Change-Id: I06ea6a6c3f239045e2947f27af617aa6f523bfdb
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
2020-10-14 20:04:29 +05:30

// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
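/*
 * Illustrative note (not part of the original source): userspace typically
 * enters SCHED_DEADLINE through the sched_setattr() syscall. A minimal
 * sketch, assuming a 10ms runtime budget every 100ms period (all values
 * in nanoseconds):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	/- 10ms  -/
 *		.sched_deadline	= 100 * 1000 * 1000,	/- 100ms -/
 *		.sched_period	= 100 * 1000 * 1000,	/- 100ms -/
 *	};
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *
 * As long as each instance needs less than 10ms of CPU time per 100ms,
 * the CBS guarantees no deadline is missed; a misbehaving instance gets
 * throttled instead of disturbing other tasks.
 */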
#include "sched.h"
#include "pelt.h"
#include "walt.h"

struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}
/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so-called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |      ACTIVE      |
 *          +------------------>+    contending    |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |  dequeue               |      |
 * +--------+-------+                |      |
 * |                |  t >= 0-lag    |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                |  sub_running_bw|      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |             t < 0-lag  |      |
 *          |                        |      |
 *          |                        V      |
 *          |            +----+------+------+
 *          | sub_running_bw  |      ACTIVE      |
 *          +-----------------+                  |
 *            inactive timer  |  non contending  |
 *            fired           +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
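/*
 * Worked example (illustrative, not from the original source): take a
 * task with dl_runtime = 10ms and dl_period = 100ms (bandwidth 0.1)
 * that blocks with 4ms of runtime left and 30ms to its deadline. Its
 * "0-lag time" is
 *
 *	deadline - runtime * dl_period / dl_runtime
 *	  = deadline - 4ms * 100 / 10 = deadline - 40ms,
 *
 * i.e. 10ms in the past, so task_non_contending() decreases running_bw
 * right away. Had 2ms of runtime been left instead, the 0-lag time would
 * be deadline - 20ms, i.e. 10ms in the future, and the inactive timer
 * would be armed for that instant.
 */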
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code.
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer.
	 */
	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
		if (dl_task(p))
			sub_running_bw(dl_se, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(&p->dl, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	struct dl_bw *dl_b;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see the running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);

		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/*
	 * And we finally need to fix up root_domain(s) bandwidth accounting,
	 * since p is still hanging out in the old (now moved to default) root
	 * domain.
	 */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}
#else
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or simply underestimating it during sched_setattr().
 */
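/*
 * Worked example (illustrative, not from the original source): with
 * dl_runtime = 10ms and dl_period = 100ms, a replenishment that finds
 * runtime = -3ms (the entity overran by 3ms) does one loop iteration:
 * deadline += 100ms and runtime becomes -3ms + 10ms = 7ms. Postponing
 * the deadline while refilling the budget is what keeps the entity's
 * bandwidth at 10/100 even across overruns.
 */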
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *	runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
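/*
 * Worked example (illustrative, not from the original source; DL_SCALE
 * scaling omitted for clarity): a task with dl_runtime = 10ms and
 * dl_deadline = 100ms (bandwidth 0.1) wakes up with runtime = 5ms left
 * and 20ms to its absolute deadline. Then 5/20 = 0.25 > 0.1, so reusing
 * the current parameters would exceed the reserved bandwidth. In the
 * multiplication form used below:
 *
 *	left  = dl_deadline * runtime       = 100 * 5  = 500
 *	right = (deadline - t) * dl_runtime =  20 * 10 = 200
 *
 * right < left, so the function returns true and the caller replenishes.
 * With 50ms to the deadline instead, right = 500 = left, the check fails
 * and the current runtime and deadline are kept.
 */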
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
/*
 * Revised wakeup rule [1]: for self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * such that runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
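/*
 * Worked example (illustrative, not from the original source): with
 * dl_runtime = 10ms and dl_deadline = 100ms (density 0.1), a task that
 * wakes up 20ms before its still-valid deadline has its runtime clipped
 * to 0.1 * 20ms = 2ms, rather than being replenished to the full 10ms.
 * In the code below this is laxity * dl_density, where dl_density holds
 * dl_runtime / dl_deadline scaled up by 2^BW_SHIFT.
 */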
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only to tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}
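/*
 * Illustrative parameter sets (not from the original source): a task with
 * (runtime, deadline, period) = (10ms, 100ms, 100ms) has an implicit
 * deadline, while (10ms, 50ms, 100ms) is a constrained deadline task: it
 * must receive its 10ms within the first 50ms of each 100ms period.
 */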
/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function
 * to find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; in that case it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *	schedule()
	 *	  deactivate_task()
	 *	    dequeue_task_dl()
	 *	      update_curr_dl()
	 *	        start_dl_timer()
	 *	    __dequeue_task_dl()
	 *	prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}
/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
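/*
 * Worked example (illustrative, not from the original source): a
 * constrained task with runtime = 10ms, deadline = 50ms, period = 100ms
 * wakes up 60ms into its period: the 50ms deadline has passed but the
 * next period starts in 40ms. Replenishing now would grant 10ms per
 * 50ms (0.2) instead of the admitted 10ms per 100ms (0.1), so the task
 * is throttled and the timer is set to fire at the next period instead.
 */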
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 *
 *	"dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 *
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
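/*
 * Worked example (illustrative, not from the original source): let
 * Umax = 0.95 (so u_act_min = u / 0.95), and consider a task with
 * u = 0.25 on a runqueue with Uinact = 0.1 and Uextra = 0.45. Then
 * u_act_min ~= 0.263 and 1 - Uinact - Uextra = 0.45; since
 * 0.1 + 0.45 <= 1 - 0.263, the "else" branch below picks u_act = 0.45,
 * and 1ms of wall-clock execution consumes only 0.45ms of the reserved
 * runtime -- the rest is reclaimed from the unused bandwidth.
 */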
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than
	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
	 * leading to wrong results).
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}
/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec, scaled_delta_exec;
	int cpu = cpu_of(rq);
	u64 now;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (dl_entity_is_special(dl_se))
		return;

	/*
	 * For tasks that participate in GRUB, we implement GRUB-PA: the
	 * spare reclaimed bandwidth is used to clock down frequency.
	 *
	 * For the others, we still need to scale reservation parameters
	 * according to current frequency and CPU maximum capacity.
	 */
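	/*
	 * Worked example of the scaling branch below (illustrative, not
	 * from the original source): at 50% of the maximum frequency
	 * (scale_freq = 512/1024) on a CPU of capacity 512/1024, 1ms of
	 * measured execution is charged as 1ms * 0.5 * 0.5 = 0.25ms of
	 * runtime, so reservations keep their meaning relative to the
	 * fastest CPU running at full speed.
	 */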
	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
		scaled_delta_exec = grub_reclaim(delta_exec,
						 rq,
						 &curr->dl);
	} else {
		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
		unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);

		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
	}

	dl_se->runtime -= scaled_delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;

		/* If requested, inform the user about runtime overruns. */
		if (dl_runtime_exceeded(dl_se) &&
		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
			dl_se->dl_overrun = 1;

		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	sched_clock_tick();
	update_rq_clock(rq);

	if (!dl_task(p) || p->state == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(p);

		goto unlock;
	}
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sub_running_bw(dl_se, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:
	task_rq_unlock(rq, p, &rf);
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = inactive_task_timer;
}
#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
	} else {
		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);
	walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
	walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se, pi_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se, pi_se);
	} else if ((flags & ENQUEUE_RESTORE) &&
		  dl_time_before(dl_se->deadline,
				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
		setup_new_dl_entity(dl_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
struct sched_dl_entity *pi_se = &p->dl;
/*
* Use the scheduling parameters of the top pi-waiter task if:
* - we have a top pi-waiter which is a SCHED_DEADLINE task AND
* - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
* smaller than our deadline OR we are a !SCHED_DEADLINE task getting
* boosted due to a SCHED_DEADLINE pi-waiter).
* Otherwise we keep our runtime and deadline.
*/
if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
pi_se = &pi_task->dl;
} else if (!dl_prio(p->normal_prio)) {
/*
* Special case in which we have a !SCHED_DEADLINE task
* that is going to be deboosted, but exceeds its
* runtime while doing so. No point in replenishing
* it, as it's going to return to its original
* scheduling class after this.
*/
BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
return;
}
/*
* Check if a constrained deadline task was activated
* after the deadline but before the next period.
* If that is the case, the task will be throttled and
* the replenishment timer will be set to the next period.
*/
if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
dl_check_constrained_dl(&p->dl);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
add_rq_bw(&p->dl, &rq->dl);
add_running_bw(&p->dl, &rq->dl);
}
/*
* If p is throttled, we do not enqueue it. In fact, if it exhausted
* its budget it needs a replenishment and, since it is now on
* its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this.
* However, the active utilization does not depend on the fact
* that the task is on the runqueue or not (but depends on the
* task's state - in GRUB parlance, "inactive" vs "active contending").
* In other words, even if a task is throttled its utilization must
* be counted in the active utilization; hence, we need to call
* add_running_bw().
*/
if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
if (flags & ENQUEUE_WAKEUP)
task_contending(&p->dl, flags);
return;
}
enqueue_dl_entity(&p->dl, pi_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
dequeue_dl_entity(&p->dl);
dequeue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* This check allows us to start the inactive timer (or to immediately
* decrease the active utilization, if needed) in two cases:
* when the task blocks and when it is terminating
* (p->state == TASK_DEAD). We can handle the two cases in the same
* way, because from GRUB's point of view the same thing is happening
* (the task moves from "active contending" to "active non contending"
* or "inactive")
*/
if (flags & DEQUEUE_SLEEP)
task_non_contending(p);
}
/*
* Yield task semantic for -deadline tasks is:
*
* get off the CPU until our next instance, with
* a new runtime. This is of little use now, since we
* don't have a bandwidth reclaiming mechanism. Anyway,
* bandwidth reclaiming is planned for the future, and
* yield_task_dl will indicate that some spare budget
* is available for other task instances to use.
*/
static void yield_task_dl(struct rq *rq)
{
/*
* We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it
* new scheduling parameters (thanks to dl_yielded=1).
*/
rq->curr->dl.dl_yielded = 1;
update_rq_clock(rq);
update_curr_dl(rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq);
}
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
{
struct task_struct *curr;
struct rq *rq;
if (sd_flag != SD_BALANCE_WAKE)
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
* If we are dealing with a -deadline task, we must
* decide where to wake it up.
* If it has a later deadline and the current task
* on this rq can't move (provided the waking task
* can!) we prefer to send it somewhere else. On the
* other hand, if it has a shorter deadline, we
* try to make it stay here, it might be important.
*/
if (unlikely(dl_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &curr->dl)) &&
(p->nr_cpus_allowed > 1)) {
int target = find_later_rq(p);
if (target != -1 &&
(dl_time_before(p->dl.deadline,
cpu_rq(target)->dl.earliest_dl.curr) ||
(cpu_rq(target)->dl.dl_nr_running == 0)))
cpu = target;
}
rcu_read_unlock();
out:
return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
struct rq *rq;
if (p->state != TASK_WAKING)
return;
rq = task_rq(p);
/*
* Since p->state == TASK_WAKING, set_task_cpu() has been called
* from try_to_wake_up(). Hence, p->pi_lock is locked, but
* rq->lock is not... So, lock it
*/
raw_spin_lock(&rq->lock);
if (p->dl.dl_non_contending) {
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be cancelled, inactive_task_timer()
* will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
sub_rq_bw(&p->dl, &rq->dl);
raw_spin_unlock(&rq->lock);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpudl_find(&rq->rd->cpudl, p, NULL))
return;
resched_curr(rq);
}
#endif /* CONFIG_SMP */
/*
* Only called when both the current and waking task are -deadline
* tasks.
*/
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags)
{
if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* In the unlikely case current and p have the same deadline
* let us try to decide what's the best thing to do...
*/
if ((p->dl.deadline == rq->curr->dl.deadline) &&
!test_tsk_need_resched(rq->curr))
check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
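/* The leftmost (cached) node of the rb-tree is the earliest deadline. */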
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
struct dl_rq *dl_rq)
{
struct rb_node *left = rb_first_cached(&dl_rq->root);
if (!left)
return NULL;
return rb_entry(left, struct sched_dl_entity, rb_node);
}
static struct task_struct *
pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct sched_dl_entity *dl_se;
struct task_struct *p;
struct dl_rq *dl_rq;
dl_rq = &rq->dl;
if (need_pull_dl_task(rq, prev)) {
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we're
* being very careful to re-start the picking loop.
*/
rq_unpin_lock(rq, rf);
pull_dl_task(rq);
rq_repin_lock(rq, rf);
/*
* pull_dl_task() can drop (and re-acquire) rq->lock; this
* means a stop task can slip in, in which case we need to
* re-start task selection.
*/
if (rq->stop && task_on_rq_queued(rq->stop))
return RETRY_TASK;
}
/*
* When prev is DL, we may throttle it in put_prev_task().
* So, we update time before we check for dl_nr_running.
*/
if (prev->sched_class == &dl_sched_class)
update_curr_dl(rq);
if (unlikely(!dl_rq->dl_nr_running))
return NULL;
put_prev_task(rq, prev);
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
p->se.exec_start = rq_clock_task(rq);
/* Running task will never be pushed. */
dequeue_pushable_dl_task(rq, p);
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
deadline_queue_push_tasks(rq);
if (rq->curr->sched_class != &dl_sched_class)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
* be set and schedule() will start a new hrtick for the next task.
*/
if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
is_leftmost(p, &rq->dl))
start_hrtick_dl(rq, p);
}
static void task_fork_dl(struct task_struct *p)
{
/*
* SCHED_DEADLINE tasks cannot fork and this is achieved through
* sched_fork()
*/
}
static void set_curr_task_dl(struct rq *rq)
{
struct task_struct *p = rq->curr;
p->se.exec_start = rq_clock_task(rq);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define DL_MAX_TRIES 3
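/*
 * A task can be moved to @cpu only if it is not currently running and
 * @cpu is in its affinity mask.
 */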
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, &p->cpus_allowed))
return 1;
return 0;
}
/*
* Return the earliest pushable task of @rq that is suitable to be
* executed on @cpu, or NULL if there is none:
*/
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
struct task_struct *p = NULL;
if (!has_pushable_dl_tasks(rq))
return NULL;
next_node:
if (next_node) {
p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
if (pick_dl_task(rq, p, cpu))
return p;
next_node = rb_next(next_node);
goto next_node;
}
return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
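/*
 * Find a candidate CPU for @task: cpudl_find() fills later_mask with
 * CPUs that look suitable (free, or running a task with a later
 * deadline); the heuristics below then pick among them, preferring
 * cache hotness and topological closeness.
 */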
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
/* Make sure the mask is initialized first */
if (unlikely(!later_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1;
/*
* We have to consider system topology and task affinity
* first, then we can look for a suitable CPU.
*/
if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
return -1;
/*
* If we are here, some targets have been found. The most
* suitable of them is the runqueue which, among those whose
* current tasks have a later deadline than our task's, has the
* latest possible one.
*
* Now we check how well this matches the task's affinity and
* the system topology.
*
* The last CPU where the task ran is our first guess, since it
* is most likely cache-hot there.
*/
if (cpumask_test_cpu(cpu, later_mask))
return cpu;
/*
* Check if this_cpu is to be skipped (i.e., it is
* not in the mask) or not.
*/
if (!cpumask_test_cpu(this_cpu, later_mask))
this_cpu = -1;
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* If possible, preempting this_cpu is
* cheaper than migrating.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_first_and(later_mask,
sched_domain_span(sd));
/*
* Last chance: if a CPU in both later_mask and the
* current sd span is valid, that becomes our
* choice. Of course, the latest possible CPU is
* already under consideration through later_mask.
*/
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* At this point, all our guesses failed; we just return
* 'something' and let the caller sort things out.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any(later_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
struct rq *later_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < DL_MAX_TRIES; tries++) {
cpu = find_later_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
later_rq = cpu_rq(cpu);
if (later_rq->dl.dl_nr_running &&
!dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr)) {
/*
* Target rq has tasks of equal or earlier deadline,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
later_rq = NULL;
break;
}
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
task_running(rq, task) ||
!dl_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
}
}
/*
* If the rq we found has no -deadline task, or
* its earliest one has a later deadline than our
* task, the rq is a good one.
*/
if (!later_rq->dl.dl_nr_running ||
dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr))
break;
/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
later_rq = NULL;
}
return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_dl_tasks(rq))
return NULL;
p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
struct task_struct, pushable_dl_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!dl_task(p));
return p;
}
/*
* See if the non-running -deadline tasks on this rq
* can be sent to some other CPU where they can preempt
* and start executing.
*/
static int push_dl_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *later_rq;
int ret = 0;
if (!rq->dl.overloaded)
return 0;
next_task = pick_next_pushable_dl_task(rq);
if (!next_task)
return 0;
retry:
if (unlikely(next_task == rq->curr)) {
WARN_ON(1);
return 0;
}
/*
* If next_task preempts rq->curr, and rq->curr
* can move away, it makes sense to just reschedule
* without going further in pushing next_task.
*/
if (dl_task(rq->curr) &&
dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
rq->curr->nr_cpus_allowed > 1) {
resched_curr(rq);
return 0;
}
/* We might release rq lock */
get_task_struct(next_task);
/* Will lock the rq it'll find */
later_rq = find_lock_later_rq(next_task, rq);
if (!later_rq) {
struct task_struct *task;
/*
* We must check all this again, since
* find_lock_later_rq releases rq->lock and it is
* then possible that next_task has migrated.
*/
task = pick_next_pushable_dl_task(rq);
if (task == next_task) {
/*
* The task is still there. We don't try
* again, some other CPU will pull it when ready.
*/
goto out;
}
if (!task)
/* No more tasks */
goto out;
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
sub_running_bw(&next_task->dl, &rq->dl);
sub_rq_bw(&next_task->dl, &rq->dl);
next_task->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(next_task, later_rq->cpu);
next_task->on_rq = TASK_ON_RQ_QUEUED;
add_rq_bw(&next_task->dl, &later_rq->dl);
/*
* Update the later_rq clock here, because the clock is used
* by the cpufreq_update_util() inside __add_running_bw().
*/
update_rq_clock(later_rq);
add_running_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
ret = 1;
resched_curr(later_rq);
double_unlock_balance(rq, later_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_dl_tasks(struct rq *rq)
{
/* push_dl_task() will return true if it moved a -deadline task */
while (push_dl_task(rq))
;
}
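/*
 * Look at the CPUs marked as overloaded (rd->dlo_mask) and pull over
 * the earliest-deadline pushable task that would preempt whatever is
 * running here.
 */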
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
struct task_struct *p;
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
if (likely(!dl_overloaded(this_rq)))
return;
/*
* Match the barrier from dl_set_overload(); this guarantees that if we
* see overloaded we must also see the dlo_mask bit.
*/
smp_rmb();
for_each_cpu(cpu, this_rq->rd->dlo_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* It looks racy, and it is! However, as in sched_rt.c,
* we are fine with this.
*/
if (this_rq->dl.dl_nr_running &&
dl_time_before(this_rq->dl.earliest_dl.curr,
src_rq->dl.earliest_dl.next))
continue;
/* Might drop this_rq->lock */
double_lock_balance(this_rq, src_rq);
/*
* If there are no more pullable tasks on the
* rq, we're done with it.
*/
if (src_rq->dl.dl_nr_running <= 1)
goto skip;
p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
/*
* We found a task to be pulled if:
* - it preempts our current (if there's one),
* - it will preempt the last one we pulled (if any).
*/
if (p && dl_time_before(p->dl.deadline, dmin) &&
(!this_rq->dl.dl_nr_running ||
dl_time_before(p->dl.deadline,
this_rq->dl.earliest_dl.curr))) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* Then we pull iff p has actually an earlier
* deadline than the current task of its runqueue.
*/
if (dl_time_before(p->dl.deadline,
src_rq->curr->dl.deadline))
goto skip;
resched = true;
deactivate_task(src_rq, p, 0);
sub_running_bw(&p->dl, &src_rq->dl);
sub_rq_bw(&p->dl, &src_rq->dl);
p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, this_cpu);
p->on_rq = TASK_ON_RQ_QUEUED;
add_rq_bw(&p->dl, &this_rq->dl);
add_running_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
}
skip:
double_unlock_balance(this_rq, src_rq);
}
if (resched)
resched_curr(this_rq);
}
/*
* Since the task is not running and a reschedule is not going to happen
* anytime soon on its runqueue, we try pushing it away now.
*/
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &rq->curr->dl))) {
push_dl_tasks(rq);
}
}
static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask)
{
struct root_domain *src_rd;
struct rq *rq;
BUG_ON(!dl_task(p));
rq = task_rq(p);
src_rd = rq->rd;
/*
* Migrating a SCHED_DEADLINE task between exclusive
* cpusets (different root_domains) entails a bandwidth
* update. We already made space for us in the destination
* domain (see cpuset_can_attach()).
*/
if (!cpumask_intersects(src_rd->span, new_mask)) {
struct dl_bw *src_dl_b;
src_dl_b = dl_bw_of(cpu_of(rq));
/*
* We now free resources of the root_domain we are migrating
* off. In the worst case, sched_setattr() may temporarily fail
* until we complete the update.
*/
raw_spin_lock(&src_dl_b->lock);
__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&src_dl_b->lock);
}
set_cpus_allowed_common(p, new_mask);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
if (rq->dl.dl_nr_running > 0)
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
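/* Allocate the per-CPU scratch cpumasks used by find_later_rq(). */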
void __init init_sched_dl_class(void)
{
unsigned int i;
for_each_possible_cpu(i)
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
/*
* task_non_contending() can start the "inactive timer" (if the 0-lag
* time is in the future). If the task switches back to dl before
* the "inactive timer" fires, it can continue to consume its current
* runtime using its current deadline. If it stays outside of
* SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
* will reset the task parameters.
*/
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
* might migrate away from this rq while continuing to run on
* some other class. We need to remove its contribution from
* this rq running_bw now, or sub_rq_bw (below) will complain.
*/
if (p->dl.dl_non_contending)
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
* at the 0-lag time, because the task could have been migrated
* in the meantime while running as SCHED_OTHER.
*/
if (p->dl.dl_non_contending)
p->dl.dl_non_contending = 0;
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded CPU, if any.
*/
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
return;
deadline_queue_pull_task(rq);
}
/*
* When switching to -deadline, we may overload the rq, so we
* try to push some other task away, if possible.
*/
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
return;
}
if (rq->curr != p) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
deadline_queue_push_tasks(rq);
#endif
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
}
}
/*
* If the scheduling parameters of a -deadline task changed,
* a push or pull operation might be needed.
*/
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
* we don't have the old deadline value, and
* we can't tell whether the task is increasing
* or lowering its prio, so...
*/
if (!rq->dl.overloaded)
deadline_queue_pull_task(rq);
/*
* If we now have an earlier deadline task than p,
* then reschedule, provided p is still on this
* runqueue.
*/
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
resched_curr(rq);
#else
/*
* Again, we don't know if p has an earlier
* or later deadline, so let's blindly set a
* (maybe not needed) rescheduling point.
*/
resched_curr(rq);
#endif /* CONFIG_SMP */
}
}
const struct sched_class dl_sched_class = {
.next = &rt_sched_class,
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
.check_preempt_curr = check_preempt_curr_dl,
.pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl,
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
#endif
.set_curr_task = set_curr_task_dl,
.task_tick = task_tick_dl,
.task_fork = task_fork_dl,
.prio_changed = prio_changed_dl,
.switched_from = switched_from_dl,
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
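/*
 * Validate a new global RT/DL bandwidth limit (built from the
 * sched_rt_period_us / sched_rt_runtime_us knobs): reject it if it is
 * smaller than the -deadline bandwidth already admitted somewhere.
 */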
int sched_dl_global_validate(void)
{
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
struct dl_bw *dl_b;
int cpu, ret = 0;
unsigned long flags;
/*
* Here we want to ensure the new bandwidth is not being set to a
* value smaller than the bandwidth currently allocated in any of
* the root_domains.
*
* FIXME: Cycling over all the CPUs is overkill, but simpler than
* cycling over the root_domains... Discussion on different/better
* solutions is welcome!
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
if (ret)
break;
}
return ret;
}
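/*
 * Precompute the ratios used by GRUB reclaiming. As a worked example
 * (assuming BW_SHIFT == 20, RATIO_SHIFT == 8 and the default limits of
 * runtime = 950000us over period = 1000000us):
 *
 *	bw_ratio = to_ratio(950000, 1000000) >> (20 - 8)
 *	         = ((1000000 << 20) / 950000) >> 12 ~= 269   (~1.05 << 8)
 *	extra_bw = to_ratio(1000000, 950000)
 *	         = (950000 << 20) / 1000000 ~= 996147        (0.95 << 20)
 */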
void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
if (global_rt_runtime() == RUNTIME_INF) {
dl_rq->bw_ratio = 1 << RATIO_SHIFT;
dl_rq->extra_bw = 1 << BW_SHIFT;
} else {
dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
dl_rq->extra_bw = to_ratio(global_rt_period(),
global_rt_runtime());
}
}
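/*
 * Apply a new global limit to every root_domain's dl_bw and refresh the
 * per-rq GRUB ratios; new_bw stays (u64)-1 when the runtime is
 * RUNTIME_INF, i.e. unlimited.
 */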
void sched_dl_do_global(void)
{
u64 new_bw = -1;
struct dl_bw *dl_b;
int cpu;
unsigned long flags;
def_dl_bandwidth.dl_period = global_rt_period();
def_dl_bandwidth.dl_runtime = global_rt_runtime();
if (global_rt_runtime() != RUNTIME_INF)
new_bw = to_ratio(global_rt_period(), global_rt_runtime());
/*
* FIXME: As above...
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
}
}
/*
* We must be sure that accepting a new task (or allowing changing the
* parameters of an existing one) is consistent with the bandwidth
* constraints. If yes, this function also accordingly updates the currently
* allocated bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
*/
int sched_dl_overflow(struct task_struct *p, int policy,
const struct sched_attr *attr)
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
u64 period = attr->sched_period ?: attr->sched_deadline;
u64 runtime = attr->sched_runtime;
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return 0;
/* !deadline task may carry old deadline bandwidth */
if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
return 0;
/*
* Whether a task enters, leaves, or stays -deadline but changes
* its parameters, we may need to update the total allocated
* bandwidth of the container accordingly.
*/
raw_spin_lock(&dl_b->lock);
cpus = dl_bw_cpus(task_cpu(p));
if (dl_policy(policy) && !task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, 0, new_bw)) {
if (hrtimer_active(&p->dl.inactive_timer))
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
err = 0;
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
/*
* XXX this is slightly incorrect: when the task
* utilization decreases, we should delay the total
* utilization change until the task's 0-lag point.
* But this would require setting the task's "inactive
* timer" even when the task is not inactive.
*/
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
dl_change_utilization(p, new_bw);
err = 0;
} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
/*
* Do not decrease the total deadline utilization here,
* switched_from_dl() will take care to do it at the correct
* (0-lag) time.
*/
err = 0;
}
raw_spin_unlock(&dl_b->lock);
return err;
}
/*
* This function initializes the sched_dl_entity of a newly becoming
* SCHED_DEADLINE task.
*
* Only the static values are considered here, the actual runtime and the
* absolute deadline will be properly calculated when the task is enqueued
* for the first time with its new policy.
*/
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = attr->sched_runtime;
dl_se->dl_deadline = attr->sched_deadline;
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
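/*
 * For reference, a hypothetical userspace counterpart feeding the fields
 * above (a sketch only; it assumes a sched_setattr(2) syscall wrapper,
 * which glibc does not provide):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms
 *		.sched_deadline	=  30 * 1000 * 1000,	// 30ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * When sched_period is zero, dl_period above falls back to the deadline.
 */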
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
attr->sched_priority = p->rt_priority;
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;
attr->sched_period = dl_se->dl_period;
attr->sched_flags = dl_se->flags;
}
/*
* This function validates the new parameters of a -deadline task.
* We require a non-zero deadline that is greater than or equal to
* the runtime, and a period that is either zero or greater than or
* equal to the deadline. Furthermore, we have to be sure that
* user parameters are above the internal resolution of 1us (we
* check sched_runtime only since it is always the smaller one) and
* below 2^63 ns (we have to check both sched_deadline and
* sched_period, as the latter can be zero).
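*
* For example, runtime = 10ms, deadline = 30ms, period = 100ms passes
* all the checks below, while a 512ns runtime would be rejected by the
* DL_SCALE test (assuming DL_SCALE == 10, i.e. a 1024ns floor).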
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
/* special dl tasks don't actually use any parameter */
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return true;
/* deadline != 0 */
if (attr->sched_deadline == 0)
return false;
/*
* Since we truncate DL_SCALE bits, make sure we're at least
* that big.
*/
if (attr->sched_runtime < (1ULL << DL_SCALE))
return false;
/*
* Since we use the MSB for wrap-around and sign issues, make
* sure it's not set (mind that period can be equal to zero).
*/
if (attr->sched_deadline & (1ULL << 63) ||
attr->sched_period & (1ULL << 63))
return false;
/* runtime <= deadline <= period (if period != 0) */
if ((attr->sched_period != 0 &&
attr->sched_period < attr->sched_deadline) ||
attr->sched_deadline < attr->sched_runtime)
return false;
return true;
}
/*
* This function clears the sched_dl_entity static params.
*/
void __dl_clear_params(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = 0;
dl_se->dl_deadline = 0;
dl_se->dl_period = 0;
dl_se->flags = 0;
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
}
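/*
 * Tell whether @attr differs from @p's current -deadline parameters, so
 * callers know whether a new admission test is required.
 */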
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
if (dl_se->dl_runtime != attr->sched_runtime ||
dl_se->dl_deadline != attr->sched_deadline ||
dl_se->dl_period != attr->sched_period ||
dl_se->flags != attr->sched_flags)
return true;
return false;
}
#ifdef CONFIG_SMP
int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
{
unsigned int dest_cpu;
struct dl_bw *dl_b;
bool overflow;
int cpus, ret;
unsigned long flags;
dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
rcu_read_lock_sched();
dl_b = dl_bw_of(dest_cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(dest_cpu);
overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
if (overflow) {
ret = -EBUSY;
} else {
/*
* We reserve space for this task in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, p->dl.dl_bw, cpus);
ret = 0;
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
return ret;
}
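/*
 * Check whether the -deadline bandwidth admitted under @cur would still
 * fit if the cpuset were reduced to @trial's CPUs.
 */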
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial)
{
int ret = 1, trial_cpus;
struct dl_bw *cur_dl_b;
unsigned long flags;
rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial);
raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
if (cur_dl_b->bw != -1 &&
cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
ret = 0;
raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
rcu_read_unlock_sched();
return ret;
}
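/*
 * Report whether the root_domain of @cpu has any -deadline bandwidth
 * admitted at all (__dl_overflow() with zero new bandwidth), e.g. when
 * deciding whether CPUs may be taken offline.
 */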
bool dl_cpu_busy(unsigned int cpu)
{
unsigned long flags;
struct dl_bw *dl_b;
bool overflow;
int cpus;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
return overflow;
}
#endif
#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */