This is the 4.19.156 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+qe0EACgkQONu9yGCS
 aT6MSw//TZRP6iLK2RhIrZu2jKD8jfYbHMT9JgKV2QCw7meg9q0JMj+SNP9CPbiL
 oOYtsXsRFRnAh98aBXNMFmzV7Zm0uUu0XGeFGxnf8y2X7EI1nZ6plvrCUYD8dCiF
 IPR67yyc5MojNQTfm0XDvQ3C7bKx5PuheRCLwhSuKclnrDxi8FNjS2NSBxi5G32j
 B7NzateeG7m/zE9fG1RkiJzfwu8/k0PKKecEYFwjRSC5QrXwvtEKdz/X/HkoXsck
 345wWHCTObpcDbDWkkUF5VuR36kCWMP+uYT4lNihZTV9+9b8Gz9ghhanDIuVCoU1
 biEsJnCORe/PV/xcgGJNkpEtabbDQNJ5Dn3wLKSuRAbBOkN2/nwzZa4EDoXWQSTv
 PDhzbLDjFjMu8Yb9PKrylhYGTmlNS4mA3hMszF4QNszhRyxTyDGln4MbUkpKg4sO
 HgU4JLvDOCfkCsGTBJ4XGTBcH+6ZxZwm1b+e4uy3FFZW2CEqSetZ3TCyIBxdLupa
 8JYmfqQjmaj0KUiUV9l1SJ6uHcIyg/FoNuCAdtDl7mLuzZdwtEhk3TeaZn4iwxWJ
 Ku+2qY0X6wsePOTfIA7puWBbK+IonM24Q3oIDVqjA+2yrmLJGlYuaQJrSPzEJHoh
 upHznwsU2W7MIfA6hJIcQeWIvzM4w5GSKUr3YeknVPIStP1ZqRg=
 =trRk
 -----END PGP SIGNATURE-----

Merge 4.19.156 into android-4.19-stable

Changes in 4.19.156
	drm/i915: Break up error capture compression loops with cond_resched()
	tipc: fix use-after-free in tipc_bcast_get_mode
	ptrace: fix task_join_group_stop() for the case when current is traced
	cadence: force nonlinear buffers to be cloned
	chelsio/chtls: fix memory leaks caused by a race
	chelsio/chtls: fix always leaking ctrl_skb
	gianfar: Replace skb_realloc_headroom with skb_cow_head for PTP
	gianfar: Account for Tx PTP timestamp in the skb headroom
	net: usb: qmi_wwan: add Telit LE910Cx 0x1230 composition
	sctp: Fix COMM_LOST/CANT_STR_ASSOC err reporting on big-endian platforms
	sfp: Fix error handing in sfp_probe()
	blktrace: fix debugfs use after free
	btrfs: extent_io: Kill the forward declaration of flush_write_bio
	btrfs: extent_io: Move the BUG_ON() in flush_write_bio() one level up
	Revert "btrfs: flush write bio if we loop in extent_write_cache_pages"
	btrfs: flush write bio if we loop in extent_write_cache_pages
	btrfs: extent_io: Handle errors better in extent_write_full_page()
	btrfs: extent_io: Handle errors better in btree_write_cache_pages()
	btrfs: extent_io: add proper error handling to lock_extent_buffer_for_io()
	Btrfs: fix unwritten extent buffers and hangs on future writeback attempts
	btrfs: Don't submit any btree write bio if the fs has errors
	btrfs: Move btrfs_check_chunk_valid() to tree-check.[ch] and export it
	btrfs: tree-checker: Make chunk item checker messages more readable
	btrfs: tree-checker: Make btrfs_check_chunk_valid() return EUCLEAN instead of EIO
	btrfs: tree-checker: Check chunk item at tree block read time
	btrfs: tree-checker: Verify dev item
	btrfs: tree-checker: Fix wrong check on max devid
	btrfs: tree-checker: Enhance chunk checker to validate chunk profile
	btrfs: tree-checker: Verify inode item
	btrfs: tree-checker: fix the error message for transid error
	Fonts: Replace discarded const qualifier
	ALSA: usb-audio: Add implicit feedback quirk for Zoom UAC-2
	ALSA: usb-audio: add usb vendor id as DSD-capable for Khadas devices
	ALSA: usb-audio: Add implicit feedback quirk for Qu-16
	ALSA: usb-audio: Add implicit feedback quirk for MODX
	mm: mempolicy: fix potential pte_unmap_unlock pte error
	lib/crc32test: remove extra local_irq_disable/enable
	kthread_worker: prevent queuing delayed work from timer_fn when it is being canceled
	mm: always have io_remap_pfn_range() set pgprot_decrypted()
	gfs2: Wake up when sd_glock_disposal becomes zero
	ring-buffer: Fix recursion protection transitions between interrupt context
	ftrace: Fix recursion check for NMI test
	ftrace: Handle tracing when switching between context
	tracing: Fix out of bounds write in get_trace_buf
	futex: Handle transient "ownerless" rtmutex state correctly
	ARM: dts: sun4i-a10: fix cpu_alert temperature
	x86/kexec: Use up-to-dated screen_info copy to fill boot params
	of: Fix reserved-memory overlap detection
	blk-cgroup: Fix memleak on error path
	blk-cgroup: Pre-allocate tree node on blkg_conf_prep
	scsi: core: Don't start concurrent async scan on same host
	vsock: use ns_capable_noaudit() on socket create
	drm/vc4: drv: Add error handding for bind
	ACPI: NFIT: Fix comparison to '-ENXIO'
	vt: Disable KD_FONT_OP_COPY
	fork: fix copy_process(CLONE_PARENT) race with the exiting ->real_parent
	serial: 8250_mtk: Fix uart_get_baud_rate warning
	serial: txx9: add missing platform_driver_unregister() on error in serial_txx9_init
	USB: serial: cyberjack: fix write-URB completion race
	USB: serial: option: add Quectel EC200T module support
	USB: serial: option: add LE910Cx compositions 0x1203, 0x1230, 0x1231
	USB: serial: option: add Telit FN980 composition 0x1055
	USB: Add NO_LPM quirk for Kingston flash drive
	usb: mtu3: fix panic in mtu3_gadget_stop()
	ARC: stack unwinding: avoid indefinite looping
	Revert "ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE"
	PM: runtime: Resume the device earlier in __device_release_driver()
	perf/core: Fix a memory leak in perf_event_parse_addr_filter()
	tools: perf: Fix build error in v4.19.y
	net: dsa: read mac address from DT for slave device
	arm64: dts: marvell: espressobin: Add ethernet switch aliases
	Linux 4.19.156

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I87af8871465f54de0332fa74bc1f342b7fe99061
commit bc09bee25e
Author: Greg Kroah-Hartman
Date:   2020-11-10 13:23:09 +01:00

67 changed files with 807 additions and 297 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 155
SUBLEVEL = 156
EXTRAVERSION =
NAME = "People's Front"


@ -156,6 +156,7 @@ END(EV_Extension)
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
@ -198,9 +199,15 @@ tracesys_exit:
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
; stop_pc info by gdb needs this info
lr r0, [efa]
mov r1, sp
; Now that we have read EFA, it is safe to do "fake" rtie
; and get out of CPU exception mode
FAKE_RET_FROM_EXCPN
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
@ -227,10 +234,6 @@ ENTRY(EV_Trap)
EXCEPTION_PROLOGUE
lr r12, [efa]
FAKE_RET_FROM_EXCPN
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
bmsk.f 0, r9, 7
@ -238,6 +241,9 @@ ENTRY(EV_Trap)
;============ TRAP (no param): syscall top level
; First return from Exception to pure K mode (Exception/IRQs renabled)
FAKE_RET_FROM_EXCPN
; If syscall tracing ongoing, invoke pre-post-hooks
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE


@ -115,7 +115,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int ret = 0;
int ret = 0, cnt = 0;
unsigned int address;
struct unwind_frame_info frame_info;
@ -135,6 +135,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
break;
frame_info.regs.r63 = frame_info.regs.r31;
if (cnt++ > 128) {
printk("unwinder looping too long, aborting !\n");
return 0;
}
}
return address; /* return the last address it saw */


@ -143,7 +143,7 @@
trips {
cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <850000>;
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};


@ -21,6 +21,10 @@
aliases {
ethernet0 = &eth0;
/* for dsa slave device */
ethernet1 = &switch0port1;
ethernet2 = &switch0port2;
ethernet3 = &switch0port3;
serial0 = &uart0;
serial1 = &uart1;
};
@ -136,25 +140,25 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
switch0port0: port@0 {
reg = <0>;
label = "cpu";
ethernet = <&eth0>;
};
port@1 {
switch0port1: port@1 {
reg = <1>;
label = "wan";
phy-handle = <&switch0phy0>;
};
port@2 {
switch0port2: port@2 {
reg = <2>;
label = "lan0";
phy-handle = <&switch0phy1>;
};
port@3 {
switch0port3: port@3 {
reg = <3>;
label = "lan1";
phy-handle = <&switch0phy2>;


@ -211,8 +211,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
/* Copying screen_info will do? */
memcpy(&params->screen_info, &boot_params.screen_info,
sizeof(struct screen_info));
memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
/* Fill in memsize later */
params->screen_info.ext_mem_k = 0;


@ -876,13 +876,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
goto fail;
}
if (radix_tree_preload(GFP_KERNEL)) {
blkg_free(new_blkg);
ret = -ENOMEM;
goto fail;
}
rcu_read_lock();
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
blkg_free(new_blkg);
goto fail_preloaded;
}
if (blkg) {
@ -891,10 +898,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg = blkg_create(pos, q, new_blkg);
if (unlikely(IS_ERR(blkg))) {
ret = PTR_ERR(blkg);
goto fail_unlock;
goto fail_preloaded;
}
}
radix_tree_preload_end();
if (pos == blkcg)
goto success;
}
@ -904,6 +913,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
ctx->body = body;
return 0;
fail_preloaded:
radix_tree_preload_end();
fail_unlock:
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
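
The shape of this fix is worth noting: the radix-tree node is preallocated with radix_tree_preload() before queue_lock is taken (allocation may sleep, the lock section may not), and every failure path now ends the preload and frees new_blkg. A minimal userspace sketch of the same discipline, with illustrative names rather than the kernel API:

#include <pthread.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate before locking; the locked section only links the node in. */
static int insert(int key)
{
	struct node *n = malloc(sizeof(*n));	/* may sleep: do it unlocked */

	if (!n)
		return -1;

	pthread_mutex_lock(&list_lock);
	if (head && head->key == key) {		/* stand-in duplicate check */
		pthread_mutex_unlock(&list_lock);
		free(n);	/* every failure path releases the preallocation */
		return -1;
	}
	n->key = key;
	n->next = head;
	head = n;
	pthread_mutex_unlock(&list_lock);
	return 0;
}

int main(void)
{
	return insert(1) || insert(2);
}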


@ -1535,7 +1535,7 @@ static ssize_t format1_show(struct device *dev,
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
if (rc != ENXIO)
if (rc != -ENXIO)
break;
}
mutex_unlock(&acpi_desc->init_mutex);


@ -936,6 +936,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
pm_runtime_get_sync(dev);
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent && dev->bus->need_parent_lock)
@ -951,11 +953,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
* have released the driver successfully while this one
* was waiting, so check for that.
*/
if (dev->driver != drv)
if (dev->driver != drv) {
pm_runtime_put(dev);
return;
}
}
pm_runtime_get_sync(dev);
pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);


@ -175,7 +175,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
refcount_add(2, &skb->users);
refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}


@ -368,6 +368,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
if (ret)
goto out_notcb;
if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
goto out_notcb;
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);


@ -268,6 +268,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@ -347,6 +349,7 @@ static int compress_page(struct compress *c,
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
cond_resched();
return 0;
}


@ -312,6 +312,7 @@ static int vc4_drm_bind(struct device *dev)
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
drm_mode_config_cleanup(drm);
vc4_bo_cache_destroy(drm);
dev_put:
drm_dev_put(drm);


@ -1704,7 +1704,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);


@ -1388,7 +1388,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
@ -2370,20 +2370,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
if (fcb_len) {
if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(skb_new, skb->sk);
dev_consume_skb_any(skb);
skb = skb_new;
}
/* total number of fragments in the SKB */


@ -1886,7 +1886,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
irq = gpiod_to_irq(sfp->gpio[i]);
if (!irq) {
if (irq < 0) {
irq = 0;
poll = true;
continue;
}
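
The corrected test follows the kernel convention that gpiod_to_irq() reports failure as a negative errno, so "if (!irq)" missed real errors; on failure the driver now clears irq and falls back to polling. A sketch of the convention with a stub helper (not the real gpiod API):

#include <errno.h>
#include <stdio.h>

/* Stub with the kernel convention: >= 0 is a valid IRQ number,
 * a negative errno means failure. Not the real gpiod API. */
static int gpio_to_irq_stub(int gpio)
{
	return gpio >= 0 ? 32 + gpio : -EINVAL;
}

int main(void)
{
	int irq = gpio_to_irq_stub(-1);

	if (irq < 0) {		/* "!irq" would miss this entirely */
		irq = 0;
		puts("no interrupt line, falling back to polling");
	}
	return irq;
}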


@ -1268,6 +1268,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */


@ -221,6 +221,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
if (ra->base > rb->base)
return 1;
/*
* Put the dynamic allocations (address == 0, size == 0) before static
* allocations at address 0x0 so that overlap detection works
* correctly.
*/
if (ra->size < rb->size)
return -1;
if (ra->size > rb->size)
return 1;
return 0;
}
@ -238,8 +248,7 @@ static void __init __rmem_check_for_overlap(void)
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
if (!(this->base && next->base))
continue;
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
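
The comparator now sorts by (base, size) so that dynamic placeholders (base == 0, size == 0) order ahead of a static region genuinely placed at address 0x0, keeping the pairwise overlap scan correct. The same two-key comparator as a standalone sketch (illustrative struct, not the kernel's reserved_mem):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct rmem { uint64_t base, size; };

static int rmem_cmp(const void *a, const void *b)
{
	const struct rmem *ra = a, *rb = b;

	if (ra->base < rb->base) return -1;
	if (ra->base > rb->base) return 1;
	/* Same base: put the smaller (dynamic, size 0) entry first. */
	if (ra->size < rb->size) return -1;
	if (ra->size > rb->size) return 1;
	return 0;
}

int main(void)
{
	struct rmem r[] = { { 0, 0x1000 }, { 0, 0 }, { 0x2000, 0x100 } };

	qsort(r, 3, sizeof(r[0]), rmem_cmp);
	for (int i = 0; i < 3; i++)
		printf("base=%#llx size=%#llx\n",
		       (unsigned long long)r[i].base,
		       (unsigned long long)r[i].size);
	return 0;
}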


@ -1722,15 +1722,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
struct async_scan_data *data;
struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
return NULL;
goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@ -1741,7 +1742,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@ -1756,6 +1756,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}


@ -47,7 +47,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = tty_termios_baud_rate(termios);
serial8250_do_set_termios(port, termios, old);
serial8250_do_set_termios(port, termios, NULL);
tty_termios_encode_baud_rate(termios, baud, baud);


@ -1284,6 +1284,9 @@ static int __init serial_txx9_init(void)
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
if (ret) {
platform_driver_unregister(&serial_txx9_plat_driver);
}
#endif
if (ret == 0)
goto out;


@ -4574,27 +4574,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
return rc;
}
static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
{
int con = op->height;
int rc;
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
else if (!vc->vc_sw->con_font_copy)
rc = -ENOSYS;
else if (con < 0 || !vc_cons_allocated(con))
rc = -ENOTTY;
else if (con == vc->vc_num) /* nothing to do */
rc = 0;
else
rc = vc->vc_sw->con_font_copy(vc, con);
console_unlock();
return rc;
}
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
@ -4605,7 +4584,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
return con_font_copy(vc, op);
/* was buggy and never really used */
return -EINVAL;
}
return -ENOSYS;
}


@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },


@ -573,6 +573,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&mtu->lock, flags);
synchronize_irq(mtu->irq);
return 0;
}


@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
struct device *dev = &port->dev;
int status = urb->status;
unsigned long flags;
bool resubmitted = false;
set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
set_bit(0, &port->write_urbs_free);
return;
}
@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
goto exit;
}
resubmitted = true;
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
exit:
spin_unlock_irqrestore(&priv->lock, flags);
if (!resubmitted)
set_bit(0, &port->write_urbs_free);
usb_serial_port_softint(port);
}


@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),


@ -1459,6 +1459,21 @@ do { \
#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
#define BTRFS_INODE_FLAG_MASK \
(BTRFS_INODE_NODATASUM | \
BTRFS_INODE_NODATACOW | \
BTRFS_INODE_READONLY | \
BTRFS_INODE_NOCOMPRESS | \
BTRFS_INODE_PREALLOC | \
BTRFS_INODE_SYNC | \
BTRFS_INODE_IMMUTABLE | \
BTRFS_INODE_APPEND | \
BTRFS_INODE_NODUMP | \
BTRFS_INODE_NOATIME | \
BTRFS_INODE_DIRSYNC | \
BTRFS_INODE_COMPRESS | \
BTRFS_INODE_ROOT_ITEM_INIT)
struct btrfs_map_token {
const struct extent_buffer *eb;
char *kaddr;


@ -138,7 +138,61 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
return ret;
}
static void flush_write_bio(struct extent_page_data *epd);
static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags)
{
blk_status_t ret = 0;
struct bio_vec *bvec = bio_last_bvec_all(bio);
struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private;
u64 start;
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
if (tree->ops)
ret = tree->ops->submit_bio_hook(tree->private_data, bio,
mirror_num, bio_flags, start);
else
btrfsic_submit_bio(bio);
return blk_status_to_errno(ret);
}
/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
if (epd->bio) {
epd->bio->bi_status = errno_to_blk_status(ret);
bio_endio(epd->bio);
epd->bio = NULL;
}
}
/*
* Submit bio from extent page data via submit_one_bio
*
* Return 0 if everything is OK.
* Return <0 for error.
*/
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
int ret = 0;
if (epd->bio) {
ret = submit_one_bio(epd->bio, 0, 0);
/*
* Clean up of epd->bio is handled by its endio function.
* And endio is either triggered by successful bio execution
* or the error handler of submit bio hook.
* So at this point, no matter what happened, we don't need
* to clean up epd->bio.
*/
epd->bio = NULL;
}
return ret;
}
int __init extent_io_init(void)
{
@ -2710,28 +2764,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
return bio;
}
static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags)
{
blk_status_t ret = 0;
struct bio_vec *bvec = bio_last_bvec_all(bio);
struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private;
u64 start;
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
if (tree->ops)
ret = tree->ops->submit_bio_hook(tree->private_data, bio,
mirror_num, bio_flags, start);
else
btrfsic_submit_bio(bio);
return blk_status_to_errno(ret);
}
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@ -3439,6 +3471,9 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
* records are inserted to lock ranges in the tree, and as dirty areas
* are found, they are marked writeback. Then the lock bits are removed
* and the end_io handler clears the writeback ranges
*
* Return 0 if everything goes well.
* Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
@ -3506,6 +3541,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
ASSERT(ret <= 0);
return ret;
done_unlocked:
@ -3518,18 +3554,34 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
TASK_UNINTERRUPTIBLE);
}
static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
smp_mb__after_atomic();
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}
/*
* Lock eb pages and flush the bio if we can't get the locks
*
* Return 0 if nothing went wrong
* Return >0 means the same as 0, except the bio is not submitted
* Return <0 if something went wrong, no page is locked
*/
static noinline_for_stack int
lock_extent_buffer_for_io(struct extent_buffer *eb,
struct btrfs_fs_info *fs_info,
struct extent_page_data *epd)
{
int i, num_pages;
int i, num_pages, failed_page_nr;
int flush = 0;
int ret = 0;
if (!btrfs_try_tree_write_lock(eb)) {
ret = flush_write_bio(epd);
if (ret < 0)
return ret;
flush = 1;
flush_write_bio(epd);
btrfs_tree_lock(eb);
}
@ -3538,7 +3590,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!epd->sync_io)
return 0;
if (!flush) {
flush_write_bio(epd);
ret = flush_write_bio(epd);
if (ret < 0)
return ret;
flush = 1;
}
while (1) {
@ -3579,7 +3633,14 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!trylock_page(p)) {
if (!flush) {
flush_write_bio(epd);
int err;
err = flush_write_bio(epd);
if (err < 0) {
ret = err;
failed_page_nr = i;
goto err_unlock;
}
flush = 1;
}
lock_page(p);
@ -3587,13 +3648,25 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
}
return ret;
}
static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
smp_mb__after_atomic();
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
err_unlock:
/* Unlock already locked pages */
for (i = 0; i < failed_page_nr; i++)
unlock_page(eb->pages[i]);
/*
* Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
* Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
* be made and undo everything done before.
*/
btrfs_tree_lock(eb);
spin_lock(&eb->refs_lock);
set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
end_extent_buffer_writeback(eb);
spin_unlock(&eb->refs_lock);
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
fs_info->dirty_metadata_batch);
btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
btrfs_tree_unlock(eb);
return ret;
}
static void set_btree_ioerr(struct page *page)
@ -3869,7 +3942,44 @@ int btree_write_cache_pages(struct address_space *mapping,
index = 0;
goto retry;
}
flush_write_bio(&epd);
ASSERT(ret <= 0);
if (ret < 0) {
end_write_bio(&epd, ret);
return ret;
}
/*
* If something went wrong, don't allow any metadata write bio to be
* submitted.
*
* This would prevent use-after-free if we had dirty pages not
* cleaned up, which can still happen with fuzzed images.
*
* - Bad extent tree
* Allowing existing tree block to be allocated for other trees.
*
* - Log tree operations
* Existing tree blocks get allocated to the log tree, which bumps
* their generation, then get cleaned up in tree re-balance.
* Such tree block will not be written back, since it's clean,
* thus no WRITTEN flag set.
* And after log writes back, this tree block is not traced by
* any dirty extent_io_tree.
*
* - Offending tree block gets re-dirtied from its original owner
* Since it has bumped generation, no WRITTEN flag, it can be
* reused without COWing. This tree block will not be traced
* by btrfs_transaction::dirty_pages.
*
* Now such dirty tree block will not be cleaned by any dirty
* extent io tree. Thus we don't want to submit such wild eb
* if the fs already has error.
*/
if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
ret = flush_write_bio(&epd);
} else {
ret = -EUCLEAN;
end_write_bio(&epd, ret);
}
return ret;
}
@ -3966,7 +4076,8 @@ static int extent_write_cache_pages(struct address_space *mapping,
* tmpfs file mapping
*/
if (!trylock_page(page)) {
flush_write_bio(epd);
ret = flush_write_bio(epd);
BUG_ON(ret < 0);
lock_page(page);
}
@ -3976,8 +4087,10 @@ static int extent_write_cache_pages(struct address_space *mapping,
}
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page))
flush_write_bio(epd);
if (PageWriteback(page)) {
ret = flush_write_bio(epd);
BUG_ON(ret < 0);
}
wait_on_page_writeback(page);
}
@ -4022,8 +4135,9 @@ static int extent_write_cache_pages(struct address_space *mapping,
* page in our current bio, and thus deadlock, so flush the
* write bio here.
*/
flush_write_bio(epd);
goto retry;
ret = flush_write_bio(epd);
if (!ret)
goto retry;
}
if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
@ -4033,17 +4147,6 @@ static int extent_write_cache_pages(struct address_space *mapping,
return ret;
}
static void flush_write_bio(struct extent_page_data *epd)
{
if (epd->bio) {
int ret;
ret = submit_one_bio(epd->bio, 0, 0);
BUG_ON(ret < 0); /* -ENOMEM */
epd->bio = NULL;
}
}
int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
@ -4055,8 +4158,14 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
};
ret = __extent_writepage(page, wbc, &epd);
ASSERT(ret <= 0);
if (ret < 0) {
end_write_bio(&epd, ret);
return ret;
}
flush_write_bio(&epd);
ret = flush_write_bio(&epd);
ASSERT(ret <= 0);
return ret;
}
@ -4064,6 +4173,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode)
{
int ret = 0;
int flush_ret;
struct address_space *mapping = inode->i_mapping;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page;
@ -4098,7 +4208,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
start += PAGE_SIZE;
}
flush_write_bio(&epd);
flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret;
}
@ -4106,6 +4217,7 @@ int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret = 0;
int flush_ret;
struct extent_page_data epd = {
.bio = NULL,
.tree = &BTRFS_I(mapping->host)->io_tree,
@ -4114,7 +4226,8 @@ int extent_writepages(struct address_space *mapping,
};
ret = extent_write_cache_pages(mapping, wbc, &epd);
flush_write_bio(&epd);
flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret;
}


@ -448,6 +448,320 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
return 0;
}
__printf(5, 6)
__cold
static void chunk_err(const struct btrfs_fs_info *fs_info,
const struct extent_buffer *leaf,
const struct btrfs_chunk *chunk, u64 logical,
const char *fmt, ...)
{
bool is_sb;
struct va_format vaf;
va_list args;
int i;
int slot = -1;
/* Only superblock eb is able to have such small offset */
is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
if (!is_sb) {
/*
* Get the slot number by iterating through all slots, this
* would provide better readability.
*/
for (i = 0; i < btrfs_header_nritems(leaf); i++) {
if (btrfs_item_ptr_offset(leaf, i) ==
(unsigned long)chunk) {
slot = i;
break;
}
}
}
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (is_sb)
btrfs_crit(fs_info,
"corrupt superblock syschunk array: chunk_start=%llu, %pV",
logical, &vaf);
else
btrfs_crit(fs_info,
"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
logical, &vaf);
va_end(args);
}
/*
* The common chunk check which could also work on super block sys chunk array.
*
* Return -EUCLEAN if anything is corrupted.
* Return 0 if everything is OK.
*/
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical)
{
u64 length;
u64 stripe_len;
u16 num_stripes;
u16 sub_stripes;
u64 type;
u64 features;
bool mixed = false;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
type = btrfs_chunk_type(leaf, chunk);
if (!num_stripes) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk logical, have %llu should aligned to %u",
logical, fs_info->sectorsize);
return -EUCLEAN;
}
if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk sectorsize, have %u expect %u",
btrfs_chunk_sector_size(leaf, chunk),
fs_info->sectorsize);
return -EUCLEAN;
}
if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk stripe length: %llu",
stripe_len);
return -EUCLEAN;
}
if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
type) {
chunk_err(fs_info, leaf, chunk, logical,
"unrecognized chunk type: 0x%llx",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk));
return -EUCLEAN;
}
if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
chunk_err(fs_info, leaf, chunk, logical,
"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
type, BTRFS_BLOCK_GROUP_TYPE_MASK);
return -EUCLEAN;
}
if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
(type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
chunk_err(fs_info, leaf, chunk, logical,
"system chunk with data or metadata type: 0x%llx",
type);
return -EUCLEAN;
}
features = btrfs_super_incompat_flags(fs_info->super_copy);
if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
mixed = true;
if (!mixed) {
if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
(type & BTRFS_BLOCK_GROUP_DATA)) {
chunk_err(fs_info, leaf, chunk, logical,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EUCLEAN;
}
}
if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
(type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
(type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
return 0;
}
__printf(4, 5)
__cold
static void dev_item_err(const struct btrfs_fs_info *fs_info,
const struct extent_buffer *eb, int slot,
const char *fmt, ...)
{
struct btrfs_key key;
struct va_format vaf;
va_list args;
btrfs_item_key_to_cpu(eb, &key, slot);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
btrfs_crit(fs_info,
"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
btrfs_header_level(eb) == 0 ? "leaf" : "node",
btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
key.objectid, &vaf);
va_end(args);
}
static int check_dev_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
struct btrfs_dev_item *ditem;
if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
dev_item_err(fs_info, leaf, slot,
"invalid objectid: has=%llu expect=%llu",
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (btrfs_device_id(leaf, ditem) != key->offset) {
dev_item_err(fs_info, leaf, slot,
"devid mismatch: key has=%llu item has=%llu",
key->offset, btrfs_device_id(leaf, ditem));
return -EUCLEAN;
}
/*
* For device total_bytes, we don't have reliable way to check it, as
* it can be 0 for device removal. Device size check can only be done
* by dev extents check.
*/
if (btrfs_device_bytes_used(leaf, ditem) >
btrfs_device_total_bytes(leaf, ditem)) {
dev_item_err(fs_info, leaf, slot,
"invalid bytes used: have %llu expect [0, %llu]",
btrfs_device_bytes_used(leaf, ditem),
btrfs_device_total_bytes(leaf, ditem));
return -EUCLEAN;
}
/*
* Remaining members like io_align/type/gen/dev_group aren't really
* utilized. Skip them to make later usage of them easier.
*/
return 0;
}
/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(fs_info, eb, slot, fmt, ...) \
dir_item_err(fs_info, eb, slot, fmt, __VA_ARGS__)
static int check_inode_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
struct btrfs_inode_item *iitem;
u64 super_gen = btrfs_super_generation(fs_info->super_copy);
u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
u32 mode;
if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
key->objectid != BTRFS_FREE_INO_OBJECTID) {
generic_err(fs_info, leaf, slot,
"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
BTRFS_FIRST_FREE_OBJECTID,
BTRFS_LAST_FREE_OBJECTID,
BTRFS_FREE_INO_OBJECTID);
return -EUCLEAN;
}
if (key->offset != 0) {
inode_item_err(fs_info, leaf, slot,
"invalid key offset: has %llu expect 0",
key->offset);
return -EUCLEAN;
}
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
inode_item_err(fs_info, leaf, slot,
"invalid inode generation: has %llu expect (0, %llu]",
btrfs_inode_generation(leaf, iitem),
super_gen + 1);
return -EUCLEAN;
}
/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
inode_item_err(fs_info, leaf, slot,
"invalid inode transid: has %llu expect [0, %llu]",
btrfs_inode_transid(leaf, iitem), super_gen + 1);
return -EUCLEAN;
}
/*
* For size and nbytes it's better not to be too strict, as for dir
* item its size/nbytes can easily get wrong, but doesn't affect
* anything in the fs. So here we skip the check.
*/
mode = btrfs_inode_mode(leaf, iitem);
if (mode & ~valid_mask) {
inode_item_err(fs_info, leaf, slot,
"unknown mode bit detected: 0x%x",
mode & ~valid_mask);
return -EUCLEAN;
}
/*
* S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
* but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
* We only need to check BLK, LNK and SOCK.
*/
if (!is_power_of_2(mode & S_IFMT)) {
if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
inode_item_err(fs_info, leaf, slot,
"invalid mode: has 0%o expect valid S_IF* bit(s)",
mode & S_IFMT);
return -EUCLEAN;
}
}
if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
inode_item_err(fs_info, leaf, slot,
"invalid nlink: has %u expect no more than 1 for dir",
btrfs_inode_nlink(leaf, iitem));
return -EUCLEAN;
}
if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
inode_item_err(fs_info, leaf, slot,
"unknown flags detected: 0x%llx",
btrfs_inode_flags(leaf, iitem) &
~BTRFS_INODE_FLAG_MASK);
return -EUCLEAN;
}
return 0;
}
/*
* Common point to switch the item-specific validation.
*/
@ -456,6 +770,7 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
struct btrfs_key *key, int slot)
{
int ret = 0;
struct btrfs_chunk *chunk;
switch (key->type) {
case BTRFS_EXTENT_DATA_KEY:
@ -472,6 +787,17 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(fs_info, leaf, key, slot);
break;
case BTRFS_CHUNK_ITEM_KEY:
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk,
key->offset);
break;
case BTRFS_DEV_ITEM_KEY:
ret = check_dev_item(fs_info, leaf, key, slot);
break;
case BTRFS_INODE_ITEM_KEY:
ret = check_inode_item(fs_info, leaf, key, slot);
break;
}
return ret;
}


@ -25,4 +25,8 @@ int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf);
int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node);
int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical);
#endif


@ -28,6 +28,7 @@
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
@ -4605,15 +4606,6 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
- sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
- 2 * sizeof(struct btrfs_disk_key) \
- 2 * sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 start, u64 type)
{
@ -6370,99 +6362,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
return dev;
}
/* Return -EIO if any error, otherwise return 0. */
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical)
{
u64 length;
u64 stripe_len;
u16 num_stripes;
u16 sub_stripes;
u64 type;
u64 features;
bool mixed = false;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
type = btrfs_chunk_type(leaf, chunk);
if (!num_stripes) {
btrfs_err(fs_info, "invalid chunk num_stripes: %u",
num_stripes);
return -EIO;
}
if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
btrfs_err(fs_info, "invalid chunk logical %llu", logical);
return -EIO;
}
if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
btrfs_err(fs_info, "invalid chunk sectorsize %u",
btrfs_chunk_sector_size(leaf, chunk));
return -EIO;
}
if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
btrfs_err(fs_info, "invalid chunk length %llu", length);
return -EIO;
}
if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
btrfs_err(fs_info, "invalid chunk stripe length: %llu",
stripe_len);
return -EIO;
}
if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
type) {
btrfs_err(fs_info, "unrecognized chunk type: %llu",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk));
return -EIO;
}
if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
return -EIO;
}
if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
(type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
btrfs_err(fs_info,
"system chunk with data or metadata type: 0x%llx", type);
return -EIO;
}
features = btrfs_super_incompat_flags(fs_info->super_copy);
if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
mixed = true;
if (!mixed) {
if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
(type & BTRFS_BLOCK_GROUP_DATA)) {
btrfs_err(fs_info,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EIO;
}
}
if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
(type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
(type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != 1)) {
btrfs_err(fs_info,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EIO;
}
return 0;
}
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
u64 devid, u8 *uuid, bool error)
{
@ -6493,9 +6392,15 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
length = btrfs_chunk_length(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
/*
* Only need to verify chunk item if we're reading from sys chunk array,
* as chunk item in tree block is already verified by tree-checker.
*/
if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
}
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);


@ -257,6 +257,15 @@ struct btrfs_fs_devices {
#define BTRFS_BIO_INLINE_CSUM_SIZE 64
#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
- sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
- 2 * sizeof(struct btrfs_disk_key) \
- 2 * sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
/*
* we need the mirror number and stripe index to be passed around
* the call chain while we are processing end_io (especially errors).


@ -870,7 +870,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
out_free:
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(cachep, gl);
atomic_dec(&sdp->sd_glock_disposal);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
out:
return ret;
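
The pattern here is the classic last-one-out wakeup: whoever drops sd_glock_disposal to zero must wake the sleeper on sd_glock_wait, and the error path previously used a bare atomic_dec() that could strand the waiter. A userspace sketch of the pattern under pthreads (illustrative, not GFS2 code):

#include <pthread.h>
#include <stdio.h>

static int disposal = 2;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t zero = PTHREAD_COND_INITIALIZER;

static void put_glock(void)
{
	pthread_mutex_lock(&lock);
	/* The equivalent of atomic_dec_and_test() + wake_up(). */
	if (--disposal == 0)
		pthread_cond_broadcast(&zero);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	put_glock();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while (disposal != 0)
		pthread_cond_wait(&zero, &lock);	/* sd_glock_wait analogue */
	pthread_mutex_unlock(&lock);

	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("all glocks disposed");
	return 0;
}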


@ -1115,10 +1115,6 @@ static inline bool arch_has_pfn_modify_check(void)
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1


@ -2616,6 +2616,15 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)


@ -196,6 +196,7 @@ struct dsa_port {
unsigned int index;
const char *name;
const struct dsa_port *cpu_dp;
const char *mac;
struct device_node *dn;
unsigned int ageing_time;
u8 stp_state;


@ -9063,6 +9063,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
int fpos = token == IF_SRC_FILE ? 2 : 1;
kfree(filename);
filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
@ -9109,16 +9110,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
*/
ret = -EOPNOTSUPP;
if (!event->ctx->task)
goto fail_free_name;
goto fail;
/* look up the path and grab its inode */
ret = kern_path(filename, LOOKUP_FOLLOW,
&filter->path);
if (ret)
goto fail_free_name;
kfree(filename);
filename = NULL;
goto fail;
ret = -EINVAL;
if (!filter->path.dentry ||
@ -9138,13 +9136,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (state != IF_STATE_ACTION)
goto fail;
kfree(filename);
kfree(orig);
return 0;
fail_free_name:
kfree(filename);
fail:
kfree(filename);
free_filters_list(filters);
kfree(orig);


@ -2106,14 +2106,9 @@ static __latent_entropy struct task_struct *copy_process(
/* ok, now we should be set up.. */
p->pid = pid_nr(pid);
if (clone_flags & CLONE_THREAD) {
p->exit_signal = -1;
p->group_leader = current->group_leader;
p->tgid = current->tgid;
} else {
if (clone_flags & CLONE_PARENT)
p->exit_signal = current->group_leader->exit_signal;
else
p->exit_signal = (clone_flags & CSIGNAL);
p->group_leader = p;
p->tgid = p->pid;
}
@ -2158,9 +2153,14 @@ static __latent_entropy struct task_struct *copy_process(
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
if (clone_flags & CLONE_THREAD)
p->exit_signal = -1;
else
p->exit_signal = current->group_leader->exit_signal;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
p->exit_signal = (clone_flags & CSIGNAL);
}
klp_copy_process(p);


@ -2417,10 +2417,22 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
}
/*
* Since we just failed the trylock; there must be an owner.
* The trylock just failed, so either there is an owner or
* there is a higher priority waiter than this one.
*/
newowner = rt_mutex_owner(&pi_state->pi_mutex);
BUG_ON(!newowner);
/*
* If the higher priority waiter has not yet taken over the
* rtmutex then newowner is NULL. We can't return here with
* that state because it's inconsistent vs. the user space
* state. So drop the locks and try again. It's a valid
* situation and not any different from the other retry
* conditions.
*/
if (unlikely(!newowner)) {
err = -EAGAIN;
goto handle_err;
}
} else {
WARN_ON_ONCE(argowner != current);
if (oldowner == current) {
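
The fix replaces BUG_ON(!newowner) with a retry because the state is legitimate: between a failed trylock and the top waiter installing itself as owner, the rtmutex is momentarily ownerless. A structural sketch of that retry convention (heavily simplified, not the real futex code):

#include <errno.h>
#include <stddef.h>

struct task;
struct rtmutex { struct task *owner; };

static struct task *rt_mutex_owner(struct rtmutex *m)
{
	return m->owner;
}

/* Called after a failed trylock to (re)establish ownership state. */
static int fixup_owner(struct rtmutex *m, struct task **newowner)
{
	*newowner = rt_mutex_owner(m);
	/*
	 * Transient state: a higher-priority waiter won the lock but has
	 * not installed itself as owner yet. Not a bug; the caller drops
	 * its locks and retries, exactly like the other retry conditions.
	 */
	if (*newowner == NULL)
		return -EAGAIN;
	return 0;
}

int main(void)
{
	struct rtmutex m = { .owner = NULL };	/* transient ownerless state */
	struct task *owner;

	return fixup_owner(&m, &owner) == -EAGAIN ? 0 : 1;
}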


@ -864,7 +864,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
/* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node));
list_del_init(&work->node);
kthread_insert_work(worker, work, &worker->work_list);
if (!work->canceling)
kthread_insert_work(worker, work, &worker->work_list);
spin_unlock(&worker->lock);
}
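
The race being closed: the delayed-work timer can fire while a cancel is in flight, and must not re-queue work the canceler has already claimed; the new check runs under worker->lock, which the canceler also holds. A compressed sketch with illustrative userspace types:

#include <pthread.h>
#include <stdbool.h>

struct work {
	int canceling;		/* bumped by the canceler under the lock */
	bool queued;
};

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;

/* Timer callback: move delayed work to the run list, unless a cancel
 * already claimed it. */
static void delayed_work_timer_fn(struct work *work)
{
	pthread_mutex_lock(&worker_lock);
	if (!work->canceling)
		work->queued = true;	/* kthread_insert_work() analogue */
	pthread_mutex_unlock(&worker_lock);
}

int main(void)
{
	struct work w = { .canceling = 1, .queued = false };

	delayed_work_timer_fn(&w);
	return w.queued;	/* 0: work was not re-queued mid-cancel */
}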


@ -389,16 +389,17 @@ static bool task_participate_group_stop(struct task_struct *task)
void task_join_group_stop(struct task_struct *task)
{
unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
struct signal_struct *sig = current->signal;
if (sig->group_stop_count) {
sig->group_stop_count++;
mask |= JOBCTL_STOP_CONSUME;
} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
return;
/* Have the new thread join an on-going signal group stop */
unsigned long jobctl = current->jobctl;
if (jobctl & JOBCTL_STOP_PENDING) {
struct signal_struct *sig = current->signal;
unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
if (task_set_jobctl_pending(task, signr | gstop)) {
sig->group_stop_count++;
}
}
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*


@ -521,10 +521,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!bt->msg_data)
goto err;
ret = -ENOENT;
dir = debugfs_lookup(buts->name, blk_debugfs_root);
if (!dir)
#ifdef CONFIG_BLK_DEBUG_FS
/*
* When tracing whole make_request drivers (multiqueue) block devices,
* reuse the existing debugfs directory created by the block layer on
* init. For request-based block devices, all partitions block devices,
* and scsi-generic block devices we create a temporary new debugfs
* directory that will be removed once the trace ends.
*/
if (q->mq_ops && bdev && bdev == bdev->bd_contains)
dir = q->debugfs_dir;
else
#endif
bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
if (!dir)
goto err;
@ -583,8 +591,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = 0;
err:
if (dir && !bt->dir)
dput(dir);
if (ret)
blk_trace_free(bt);
return ret;


@ -444,14 +444,16 @@ struct rb_event_info {
/*
* Used for which event context the event is in.
* NMI = 0
* IRQ = 1
* SOFTIRQ = 2
* NORMAL = 3
* TRANSITION = 0
* NMI = 1
* IRQ = 2
* SOFTIRQ = 3
* NORMAL = 4
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
RB_CTX_TRANSITION,
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
@ -2620,10 +2622,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
*
* bit 0 = NMI context
* bit 1 = IRQ context
* bit 2 = SoftIRQ context
* bit 3 = normal context.
* bit 1 = NMI context
* bit 2 = IRQ context
* bit 3 = SoftIRQ context
* bit 4 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
@ -2646,6 +2648,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
*
* Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
* is set when a recursion is detected at the current context, and if
* the TRANSITION bit is already set, it will fail the recursion.
* This is needed because there's a lag between the changing of
* interrupt context and updating the preempt count. In this case,
* a false positive will be found. To handle this, one extra recursion
* is allowed, and this is done by the TRANSITION bit. If the TRANSITION
* bit is already set, then it is considered a recursion and the function
* ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
*
* On the trace_recursive_unlock(), the TRANSITION bit will be the first
* to be cleared. Even if it wasn't the context that set it. That is,
* if an interrupt comes in while NORMAL bit is set and the ring buffer
* is called before preempt_count() is updated, since the check will
* be on the NORMAL bit, the TRANSITION bit will then be set. If an
* NMI then comes in, it will set the NMI bit, but when the NMI code
* does the trace_recursive_unlock(), it will clear the TRANSITION bit
* and leave the NMI bit set. But this is fine, because the interrupt
* code that set the TRANSITION bit will then clear the NMI bit when it
* calls trace_recursive_unlock(). If another NMI comes in, it will
* set the TRANSITION bit and continue.
*
* Note: The TRANSITION bit only handles a single transition between context.
*/
static __always_inline int
@ -2661,8 +2687,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
return 1;
if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
/*
* It is possible that this was called by transitioning
* between interrupt context, and preempt_count() has not
* been updated yet. In this case, use the TRANSITION bit.
*/
bit = RB_CTX_TRANSITION;
if (val & (1 << (bit + cpu_buffer->nest)))
return 1;
}
val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;
@ -2677,8 +2711,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->current_context - (1 << cpu_buffer->nest);
}
/* The recursive locking above uses 4 bits */
#define NESTED_BITS 4
/* The recursive locking above uses 5 bits */
#define NESTED_BITS 5
/**
* ring_buffer_nest_start - Allow to trace while nested
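
The scheme extended here keeps one recursion bit per context plus a TRANSITION bit that absorbs the single false positive possible while preempt_count() lags an interrupt entry; unlock clears the least-significant set bit, so TRANSITION is always released first. A self-contained sketch of the bit trick (compressed from the kernel logic, names illustrative):

#include <stdio.h>

enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

static int recursive_lock(int bit)
{
	if (current_context & (1u << bit)) {
		/* Already set: allow one extra level via TRANSITION. */
		bit = CTX_TRANSITION;
		if (current_context & (1u << bit))
			return -1;	/* real recursion, reject */
	}
	current_context |= 1u << bit;
	return bit;
}

static void recursive_unlock(void)
{
	/* Clear the least-significant set bit: TRANSITION goes first. */
	current_context &= current_context - 1;
}

int main(void)
{
	printf("%d\n", recursive_lock(CTX_NORMAL));	/* 4 */
	printf("%d\n", recursive_lock(CTX_NORMAL));	/* 0: transition bit */
	printf("%d\n", recursive_lock(CTX_NORMAL));	/* -1: rejected */
	recursive_unlock();
	recursive_unlock();
	return 0;
}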


@ -2819,7 +2819,7 @@ static char *get_trace_buf(void)
/* Interrupts must see nesting incremented before we use the buffer */
barrier();
return &buffer->buffer[buffer->nesting][0];
return &buffer->buffer[buffer->nesting - 1][0];
}
static void put_trace_buf(void)
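
The off-by-one: get_trace_buf() increments "nesting" before handing out a slot, so the live slot is nesting - 1, and indexing with nesting itself writes one past the end at maximum depth. A reduced sketch of the corrected indexing (illustrative sizes):

#include <assert.h>

#define MAX_NEST 4

static char bufs[MAX_NEST][16];
static int nesting;

static char *get_buf(void)
{
	if (nesting >= MAX_NEST)
		return 0;
	nesting++;			/* claim the slot first */
	return bufs[nesting - 1];	/* then index the claimed one */
}

static void put_buf(void)
{
	nesting--;
}

int main(void)
{
	for (int i = 0; i < MAX_NEST; i++)
		assert(get_buf());
	assert(!get_buf());	/* depth exhausted, no overflow */
	for (int i = 0; i < MAX_NEST; i++)
		put_buf();
	return 0;
}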


@ -534,6 +534,12 @@ enum {
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,
/*
* When transitioning between context, the preempt_count() may
* not be correct. Allow for a single recursion to cover this case.
*/
TRACE_TRANSITION_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@ -588,14 +594,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
return 0;
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit)))
return -1;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
bit = TRACE_TRANSITION_BIT;
if (trace_recursion_test(bit))
return -1;
trace_recursion_set(bit);
barrier();
return bit + 1;
}
/* Normal check passed, clear the transition to allow it again */
trace_recursion_clear(TRACE_TRANSITION_BIT);
val |= 1 << bit;
current->trace_recursion = val;
barrier();
return bit;
return bit + 1;
}
static __always_inline void trace_clear_recursion(int bit)
@ -605,6 +624,7 @@ static __always_inline void trace_clear_recursion(int bit)
if (!bit)
return;
bit--;
bit = 1 << bit;
val &= ~bit;


@ -492,8 +492,13 @@ trace_selftest_function_recursion(void)
unregister_ftrace_function(&test_rec_probe);
ret = -1;
if (trace_selftest_recursion_cnt != 1) {
pr_cont("*callback not called once (%d)* ",
/*
* Recursion allows for transitions between context,
* and may call the callback twice.
*/
if (trace_selftest_recursion_cnt != 1 &&
trace_selftest_recursion_cnt != 2) {
pr_cont("*callback not called once (or twice) (%d)* ",
trace_selftest_recursion_cnt);
goto out;
}


@ -683,7 +683,6 @@ static int __init crc32c_test(void)
/* reduce OS noise */
local_irq_save(flags);
local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@ -694,7 +693,6 @@ static int __init crc32c_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
local_irq_enable();
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
@ -768,7 +766,6 @@ static int __init crc32_test(void)
/* reduce OS noise */
local_irq_save(flags);
local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@ -783,7 +780,6 @@ static int __init crc32_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
local_irq_enable();
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);


@ -8,7 +8,7 @@
#define FONTDATAMAX 9216
static struct font_data fontdata_10x18 = {
static const struct font_data fontdata_10x18 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 0000000000 */


@ -3,7 +3,7 @@
#define FONTDATAMAX 2560
static struct font_data fontdata_6x10 = {
static const struct font_data fontdata_6x10 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */


@ -9,7 +9,7 @@
#define FONTDATAMAX (11*256)
static struct font_data fontdata_6x11 = {
static const struct font_data fontdata_6x11 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */


@ -8,7 +8,7 @@
#define FONTDATAMAX 3584
static struct font_data fontdata_7x14 = {
static const struct font_data fontdata_7x14 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 0000000 */


@ -10,7 +10,7 @@
#define FONTDATAMAX 4096
static struct font_data fontdata_8x16 = {
static const struct font_data fontdata_8x16 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */


@ -9,7 +9,7 @@
#define FONTDATAMAX 2048
static struct font_data fontdata_8x8 = {
static const struct font_data fontdata_8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */


@ -5,7 +5,7 @@
#define FONTDATAMAX 2048
static struct font_data acorndata_8x8 = {
static const struct font_data acorndata_8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */


@ -43,7 +43,7 @@ __END__;
#define FONTDATAMAX 1536
static struct font_data fontdata_mini_4x6 = {
static const struct font_data fontdata_mini_4x6 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/*{*/
/* Char 0: ' ' */


@ -14,7 +14,7 @@
#define FONTDATAMAX 2048
static struct font_data fontdata_pearl8x8 = {
static const struct font_data fontdata_pearl8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */


@ -3,7 +3,7 @@
#define FONTDATAMAX 11264
static struct font_data fontdata_sun12x22 = {
static const struct font_data fontdata_sun12x22 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */


@ -3,7 +3,7 @@
#define FONTDATAMAX 4096
static struct font_data fontdata_sun8x16 = {
static const struct font_data fontdata_sun8x16 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,


@ -496,7 +496,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long flags = qp->flags;
int ret;
bool has_unmovable = false;
pte_t *pte;
pte_t *pte, *mapped_pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
@ -510,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
@ -542,7 +542,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
} else
break;
}
pte_unmap_unlock(pte - 1, ptl);
pte_unmap_unlock(mapped_pte, ptl);
cond_resched();
if (has_unmovable)
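
The fix stores the pointer returned by pte_offset_map_lock() and hands that to pte_unmap_unlock(), since the scan can break out early and "pte - 1" only equals the mapped pte when the loop runs to completion. The same bookkeeping in plain C, with free() standing in for the unmap (illustrative only):

#include <stdlib.h>

/* Scan a mapped range; the loop may break early, so the release must
 * use the saved base pointer, not "cursor - 1". */
static int scan(int nr)
{
	int *mapped = malloc(nr * sizeof(*mapped));	/* pte_offset_map_lock() analogue */
	int *cursor = mapped;
	int found = 0;

	if (!mapped)
		return -1;
	for (int i = 0; i < nr; i++, cursor++) {
		*cursor = i;
		if (i == 3) {	/* early exit: cursor never reached the end */
			found = 1;
			break;
		}
	}
	free(mapped);	/* always the saved base; pte_unmap_unlock() analogue */
	return found;
}

int main(void)
{
	return scan(8) == 1 ? 0 : 1;
}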


@ -261,6 +261,7 @@ static int dsa_port_setup(struct dsa_port *dp)
int err = 0;
memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
dp->mac = of_get_mac_address(dp->dn);
if (dp->type != DSA_PORT_TYPE_UNUSED)
err = devlink_port_register(ds->devlink, &dp->devlink_port,


@ -1313,7 +1313,10 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
slave_dev->hw_features |= NETIF_F_HW_TC;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
if (port->mac && is_valid_ether_addr(port->mac))
ether_addr_copy(slave_dev->dev_addr, port->mac);
else
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;


@ -1615,12 +1615,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_INIT_FAILED:
sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc, event_type,
subtype, chunk, cmd->obj.u32);
subtype, chunk, cmd->obj.u16);
break;
case SCTP_CMD_INIT_COUNTER_INC:


@ -93,6 +93,11 @@ static int __net_init tipc_init_net(struct net *net)
static void __net_exit tipc_exit_net(struct net *net)
{
tipc_net_stop(net);
/* Make sure the tipc_net_finalize_work stopped
* before releasing the resources.
*/
flush_scheduled_work();
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);


@ -629,7 +629,7 @@ struct sock *__vsock_create(struct net *net,
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
}


@ -397,6 +397,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
switch (subs->stream->chip->usb_id) {
case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
ep = 0x81;
ifnum = 3;
goto add_sync_ep_from_ifnum;
@ -406,6 +407,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
case USB_ID(0x0499, 0x172a): /* Yamaha MODX */
ep = 0x86;
ifnum = 2;
goto add_sync_ep_from_ifnum;
@ -413,6 +415,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */
ep = 0x82;
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
ep = 0x81;


@ -1463,6 +1463,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x278b: /* Rotel? */
case 0x292b: /* Gustard/Ess based devices */
case 0x2ab6: /* T+A devices */
case 0x3353: /* Khadas devices */
case 0x3842: /* EVGA */
case 0xc502: /* HiBy devices */
if (fp->dsd_raw)


@ -22,7 +22,7 @@ static inline void *zalloc(size_t size)
return calloc(1, size);
}
#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#define zfree(ptr) ({ free((void *)*ptr); *ptr = NULL; })
struct dirent;
struct nsinfo;
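
A note on the cast above: perf keeps some heap-allocated strings behind "const char *" members, and passing one straight to free() trips -Wdiscarded-qualifiers, which is the v4.19.y build error being fixed. A minimal illustration (hypothetical struct, not perf's):

#include <stdlib.h>
#include <string.h>

struct event {
	const char *name;	/* const for all readers... */
};

int main(void)
{
	struct event ev = { .name = strdup("sched:sched_switch") };

	/* ...but the owner must cast to release it, as zfree() now does. */
	free((void *)ev.name);
	ev.name = NULL;
	return 0;
}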