Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Let's begin the holiday weekend with some networking fixes:

  1) Whoops need to restrict cfg80211 wiphy names even more to 64
     bytes. From Eric Biggers.

  2) Fix flags being ignored when using kernel_connect() with SCTP,
     from Xin Long.

  3) Use after free in DCCP, from Alexey Kodanev.

  4) Need to check rhltable_init() return value in ipmr code, from
     Eric Dumazet.

  5) XDP handling fixes in virtio_net from Jason Wang.

  6) Missing RTA_TABLE in rtm_ipv4_policy[], from Roopa Prabhu.

  7) Need to use IRQ disabling spinlocks in mlx4_qp_lookup(), from
     Jack Morgenstein.

  8) Prevent out-of-bounds speculation using indexes in BPF, from
     Daniel Borkmann.

  9) Fix regression added by AF_PACKET link layer cure, from Willem
     de Bruijn.

 10) Correct ENIC dma mask, from Govindarajulu Varadarajan.

 11) Missing config options for PMTU tests, from Stefano Brivio"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (48 commits)
  ibmvnic: Fix partial success login retries
  selftests/net: Add missing config options for PMTU tests
  mlx4_core: allocate ICM memory in page size chunks
  enic: set DMA mask to 47 bit
  ppp: remove the PPPIOCDETACH ioctl
  ipv4: remove warning in ip_recv_error
  net : sched: cls_api: deal with egdev path only if needed
  vhost: synchronize IOTLB message with dev cleanup
  packet: fix reserve calculation
  net/mlx5: IPSec, Fix a race between concurrent sandbox QP commands
  net/mlx5e: When RXFCS is set, add FCS data into checksum calculation
  bpf: properly enforce index mask to prevent out-of-bounds speculation
  net/mlx4: Fix irq-unsafe spinlock usage
  net: phy: broadcom: Fix bcm_write_exp()
  net: phy: broadcom: Fix auxiliary control register reads
  net: ipv4: add missing RTA_TABLE to rtm_ipv4_policy
  net/mlx4: fix spelling mistake: "Inrerface" -> "Interface" and rephrase message
  ibmvnic: Only do H_EOI for mobility events
  tuntap: correctly set SOCKWQ_ASYNC_NOSPACE
  virtio-net: fix leaking page for gso packet during mergeable XDP
  ...
commit 03250e1028
49 changed files with 372 additions and 193 deletions
@@ -300,12 +300,6 @@ unattached instance are:
 The ioctl calls available on an instance of /dev/ppp attached to a
 channel are:
 
-* PPPIOCDETACH detaches the instance from the channel.  This ioctl is
-  deprecated since the same effect can be achieved by closing the
-  instance.  In order to prevent possible races this ioctl will fail
-  with an EINVAL error if more than one file descriptor refers to this
-  instance (i.e. as a result of dup(), dup2() or fork()).
-
 * PPPIOCCONNECT connects this channel to a PPP interface.  The
   argument should point to an int containing the interface unit
   number.  It will return an EINVAL error if the channel is already
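For context on the documentation change above: user space now detaches an instance simply by closing its file descriptor. A minimal user-space sketch follows (illustrative only, not part of this commit; the unit number and error handling are assumptions):

/* Attach a /dev/ppp instance to an existing ppp unit, then detach by
 * closing the descriptor -- the replacement for the removed PPPIOCDETACH.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ppp-ioctl.h>

int main(void)
{
	int unit = 0;	/* hypothetical: interface unit number of ppp0 */
	int fd = open("/dev/ppp", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ppp");
		return 1;
	}
	if (ioctl(fd, PPPIOCATTACH, &unit) < 0)
		perror("PPPIOCATTACH");

	close(fd);	/* detaches the instance; no PPPIOCDETACH needed */
	return 0;
}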
@@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c
 F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
 
 ATHEROS ATH GENERIC UTILITIES
-M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
+M: Kalle Valo <kvalo@codeaurora.org>
 L: linux-wireless@vger.kernel.org
 S: Supported
 F: drivers/net/wireless/ath/*
@@ -2347,7 +2347,7 @@ S: Maintained
 F: drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@qca.qualcomm.com>
+M: Kalle Valo <kvalo@codeaurora.org>
 L: linux-wireless@vger.kernel.org
 W: http://wireless.kernel.org/en/users/Drivers/ath6kl
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -11626,7 +11626,7 @@ S: Maintained
 F: drivers/media/tuners/qt1010*
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M: Kalle Valo <kvalo@qca.qualcomm.com>
+M: Kalle Valo <kvalo@codeaurora.org>
 L: ath10k@lists.infradead.org
 W: http://wireless.kernel.org/en/users/Drivers/ath10k
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -11677,7 +11677,7 @@ S: Maintained
 F: drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
-M: Eugene Krasnikov <k.eugene.e@gmail.com>
+M: Kalle Valo <kvalo@codeaurora.org>
 L: wcn36xx@lists.infradead.org
 W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
 T: git git://github.com/KrasnikovEugene/wcn36xx.git
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
 {
 	int i;
 	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
-	char interrupts[20];
+	char interrupts[25];
 	char *ints = interrupts;
 
 	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
 **  Receive and process command from user mode utility
 */
 void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length,
+			    int length, void *mptr,
 			    divas_xdi_copy_from_user_fn_t cp_fn)
 {
-	diva_xdi_um_cfg_cmd_t msg;
+	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
 	diva_os_xdi_adapter_t *a = NULL;
 	diva_os_spin_lock_magic_t old_irql;
 	struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
 		    length, sizeof(diva_xdi_um_cfg_cmd_t)))
 		return NULL;
 	}
-	if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
+	if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
 		DBG_ERR(("A: A(?) open, write error"))
 		return NULL;
 	}
 	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
 	list_for_each(tmp, &adapter_queue) {
 		a = list_entry(tmp, diva_os_xdi_adapter_t, link);
-		if (a->controller == (int)msg.adapter)
+		if (a->controller == (int)msg->adapter)
 			break;
 		a = NULL;
 	}
 	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
 
 	if (!a) {
-		DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
+		DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
 	}
 
 	return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
 
 int
 diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-	       int length, divas_xdi_copy_from_user_fn_t cp_fn)
+	       int length, void *mptr,
+	       divas_xdi_copy_from_user_fn_t cp_fn)
 {
+	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
 	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
 	void *data;
 
@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
 		return (-2);
 	}
 
-	length = (*cp_fn) (os_handle, data, src, length);
+	if (msg) {
+		*(diva_xdi_um_cfg_cmd_t *)data = *msg;
+		length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
+				   src + sizeof(*msg), length - sizeof(*msg));
+	} else {
+		length = (*cp_fn) (os_handle, data, src, length);
+	}
 	if (length > 0) {
 		if ((*(a->interface.cmd_proc))
 		    (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
 		  int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
 
 int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-		   int length, divas_xdi_copy_from_user_fn_t cp_fn);
+		   int length, void *msg,
+		   divas_xdi_copy_from_user_fn_t cp_fn);
 
 void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length,
+			    int length, void *msg,
 			    divas_xdi_copy_from_user_fn_t cp_fn);
 
 void diva_xdi_close_adapter(void *adapter, void *os_handle);
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
 static ssize_t divas_write(struct file *file, const char __user *buf,
 			   size_t count, loff_t *ppos)
 {
+	diva_xdi_um_cfg_cmd_t msg;
 	int ret = -EINVAL;
 
 	if (!file->private_data) {
 		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count,
+							   count, &msg,
 							   xdi_copy_from_user);
-	}
-	if (!file->private_data) {
-		return (-ENODEV);
+		if (!file->private_data)
+			return (-ENODEV);
+		ret = diva_xdi_write(file->private_data, file,
+				     buf, count, &msg, xdi_copy_from_user);
+	} else {
+		ret = diva_xdi_write(file->private_data, file,
+				     buf, count, NULL, xdi_copy_from_user);
 	}
 
-	ret = diva_xdi_write(file->private_data, file,
-			     buf, count, xdi_copy_from_user);
 	switch (ret) {
 	case -1:	/* Message should be removed from rx mailbox first */
 		ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
 static ssize_t divas_read(struct file *file, char __user *buf,
 			  size_t count, loff_t *ppos)
 {
+	diva_xdi_um_cfg_cmd_t msg;
 	int ret = -EINVAL;
 
 	if (!file->private_data) {
 		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count,
+							   count, &msg,
 							   xdi_copy_from_user);
 	}
 	if (!file->private_data) {
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!ioaddr) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("card has no PCI IO resources, aborting\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto err_disable_dev;
 	}
 
 	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
 	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return err;
+		goto err_disable_dev;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("io address range already allocated\n");
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_disable_dev;
 	}
 
 	err = pcnet32_probe1(ioaddr, 1, pdev);
+
+err_disable_dev:
 	if (err < 0)
 		pci_disable_device(pdev);
 
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device. Try 64-bit first, and
+	 * limitation for the device. Try 47-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 64);
+				"for consistent allocations, aborting\n", 47);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
  *  Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Fast Ethernet Controller (ENET) PTP driver for MX6x.
  *
  * Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -796,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	unsigned long timeout = msecs_to_jiffies(30000);
 	int retry_count = 0;
+	bool retry;
 	int rc;
 
 	do {
+		retry = false;
 		if (retry_count > IBMVNIC_MAX_QUEUES) {
 			netdev_warn(netdev, "Login attempts exceeded\n");
 			return -1;
@@ -822,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
 			retry_count++;
 			release_sub_crqs(adapter, 1);
 
+			retry = true;
+			netdev_dbg(netdev,
+				   "Received partial success, retrying...\n");
 			adapter->init_done_rc = 0;
 			reinit_completion(&adapter->init_done);
 			send_cap_queries(adapter);
@@ -849,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
 			netdev_warn(netdev, "Adapter login failed\n");
 			return -1;
 		}
-	} while (adapter->init_done_rc == PARTIALSUCCESS);
+	} while (retry);
 
 	/* handle pending MAC address changes after successful login */
 	if (adapter->mac_change_pending) {
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 {
 	struct device *dev = &adapter->vdev->dev;
 	unsigned long rc;
-	u64 val;
 
 	if (scrq->hw_irq > 0x100000000ULL) {
 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
 		return 1;
 	}
 
-	val = (0xff000000) | scrq->hw_irq;
-	rc = plpar_hcall_norets(H_EOI, val);
-	if (rc)
-		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-			val, rc);
+	if (adapter->resetting &&
+	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+		u64 val = (0xff000000) | scrq->hw_irq;
+
+		rc = plpar_hcall_norets(H_EOI, val);
+		if (rc)
+			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+				val, rc);
+	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -43,12 +43,12 @@
 #include "fw.h"
 
 /*
- * We allocate in as big chunks as we can, up to a maximum of 256 KB
- * per chunk.
+ * We allocate in page size (default 4KB on many archs) chunks to avoid high
+ * order memory allocations in fragmented/high usage memory situation.
 */
 enum {
-	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
-	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
+	MLX4_ICM_ALLOC_SIZE	= PAGE_SIZE,
+	MLX4_TABLE_CHUNK_SIZE	= PAGE_SIZE,
 };
 
 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	u64 size;
 
 	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+	if (WARN_ON(!obj_per_chunk))
+		return -EINVAL;
 	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
 
-	table->icm      = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
+	table->icm      = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
 	if (!table->icm)
 		return -ENOMEM;
 	table->virt     = virt;
@@ -446,7 +448,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			mlx4_free_icm(dev, table->icm[i], use_coherent);
 	}
 
-	kfree(table->icm);
+	kvfree(table->icm);
 
 	return -ENOMEM;
 }
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
 			mlx4_free_icm(dev, table->icm[i], table->coherent);
 	}
 
-	kfree(table->icm);
+	kvfree(table->icm);
 }
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
 		spin_unlock_irqrestore(&priv->ctx_lock, flags);
 
-		mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
 			 dev_ctx->intf->protocol, enable ?
 			 "enabled" : "disabled");
 	}
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	struct mlx4_qp *qp;
 
-	spin_lock(&qp_table->lock);
+	spin_lock_irq(&qp_table->lock);
 
 	qp = __mlx4_qp_lookup(dev, qpn);
 
-	spin_unlock(&qp_table->lock);
+	spin_unlock_irq(&qp_table->lock);
 	return qp;
 }
 
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
 }
 
+static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+{
+	int last_frag_sz, bytes_in_prev, nr_frags;
+	u8 *fcs_p1, *fcs_p2;
+	skb_frag_t *last_frag;
+	__be32 fcs_bytes;
+
+	if (!skb_is_nonlinear(skb))
+		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+	last_frag_sz = skb_frag_size(last_frag);
+
+	/* If all FCS data is in last frag */
+	if (last_frag_sz >= ETH_FCS_LEN)
+		return *(__be32 *)(skb_frag_address(last_frag) +
+				   last_frag_sz - ETH_FCS_LEN);
+
+	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+
+	/* Find where the other part of the FCS is - Linear or another frag */
+	if (nr_frags == 1) {
+		fcs_p1 = skb_tail_pointer(skb);
+	} else {
+		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+
+		fcs_p1 = skb_frag_address(prev_frag) +
+			 skb_frag_size(prev_frag);
+	}
+	fcs_p1 -= bytes_in_prev;
+
+	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+
+	return fcs_bytes;
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		skb->csum = csum_partial(skb->data + ETH_HLEN,
 					 network_depth - ETH_HLEN,
 					 skb->csum);
+		if (unlikely(netdev->features & NETIF_F_RXFCS))
+			skb->csum = csum_add(skb->csum,
+					     (__force __wsum)mlx5e_get_fcs(skb));
 		rq->stats.csum_complete++;
 		return;
 	}
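Background for the two mlx5e hunks above (an editorial sketch, not part of the patch): CHECKSUM_COMPLETE must cover every byte handed to the stack, so when RXFCS leaves the 4-byte FCS in the packet its value has to be folded into skb->csum. The user-space stand-in below mimics the ones-complement accumulate that csum_add() performs; the values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's 32-bit ones-complement checksum
 * accumulation (csum_add). Illustrative only.
 */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint64_t sum = (uint64_t)csum + addend;

	return (uint32_t)(sum + (sum >> 32));	/* fold the carry back in */
}

int main(void)
{
	uint32_t payload_csum = 0x1234abcd;	/* checksum over the payload */
	uint32_t fcs_bytes = 0xdeadbeef;	/* hypothetical FCS value */

	/* mirrors: skb->csum = csum_add(skb->csum, fcs_bytes); */
	printf("csum with FCS folded in: 0x%08x\n",
	       csum_add32(payload_csum, fcs_bytes));
	return 0;
}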
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
 	context->buf.sg[0].data = &context->command;
 
 	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-	list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+	if (!res)
+		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
 	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
 
-	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
 	if (res) {
-		mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
-			       res);
-		spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-		list_del(&context->list);
-		spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
 		kfree(context);
 		return ERR_PTR(res);
 	}
 
 	/* Context will be freed by wait func after completion */
 	return context;
 }
@@ -77,7 +77,7 @@
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
 
 /* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK	(~0ULL >> 12)
 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
 #define ILT_ENTRY_VALID_MASK		0x1ULL
 #define ILT_ENTRY_VALID_SHIFT		52
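A quick arithmetic note on the hunk above (editorial, not from the patch): the old literal 0x000FFFFFFFFFFFULL covers only 44 bits, while ~0ULL >> 12 yields 0x000FFFFFFFFFFFFF, a 52-bit physical-address mask consistent with the VALID bit sitting at shift 52. A tiny check:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_mask = 0x000FFFFFFFFFFFULL;	/* 44 bits set */
	uint64_t new_mask = ~0ULL >> 12;		/* 52 bits set */

	printf("old: 0x%016" PRIx64 "\nnew: 0x%016" PRIx64 "\n",
	       old_mask, new_mask);
	return 0;
}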
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
 		return rc;
 
 	/* make rcal=100, since rdb default is 000 */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
 	if (rc < 0)
 		return rc;
 
 	/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
 	if (rc < 0)
 		return rc;
 
 	/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
 
 	return 0;
 }
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
 	/* The register must be written to both the Shadow Register Select and
 	 * the Shadow Read Register Selector
 	 */
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
+	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
 		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
 	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
 }
@@ -14,11 +14,18 @@
 #ifndef _LINUX_BCM_PHY_LIB_H
 #define _LINUX_BCM_PHY_LIB_H
 
+#include <linux/brcmphy.h>
 #include <linux/phy.h>
 
 int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
 int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
 
+static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
+					u16 reg, u16 val)
+{
+	return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
+}
+
 int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
 int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
 
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
 static void r_rc_cal_reset(struct phy_device *phydev)
 {
 	/* Reset R_CAL/RC_CAL Engine */
-	bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
 
 	/* Disable Reset R_AL/RC_CAL Engine */
-	bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
 }
 
 static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	if (cmd == PPPIOCDETACH) {
 		/*
-		 * We have to be careful here... if the file descriptor
-		 * has been dup'd, we could have another process in the
-		 * middle of a poll using the same file *, so we had
-		 * better not free the interface data structures -
-		 * instead we fail the ioctl.  Even in this case, we
-		 * shut down the interface if we are the owner of it.
-		 * Actually, we should get rid of PPPIOCDETACH, userland
-		 * (i.e. pppd) could achieve the same effect by closing
-		 * this fd and reopening /dev/ppp.
+		 * PPPIOCDETACH is no longer supported as it was heavily broken,
+		 * and is only known to have been used by pppd older than
+		 * ppp-2.4.2 (released November 2003).
 		 */
+		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
+			     current->comm, current->pid);
 		err = -EINVAL;
-		if (pf->kind == INTERFACE) {
-			ppp = PF_TO_PPP(pf);
-			rtnl_lock();
-			if (file == ppp->owner)
-				unregister_netdevice(ppp->dev);
-			rtnl_unlock();
-		}
-		if (atomic_long_read(&file->f_count) < 2) {
-			ppp_release(NULL, file);
-			err = 0;
-		} else
-			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
-				atomic_long_read(&file->f_count));
 		goto out;
 	}
 
@@ -1423,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
 }
 
+static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+{
+	struct sock *sk = tfile->socket.sk;
+
+	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+}
+
 /* Character device part */
 
 /* Poll */
@@ -1445,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
 	if (!ptr_ring_empty(&tfile->tx_ring))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
-	if (tun->dev->flags & IFF_UP &&
-	    (sock_writeable(sk) ||
-	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	      sock_writeable(sk))))
+	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+	 * guarantee EPOLLOUT to be raised by either here or
+	 * tun_sock_write_space(). Then process could get notification
+	 * after it writes to a down device and meets -EIO.
+	 */
+	if (tun_sock_writeable(tun, tfile) ||
+	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	     tun_sock_writeable(tun, tfile)))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	if (tun->dev->reg_state != NETREG_REGISTERED)
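The comment added above relies on a standard poll idiom; as a side note (illustrative, not from the patch), the "check, publish the flag, check again" sequence closes the race where the socket becomes writable between the first test and the flag becoming visible to the waker:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool want_wakeup;
static atomic_bool space_available;

/* Poller side: if not writable, publish the wakeup request (like
 * test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, ...)), then recheck, so a
 * concurrent state change can't be missed by both poller and waker.
 */
static bool poll_writable(void)
{
	if (atomic_load(&space_available))
		return true;
	atomic_store(&want_wakeup, true);
	return atomic_load(&space_available);
}

int main(void)
{
	atomic_store(&space_available, false);
	printf("writable: %d, wakeup armed: %d\n",
	       poll_writable(), (int)atomic_load(&want_wakeup));
	return 0;
}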
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	void *data;
 	u32 act;
 
+	/* Transient failure which in theory could occur if
+	 * in-flight packets from before XDP was enabled reach
+	 * the receive path after XDP is loaded.
+	 */
+	if (unlikely(hdr->hdr.gso_type))
+		goto err_xdp;
+
 	/* This happens when rx buffer size is underestimated
 	 * or headroom is not enough because of the buffer
 	 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			xdp_page = page;
 		}
 
-		/* Transient failure which in theory could occur if
-		 * in-flight packets from before XDP was enabled reach
-		 * the receive path after XDP is loaded. In practice I
-		 * was not able to create this condition.
-		 */
-		if (unlikely(hdr->hdr.gso_type))
-			goto err_xdp;
-
 		/* Allow consuming headroom but reserve enough space to push
 		 * the descriptor on if we get an XDP_TX return code.
 		 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			}
 			*xdp_xmit = true;
 			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			}
 			*xdp_xmit = true;
 			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
@@ -875,7 +874,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	rcu_read_unlock();
 err_skb:
 	put_page(page);
-	while (--num_buf) {
+	while (num_buf-- > 1) {
 		buf = virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
@@ -3340,7 +3340,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
 static int hwsim_dump_radio_nl(struct sk_buff *skb,
 			       struct netlink_callback *cb)
 {
-	int last_idx = cb->args[0];
+	int last_idx = cb->args[0] - 1;
 	struct mac80211_hwsim_data *data = NULL;
 	int res = 0;
 	void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
 		last_idx = data->idx;
 	}
 
-	cb->args[0] = last_idx;
+	cb->args[0] = last_idx + 1;
 
 	/* list changed, but no new element sent, set interrupted flag */
 	if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
@@ -117,7 +117,7 @@ config SSB_SERIAL
 
 config SSB_DRIVER_PCICORE_POSSIBLE
 	bool
-	depends on SSB_PCIHOST && SSB = y
+	depends on SSB_PCIHOST
 	default y
 
 config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE
 
 config SSB_PCICORE_HOSTMODE
 	bool "Hostmode support for SSB PCI core"
-	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
+	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
 	help
 	  PCIcore hostmode operation (external PCI bus).
 
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 {
 	int ret = 0;
 
+	mutex_lock(&dev->mutex);
 	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	}
 
 	vhost_dev_unlock_vqs(dev);
+	mutex_unlock(&dev->mutex);
+
 	return ret;
 }
 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
@@ -142,7 +142,7 @@ struct bpf_verifier_state_list {
 struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
-		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		unsigned long map_state;	/* pointer/poison value for maps */
 		s32 call_imm;			/* saved imm field of call insn */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 /*
  * sctp/socket.c
  */
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+		      int addr_len, int flags);
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
@@ -2698,7 +2698,7 @@ enum nl80211_attrs {
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
-#define NL80211_WIPHY_NAME_MAXLEN		128
+#define NL80211_WIPHY_NAME_MAXLEN		64
 
 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_HT_RATES		77
@@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCGIDLE	_IOR('t', 63, struct ppp_idle) /* get idle time */
 #define PPPIOCNEWUNIT	_IOWR('t', 62, int)	/* create new ppp unit */
 #define PPPIOCATTACH	_IOW('t', 61, int)	/* attach to ppp unit */
-#define PPPIOCDETACH	_IOW('t', 60, int)	/* detach from ppp unit/chan */
+#define PPPIOCDETACH	_IOW('t', 60, int)	/* obsolete, do not use */
 #define PPPIOCSMRRU	_IOW('t', 59, int)	/* set multilink MRU */
 #define PPPIOCCONNECT	_IOW('t', 58, int)	/* connect channel to unit */
 #define PPPIOCDISCONN	_IO('t', 57)		/* disconnect channel */
@@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem {
 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
 #define BPF_COMPLEXITY_LIMIT_STACK	1024
 
-#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+#define BPF_MAP_PTR_UNPRIV	1UL
+#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
+					  POISON_POINTER_DELTA))
+#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
+
+static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+{
+	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+}
+
+static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
+{
+	return aux->map_state & BPF_MAP_PTR_UNPRIV;
+}
+
+static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
+			      const struct bpf_map *map, bool unpriv)
+{
+	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
+	unpriv |= bpf_map_ptr_unpriv(aux);
+	aux->map_state = (unsigned long)map |
+			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+}
 
 struct bpf_call_arg_meta {
 	struct bpf_map *map_ptr;
@@ -2358,6 +2380,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 	return 0;
 }
 
+static int
+record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
+		int func_id, int insn_idx)
+{
+	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+
+	if (func_id != BPF_FUNC_tail_call &&
+	    func_id != BPF_FUNC_map_lookup_elem)
+		return 0;
+	if (meta->map_ptr == NULL) {
+		verbose(env, "kernel subsystem misconfigured verifier\n");
+		return -EINVAL;
+	}
+
+	if (!BPF_MAP_PTR(aux->map_state))
+		bpf_map_ptr_store(aux, meta->map_ptr,
+				  meta->map_ptr->unpriv_array);
+	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
+				  meta->map_ptr->unpriv_array);
+	return 0;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	const struct bpf_func_proto *fn = NULL;
@@ -2412,13 +2457,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
 	if (err)
 		return err;
-	if (func_id == BPF_FUNC_tail_call) {
-		if (meta.map_ptr == NULL) {
-			verbose(env, "verifier bug\n");
-			return -EINVAL;
-		}
-		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
-	}
 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
 	if (err)
 		return err;
@@ -2429,6 +2467,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	if (err)
 		return err;
 
+	err = record_func_map(env, &meta, func_id, insn_idx);
+	if (err)
+		return err;
+
 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
 	 * is inferred from register state.
 	 */
@@ -2453,8 +2495,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	} else if (fn->ret_type == RET_VOID) {
 		regs[BPF_REG_0].type = NOT_INIT;
 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
-		struct bpf_insn_aux_data *insn_aux;
-
 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
 		/* There is no offset yet applied, variable or fixed */
 		mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -2470,11 +2510,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		regs[BPF_REG_0].id = ++env->id_gen;
-		insn_aux = &env->insn_aux_data[insn_idx];
-		if (!insn_aux->map_ptr)
-			insn_aux->map_ptr = meta.map_ptr;
-		else if (insn_aux->map_ptr != meta.map_ptr)
-			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
 	} else {
 		verbose(env, "unknown return type %d of func %s#%d\n",
 			fn->ret_type, func_id_name(func_id), func_id);
@@ -5470,6 +5505,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	struct bpf_insn *insn = prog->insnsi;
 	const struct bpf_func_proto *fn;
 	const int insn_cnt = prog->len;
+	struct bpf_insn_aux_data *aux;
 	struct bpf_insn insn_buf[16];
 	struct bpf_prog *new_prog;
 	struct bpf_map *map_ptr;
@@ -5544,19 +5580,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			insn->imm = 0;
 			insn->code = BPF_JMP | BPF_TAIL_CALL;
 
+			aux = &env->insn_aux_data[i + delta];
+			if (!bpf_map_ptr_unpriv(aux))
+				continue;
+
 			/* instead of changing every JIT dealing with tail_call
 			 * emit two extra insns:
 			 * if (index >= max_entries) goto out;
 			 * index &= array->index_mask;
 			 * to avoid out-of-bounds cpu speculation
 			 */
-			map_ptr = env->insn_aux_data[i + delta].map_ptr;
-			if (map_ptr == BPF_MAP_PTR_POISON) {
+			if (bpf_map_ptr_poisoned(aux)) {
 				verbose(env, "tail_call abusing map_ptr\n");
 				return -EINVAL;
 			}
-			if (!map_ptr->unpriv_array)
-				continue;
+
+			map_ptr = BPF_MAP_PTR(aux->map_state);
 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
 						  map_ptr->max_entries, 2);
 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
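For context on the comment in the hunk above (an illustrative user-space sketch, not kernel code): the two instructions the verifier emits amount to a bounds check plus an unconditional power-of-two mask, so that even a mispredicted, speculatively executed tail call cannot index past the end of the array's allocation:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the speculation hardening: check the index, then mask
 * it with index_mask (round-up-power-of-two(max_entries) - 1), so a
 * speculated out-of-bounds index still lands inside the allocation.
 */
struct toy_array {
	uint32_t max_entries;
	uint32_t index_mask;
	int slots[8];
};

static int *toy_lookup(struct toy_array *a, uint32_t index)
{
	if (index >= a->max_entries)	/* if (index >= max_entries) goto out; */
		return NULL;
	index &= a->index_mask;		/* index &= array->index_mask; */
	return &a->slots[index];
}

int main(void)
{
	struct toy_array a = { .max_entries = 5, .index_mask = 7 };

	a.slots[3] = 42;
	printf("%d\n", *toy_lookup(&a, 3));
	return 0;
}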
@@ -5580,9 +5619,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		 */
 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
 		    insn->imm == BPF_FUNC_map_lookup_elem) {
-			map_ptr = env->insn_aux_data[i + delta].map_ptr;
-			if (map_ptr == BPF_MAP_PTR_POISON ||
-			    !map_ptr->ops->map_gen_lookup)
+			aux = &env->insn_aux_data[i + delta];
+			if (bpf_map_ptr_poisoned(aux))
 				goto patch_call_imm;
 
+			map_ptr = BPF_MAP_PTR(aux->map_state);
+			if (!map_ptr->ops->map_gen_lookup)
+				goto patch_call_imm;
+
 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
@@ -1536,7 +1536,7 @@ batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
 
 	if (!ret && primary_if)
 		*primary_if = hard_iface;
-	else
+	else if (hard_iface)
 		batadv_hardif_put(hard_iface);
 
 	return ret;
@@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
 	struct batadv_orig_node_vlan *vlan;
 	u8 *tt_change_ptr;
 
-	rcu_read_lock();
+	spin_lock_bh(&orig_node->vlan_list_lock);
 	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		num_vlan++;
 		num_entries += atomic_read(&vlan->tt.num_entries);
@@ -900,7 +900,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&orig_node->vlan_list_lock);
 	return tvlv_len;
 }
 
@@ -931,15 +931,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_softif_vlan *vlan;
 	u16 num_vlan = 0;
-	u16 num_entries = 0;
+	u16 vlan_entries = 0;
+	u16 total_entries = 0;
 	u16 tvlv_len;
 	u8 *tt_change_ptr;
 	int change_offset;
 
-	rcu_read_lock();
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
 	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		vlan_entries = atomic_read(&vlan->tt.num_entries);
+		if (vlan_entries < 1)
+			continue;
+
 		num_vlan++;
-		num_entries += atomic_read(&vlan->tt.num_entries);
+		total_entries += vlan_entries;
 	}
 
 	change_offset = sizeof(**tt_data);
@@ -947,7 +952,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 
 	/* if tt_len is negative, allocate the space needed by the full table */
 	if (*tt_len < 0)
-		*tt_len = batadv_tt_len(num_entries);
+		*tt_len = batadv_tt_len(total_entries);
 
 	tvlv_len = *tt_len;
 	tvlv_len += change_offset;
@@ -964,6 +969,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
 	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		vlan_entries = atomic_read(&vlan->tt.num_entries);
+		if (vlan_entries < 1)
+			continue;
+
 		tt_vlan->vid = htons(vlan->vid);
 		tt_vlan->crc = htonl(vlan->tt.crc);
 
@@ -974,7 +983,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 	return tvlv_len;
 }
 
@@ -1538,6 +1547,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 * handled by a given originator
 * @entry: the TT global entry to check
 * @orig_node: the originator to search in the list
+ * @flags: a pointer to store TT flags for the given @entry received
+ *  from @orig_node
 *
 * find out if an orig_node is already in the list of a tt_global_entry.
 *
@@ -1545,7 +1556,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 */
 static bool
 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
-				const struct batadv_orig_node *orig_node)
+				const struct batadv_orig_node *orig_node,
+				u8 *flags)
 {
 	struct batadv_tt_orig_list_entry *orig_entry;
 	bool found = false;
@@ -1553,6 +1565,10 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 	orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
 	if (orig_entry) {
 		found = true;
+
+		if (flags)
+			*flags = orig_entry->flags;
+
 		batadv_tt_orig_list_entry_put(orig_entry);
 	}
 
@@ -1731,7 +1747,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 		if (!(common->flags & BATADV_TT_CLIENT_TEMP))
 			goto out;
 		if (batadv_tt_global_entry_has_orig(tt_global_entry,
-						    orig_node))
+						    orig_node, NULL))
 			goto out_remove;
 		batadv_tt_global_del_orig_list(tt_global_entry);
 		goto add_orig_entry;
@@ -2880,23 +2896,46 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_valid() - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify local tt entry and get flags
 * @entry_ptr: to be checked local tt entry
 * @data_ptr: not used but definition required to satisfy the callback prototype
+ * @flags: a pointer to store TT flags for this client to
+ *
+ * Checks the validity of the given local TT entry. If it is, then the provided
+ * flags pointer is updated.
 *
 * Return: true if the entry is a valid, false otherwise.
 */
-static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
+static bool batadv_tt_local_valid(const void *entry_ptr,
+				  const void *data_ptr,
+				  u8 *flags)
 {
 	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
 	if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
 		return false;
+
+	if (flags)
+		*flags = tt_common_entry->flags;
+
 	return true;
 }
 
+/**
+ * batadv_tt_global_valid() - verify global tt entry and get flags
+ * @entry_ptr: to be checked global tt entry
+ * @data_ptr: an orig_node object (may be NULL)
+ * @flags: a pointer to store TT flags for this client to
+ *
+ * Checks the validity of the given global TT entry. If it is, then the provided
+ * flags pointer is updated either with the common (summed) TT flags if data_ptr
+ * is NULL or the specific, per originator TT flags otherwise.
+ *
+ * Return: true if the entry is a valid, false otherwise.
+ */
 static bool batadv_tt_global_valid(const void *entry_ptr,
-				   const void *data_ptr)
+				   const void *data_ptr,
+				   u8 *flags)
 {
 	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 	const struct batadv_tt_global_entry *tt_global_entry;
@@ -2910,7 +2949,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 				       struct batadv_tt_global_entry,
 				       common);
 
-	return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
+	return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
+					       flags);
 }
 
 /**
@@ -2920,25 +2960,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 * @hash: hash table containing the tt entries
 * @tt_len: expected tvlv tt data buffer length in number of bytes
 * @tvlv_buff: pointer to the buffer to fill with the TT data
- * @valid_cb: function to filter tt change entries
+ * @valid_cb: function to filter tt change entries and to return TT flags
 * @cb_data: data passed to the filter function as argument
+ *
+ * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+ * is not provided then this becomes a no-op.
 */
 static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 				    struct batadv_hashtable *hash,
 				    void *tvlv_buff, u16 tt_len,
 				    bool (*valid_cb)(const void *,
-						     const void *),
+						     const void *,
+						     u8 *flags),
 				    void *cb_data)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct hlist_head *head;
 	u16 tt_tot, tt_num_entries = 0;
+	u8 flags;
+	bool ret;
 	u32 i;
 
 	tt_tot = batadv_tt_entries(tt_len);
 	tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
 
+	if (!valid_cb)
+		return;
+
 	rcu_read_lock();
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2948,11 +2997,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 			if (tt_tot == tt_num_entries)
 				break;
 
-			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
+			ret = valid_cb(tt_common_entry, cb_data, &flags);
+			if (!ret)
 				continue;
 
 			ether_addr_copy(tt_change->addr, tt_common_entry->addr);
-			tt_change->flags = tt_common_entry->flags;
+			tt_change->flags = flags;
 			tt_change->vid = htons(tt_common_entry->vid);
 			memset(tt_change->reserved, 0,
 			       sizeof(tt_change->reserved));
@@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 
 	dccp_clear_xmit_timers(sk);
 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
-	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
 	dp->dccps_hc_rx_ccid = NULL;
-	dp->dccps_hc_tx_ccid = NULL;
 
 	__skb_queue_purge(&sk->sk_receive_queue);
 	__skb_queue_purge(&sk->sk_write_queue);
@@ -649,6 +649,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
 	[RTA_ENCAP]	= { .type = NLA_NESTED },
 	[RTA_UID]	= { .type = NLA_U32 },
 	[RTA_MARK]	= { .type = NLA_U32 },
+	[RTA_TABLE]	= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -505,8 +505,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	int err;
 	int copied;
 
-	WARN_ON_ONCE(sk->sk_family == AF_INET6);
-
 	err = -EAGAIN;
 	skb = sock_dequeue_err_skb(sk);
 	if (!skb)
@@ -43,7 +43,10 @@ mr_table_alloc(struct net *net, u32 id,
 	write_pnet(&mrt->net, net);
 
 	mrt->ops = *ops;
-	rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
+	if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) {
+		kfree(mrt);
+		return NULL;
+	}
 	INIT_LIST_HEAD(&mrt->mfc_cache_list);
 	INIT_LIST_HEAD(&mrt->mfc_unres_queue);
 
@@ -401,7 +401,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
 
 static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 			       struct sta_info *sta,
-			       struct ieee802_11_elems *elems, bool insert)
+			       struct ieee802_11_elems *elems)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -447,7 +447,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 		sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
 	}
 
-	if (insert)
+	if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 		rate_control_rate_init(sta);
 	else
 		rate_control_rate_update(local, sband, sta, changed);
@@ -551,7 +551,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
 	rcu_read_lock();
 	sta = sta_info_get(sdata, addr);
 	if (sta) {
-		mesh_sta_info_init(sdata, sta, elems, false);
+		mesh_sta_info_init(sdata, sta, elems);
 	} else {
 		rcu_read_unlock();
 		/* can't run atomic */
@@ -561,7 +561,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
 			return NULL;
 		}
 
-		mesh_sta_info_init(sdata, sta, elems, true);
+		mesh_sta_info_init(sdata, sta, elems);
 
 		if (sta_info_insert_rcu(sta))
 			return NULL;
@@ -2911,7 +2911,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		if (unlikely(offset < 0))
 			goto out_free;
 	} else if (reserve) {
-		skb_push(skb, reserve);
+		skb_reserve(skb, -reserve);
 	}
 
 	/* Returns -EFAULT on error */
@@ -1588,7 +1588,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
 		return ret;
 	ok_count = ret;
 
-	if (!exts)
+	if (!exts || ok_count)
 		return ok_count;
 	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
 	if (ret < 0)
@@ -1006,7 +1006,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
 	.owner		   = THIS_MODULE,
 	.release	   = inet6_release,
 	.bind		   = inet6_bind,
-	.connect	   = inet_dgram_connect,
+	.connect	   = sctp_inet_connect,
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = inet_accept,
 	.getname	   = sctp_getname,
@@ -1012,7 +1012,7 @@ static const struct proto_ops inet_seqpacket_ops = {
 	.owner		   = THIS_MODULE,
 	.release	   = inet_release,	/* Needs to be wrapped... */
 	.bind		   = inet_bind,
-	.connect	   = inet_dgram_connect,
+	.connect	   = sctp_inet_connect,
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = inet_accept,
 	.getname	   = inet_getname,	/* Semantics are different. */
@@ -1086,7 +1086,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
 */
 static int __sctp_connect(struct sock *sk,
 			  struct sockaddr *kaddrs,
-			  int addrs_size,
+			  int addrs_size, int flags,
 			  sctp_assoc_t *assoc_id)
 {
 	struct net *net = sock_net(sk);
@@ -1104,7 +1104,6 @@ static int __sctp_connect(struct sock *sk,
 	union sctp_addr *sa_addr = NULL;
 	void *addr_buf;
 	unsigned short port;
-	unsigned int f_flags = 0;
 
 	sp = sctp_sk(sk);
 	ep = sp->ep;
@@ -1254,13 +1253,7 @@ static int __sctp_connect(struct sock *sk,
 	sp->pf->to_sk_daddr(sa_addr, sk);
 	sk->sk_err = 0;
 
-	/* in-kernel sockets don't generally have a file allocated to them
-	 * if all they do is call sock_create_kern().
-	 */
-	if (sk->sk_socket->file)
-		f_flags = sk->sk_socket->file->f_flags;
-
-	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
+	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
 	if (assoc_id)
 		*assoc_id = asoc->assoc_id;
@@ -1348,7 +1341,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
 				      sctp_assoc_t *assoc_id)
 {
 	struct sockaddr *kaddrs;
-	int err = 0;
+	int err = 0, flags = 0;
 
 	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
 		 __func__, sk, addrs, addrs_size);
@@ -1367,7 +1360,13 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
 	if (err)
 		goto out_free;
 
-	err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+	/* in-kernel sockets don't generally have a file allocated to them
+	 * if all they do is call sock_create_kern().
+	 */
+	if (sk->sk_socket->file)
+		flags = sk->sk_socket->file->f_flags;
+
+	err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
 
 out_free:
 	kvfree(kaddrs);
@@ -4397,16 +4396,26 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 * len: the size of the address.
 */
 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
-			int addr_len)
+			int addr_len, int flags)
 {
-	int err = 0;
+	struct inet_sock *inet = inet_sk(sk);
 	struct sctp_af *af;
+	int err = 0;
 
 	lock_sock(sk);
 
 	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
 		 addr, addr_len);
 
+	/* We may need to bind the socket. */
+	if (!inet->inet_num) {
+		if (sk->sk_prot->get_port(sk, 0)) {
+			release_sock(sk);
+			return -EAGAIN;
+		}
+		inet->inet_sport = htons(inet->inet_num);
+	}
+
 	/* Validate addr_len before calling common connect/connectx routine. */
 	af = sctp_get_af_specific(addr->sa_family);
 	if (!af || addr_len < af->sockaddr_len) {
@@ -4415,13 +4424,25 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
 		/* Pass correct addr len to common routine (so it knows there
 		 * is only one address being passed.
 		 */
-		err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
+		err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
 	}
 
 	release_sock(sk);
 	return err;
 }
 
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+		      int addr_len, int flags)
+{
+	if (addr_len < sizeof(uaddr->sa_family))
+		return -EINVAL;
+
+	if (uaddr->sa_family == AF_UNSPEC)
+		return -EOPNOTSUPP;
+
+	return sctp_connect(sock->sk, uaddr, addr_len, flags);
+}
+
 /* FIXME: Write comments. */
 static int sctp_disconnect(struct sock *sk, int flags)
 {
@@ -8724,7 +8745,6 @@ struct proto sctp_prot = {
 	.name		=	"SCTP",
 	.owner		=	THIS_MODULE,
 	.close		=	sctp_close,
-	.connect	=	sctp_connect,
 	.disconnect	=	sctp_disconnect,
 	.accept		=	sctp_accept,
 	.ioctl		=	sctp_ioctl,
@@ -8767,7 +8787,6 @@ struct proto sctpv6_prot = {
 	.name		=	"SCTPv6",
 	.owner		=	THIS_MODULE,
 	.close		=	sctp_close,
-	.connect	=	sctp_connect,
 	.disconnect	=	sctp_disconnect,
 	.accept		=	sctp_accept,
 	.ioctl		=	sctp_ioctl,
@@ -15555,7 +15555,8 @@ void cfg80211_ft_event(struct net_device *netdev,
 	if (!ft_event->target_ap)
 		return;
 
-	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
+	msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
+			GFP_KERNEL);
 	if (!msg)
 		return;
 
@@ -916,6 +916,9 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
 	const struct fwdb_header *hdr = regdb;
 	const struct fwdb_country *country;
 
+	if (!regdb)
+		return -ENODATA;
+
 	if (IS_ERR(regdb))
 		return PTR_ERR(regdb);
 
@@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCH_INGRESS=y
@@ -7,3 +7,8 @@ CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_IPV6=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_VETH=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_IPV6_VTI=y
+CONFIG_DUMMY=y
@@ -23,6 +23,8 @@
 #include <unistd.h>
 #include <numa.h>
 
+#include "../kselftest.h"
+
 static const int PORT = 8888;
 
 static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto)
@@ -229,7 +231,7 @@ int main(void)
 	int *rcv_fd, nodes;
 
 	if (numa_available() < 0)
-		error(1, errno, "no numa api support");
+		ksft_exit_skip("no numa api support\n");
 
 	nodes = numa_max_node() + 1;
 