Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (66 commits)
  can-bcm: fix minor heap overflow
  gianfar: Do not call device_set_wakeup_enable() under a spinlock
  ipv6: Warn users if maximum number of routes is reached.
  docs: Add neigh/gc_thresh3 and route/max_size documentation.
  axnet_cs: fix resume problem for some Ax88790 chip
  ipv6: addrconf: don't remove address state on ifdown if the address is being kept
  tcp: Don't change unlocked socket state in tcp_v4_err().
  x25: Prevent crashing when parsing bad X.25 facilities
  cxgb4vf: add call to Firmware to reset VF State.
  cxgb4vf: Fail open if link_start() fails.
  cxgb4vf: flesh out PCI Device ID Table ...
  cxgb4vf: fix some errors in Gather List to skb conversion
  cxgb4vf: fix bug in Generic Receive Offload
  cxgb4vf: don't implement trivial (and incorrect) ndo_select_queue()
  ixgbe: Look inside vlan when determining offload protocol.
  bnx2x: Look inside vlan when determining checksum proto.
  vlan: Add function to retrieve EtherType from vlan packets.
  virtio-net: init link state correctly
  ucc_geth: Fix deadlock
  ucc_geth: Do not bring the whole IF down when TX failure.
  ...

This commit is contained in: commit 9457b24a09

74 changed files with 507 additions and 331 deletions
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
 min_pmtu - INTEGER
 	default 562 - minimum discovered Path MTU
 
+route/max_size - INTEGER
+	Maximum number of routes allowed in the kernel.  Increase
+	this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+	Maximum number of neighbor entries allowed.  Increase this
+	when using large numbers of interfaces and when communicating
+	with large numbers of directly-connected peers.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
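
Both tunables documented above can be changed at runtime through procfs. A minimal user-space sketch (the doubling is illustrative, not from the patch; the same pattern works for neigh/default/gc_thresh3):

	#include <stdio.h>

	/* Read net.ipv4.route.max_size and double it. */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/route/max_size", "r+");
		unsigned long max_size;

		if (!f || fscanf(f, "%lu", &max_size) != 1)
			return 1;
		printf("current route/max_size: %lu\n", max_size);
		rewind(f);
		fprintf(f, "%lu\n", max_size * 2);
		fclose(f);
		return 0;
	}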
@@ -1,6 +1,7 @@
 SOLOS_ATTR_RO(DriverVersion)
 SOLOS_ATTR_RO(APIVersion)
 SOLOS_ATTR_RO(FirmwareVersion)
+SOLOS_ATTR_RO(Version)
 // SOLOS_ATTR_RO(DspVersion)
 // SOLOS_ATTR_RO(CommonHandshake)
 SOLOS_ATTR_RO(Connected)
@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
 		 major_ver, minor_ver, fpga_ver);
 
+	if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
+			      db_fpga_upgrade || db_firmware_upgrade)) {
+		dev_warn(&dev->dev,
+			 "FPGA too old; cannot upgrade flash. Use JTAG.\n");
+		fpga_upgrade = firmware_upgrade = 0;
+		db_fpga_upgrade = db_firmware_upgrade = 0;
+	}
+
 	if (card->fpga_version >= DMA_SUPPORTED){
 		card->using_dma = 1;
 	} else {
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = {
 	/* Apple MacBookPro6,2 */
 	{ USB_DEVICE(0x05ac, 0x8218) },
 
+	/* Apple MacBookAir3,1, MacBookAir3,2 */
+	{ USB_DEVICE(0x05ac, 0x821b) },
+
 	/* AVM BlueFRITZ! USB v2.0 */
 	{ USB_DEVICE(0x057c, 0x3800) },
 
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, data);
 
+	usb_enable_autosuspend(interface_to_usbdev(intf));
+
 	return 0;
 }
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 		rc = XMIT_PLAIN;
 
 	else {
-		if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 			rc = XMIT_CSUM_V6;
 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 				rc |= XMIT_CSUM_TCP;
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
 	if (err)
 		return err;
 	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	err = link_start(dev);
+	if (err)
+		return err;
 	netif_tx_start_all_queues(dev);
 	return 0;
 }

@@ -1103,18 +1105,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
 	return 0;
 }
 
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues.  This is called outside of normal interrupt

@@ -2074,6 +2064,22 @@ static int adap_init0(struct adapter *adapter)
 		return err;
 	}
 
+	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
 	/*
 	 * Grab basic operational parameters.  These will predominantly have
 	 * been set up by the Physical Function Driver or will be hard coded

@@ -2417,7 +2423,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 	.ndo_get_stats		= cxgb4vf_get_stats,
 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
-	.ndo_select_queue	= cxgb4vf_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
 	.ndo_change_mtu		= cxgb4vf_change_mtu,

@@ -2624,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 		netdev->do_ioctl = cxgb4vf_do_ioctl;
 		netdev->change_mtu = cxgb4vf_change_mtu;
 		netdev->set_mac_address = cxgb4vf_set_mac_addr;
-		netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = cxgb4vf_poll_controller;
 #endif

@@ -2843,6 +2847,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
 	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),	/* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),	/* T404-bt */
 	{ 0, }
 };
@@ -154,13 +154,14 @@ enum {
 	 */
 	RX_COPY_THRES = 256,
 	RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.

@@ -1354,6 +1355,67 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+/**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
 /**
  *	t4vf_pktgl_free - free a packet gather list
  *	@gl: the gather list

@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
 	struct sk_buff *skb;
 	struct port_info *pi;
-	struct skb_shared_info *ssi;
 	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*

@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	}
 
 	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
+	 * Convert the Packet Gather List into an skb.
 	 */
-	if (len <= RX_COPY_THRES) {
-		/* small packets have only one fragment */
-		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, gl->frags[0].size);
-		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-	} else {
-		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, RX_PKT_PULL_LEN);
-		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-					     RX_PKT_PULL_LEN);
-		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-		skb->len = len + PKTSHIFT;
-		skb->data_len = skb->len - RX_PKT_PULL_LEN;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
 	}
 
+	__skb_pull(skb, PKTSHIFT);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);

@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	netif_receive_skb(skb);
 
 	return 0;
-
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
 }
 
 /**

@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			}
 			len = RSPD_LEN(len);
 		}
+		gl.tot_len = len;
 
 		/*
 		 * Gather packet fragments.
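
The t4vf_pktgl_to_skb() comment above describes a common RX trade-off: copy small packets into the skb's linear area, but for large packets copy only the headers and reference the payload pages as fragments. A stripped-down, driver-agnostic sketch of that decision (the function and parameter names are hypothetical, not the driver's code):

	#include <linux/skbuff.h>
	#include <linux/mm.h>

	/* Hypothetical sketch: build an skb from one receive buffer page,
	 * copying small packets and attaching large payloads as a frag. */
	static struct sk_buff *rx_buf_to_skb(void *va, struct page *page,
					     unsigned int offset,
					     unsigned int len,
					     unsigned int copy_thresh,
					     unsigned int pull_len)
	{
		struct sk_buff *skb;

		if (len <= copy_thresh) {
			skb = alloc_skb(len, GFP_ATOMIC);
			if (unlikely(!skb))
				return NULL;
			__skb_put(skb, len);
			skb_copy_to_linear_data(skb, va, len);
		} else {
			skb = alloc_skb(pull_len, GFP_ATOMIC);
			if (unlikely(!skb))
				return NULL;
			__skb_put(skb, pull_len);
			skb_copy_to_linear_data(skb, va, pull_len);
			skb_fill_page_desc(skb, 0, page, offset + pull_len,
					   len - pull_len);
			skb->len = len;
			skb->data_len = len - pull_len;
			skb->truesize += skb->data_len;
			get_page(page);	/* the fragment keeps a reference */
		}
		return skb;
	}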
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 int __devinit t4vf_wait_dev_ready(struct adapter *);
 int __devinit t4vf_port_init(struct adapter *, int);
 
+int t4vf_fw_reset(struct adapter *);
 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
|
@ -325,6 +325,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* t4vf_fw_reset - issue a reset to FW
|
||||
* @adapter: the adapter
|
||||
*
|
||||
* Issues a reset command to FW. For a Physical Function this would
|
||||
* result in the Firmware reseting all of its state. For a Virtual
|
||||
* Function this just resets the state associated with the VF.
|
||||
*/
|
||||
int t4vf_fw_reset(struct adapter *adapter)
|
||||
{
|
||||
struct fw_reset_cmd cmd;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
|
||||
FW_CMD_WRITE);
|
||||
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
|
||||
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* t4vf_query_params - query FW or device parameters
|
||||
* @adapter: the adapter
|
||||
|
|
|
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 
+	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
 	spin_lock_irqsave(&priv->bflock, flags);
-	priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
-	device_set_wakeup_enable(&dev->dev, priv->wol_en);
+	priv->wol_en = !!device_may_wakeup(&dev->dev);
 	spin_unlock_irqrestore(&priv->bflock, flags);
 
 	return 0;
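
The gianfar hunk reorders the calls because device_set_wakeup_enable() may sleep, and sleeping while holding a spinlock with interrupts disabled can deadlock. The rule in miniature (the lock and function names here are hypothetical):

	#include <linux/spinlock.h>
	#include <linux/device.h>
	#include <linux/pm_wakeup.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	static void set_wol(struct device *dev, bool enable)
	{
		unsigned long flags;

		device_set_wakeup_enable(dev, enable);	/* may sleep: call outside the lock */

		spin_lock_irqsave(&example_lock, flags);
		/* only touch driver-private state here; never sleep */
		spin_unlock_irqrestore(&example_lock, flags);
	}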
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #ifdef IXGBE_FCOE
 	/* adjust for FCoE Sequence Offload */
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-	    && (skb->protocol == htons(ETH_P_FCOE)) &&
-	    skb_is_gso(skb)) {
+	    && skb_is_gso(skb)
+	    && vlan_get_protocol(skb) ==
+	       htons(ETH_P_FCOE)) {
 		hlen = skb_transport_offset(skb) +
 			sizeof(struct fc_frame_header) +
 			sizeof(struct fcoe_crc_eof);

@@ -5823,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len)
+		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;

@@ -5841,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		l4len = tcp_hdrlen(skb);
 		*hdr_len += l4len;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->tot_len = 0;
 			iph->check = 0;

@@ -5880,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 				   IXGBE_ADVTXD_DTYP_CTXT);
 
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

@@ -5906,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+		      __be16 protocol)
 {
 	u32 rtn = 0;
-	__be16 protocol;
-
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		protocol = ((const struct vlan_ethhdr *)skb->data)->
-			   h_vlan_encapsulated_proto;
-	else
-		protocol = skb->protocol;
 
 	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):

@@ -5943,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 	default:
 		if (unlikely(net_ratelimit()))
 			e_warn(probe, "partial checksum but proto=%x!\n",
-			       skb->protocol);
+			       protocol);
 		break;
 	}

@@ -5952,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags)
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;

@@ -5981,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 				   IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 		/* use index zero for tx checksum offload */

@@ -6179,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      int queue, u32 tx_flags)
+		      int queue, u32 tx_flags, __be16 protocol)
 {
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;

@@ -6190,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	u8 l4type = 0;
 
 	/* Right now, we support IPv4 only */
-	if (skb->protocol != htons(ETH_P_IP))
+	if (protocol != htons(ETH_P_IP))
 		return;
 	/* check if we're UDP or TCP */
 	if (iph->protocol == IPPROTO_TCP) {

@@ -6257,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int txq = smp_processor_id();
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
 
 #ifdef IXGBE_FCOE
-	if ((skb->protocol == htons(ETH_P_FCOE)) ||
-	    (skb->protocol == htons(ETH_P_FIP))) {
+	if ((protocol == htons(ETH_P_FCOE)) ||
+	    (protocol == htons(ETH_P_FIP))) {
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 			txq += adapter->ring_feature[RING_F_FCOE].mask;

@@ -6303,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 	int tso;
 	int count = 0;
 	unsigned int f;
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
 
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);

@@ -6323,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 	/* for FCoE with DCB, we force the priority to what
 	 * was specified by the switch */
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (skb->protocol == htons(ETH_P_FCOE) ||
-	     skb->protocol == htons(ETH_P_FIP))) {
+	    (protocol == htons(ETH_P_FCOE) ||
+	     protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK

@@ -6334,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		}
 #endif
 		/* flag for FCoE offloads */
-		if (skb->protocol == htons(ETH_P_FCOE))
+		if (protocol == htons(ETH_P_FCOE))
 			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 	}
 #endif

@@ -6368,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+				protocol);
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;

@@ -6378,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 
 		if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+				       protocol) &&
 			 (skb->ip_summed == CHECKSUM_PARTIAL))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}

@@ -6392,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		    test_bit(__IXGBE_FDIR_INIT_DONE,
 			     &tx_ring->reinit_state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
-				  tx_flags);
+				  tx_flags, protocol);
 			tx_ring->atr_count = 0;
 		}
 	}
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
 
 typedef struct axnet_dev_t {
 	struct pcmcia_device	*p_dev;
-	caddr_t	base;
-	struct timer_list	watchdog;
-	int	stale, fast_poll;
-	u_short	link_status;
-	u_char	duplex_flag;
-	int	phy_id;
-	int	flags;
+	caddr_t	base;
+	struct timer_list	watchdog;
+	int	stale, fast_poll;
+	u_short	link_status;
+	u_char	duplex_flag;
+	int	phy_id;
+	int	flags;
+	int	active_low;
 } axnet_dev_t;
 
 static inline axnet_dev_t *PRIV(struct net_device *dev)

@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
 	if (info->flags & IS_AX88790)
 		outb(0x10, dev->base_addr + AXNET_GPIO);  /* select Internal PHY */
 
+	info->active_low = 0;
+
 	for (i = 0; i < 32; i++) {
 		j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 		j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);

@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
 		if ((j != 0) && (j != 0xffff)) break;
 	}
 
-	/* Maybe PHY is in power down mode. (PPD_SET = 1)
-	   Bit 2 of CCSR is active low. */
 	if (i == 32) {
+		/* Maybe PHY is in power down mode. (PPD_SET = 1)
+		   Bit 2 of CCSR is active low. */
 		pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
 		for (i = 0; i < 32; i++) {
 			j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 			j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
 			if (j == j2) continue;
-			if ((j != 0) && (j != 0xffff)) break;
+			if ((j != 0) && (j != 0xffff)) {
+				info->active_low = 1;
+				break;
+			}
 		}
 	}
 

@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
 static int axnet_resume(struct pcmcia_device *link)
 {
 	struct net_device *dev = link->priv;
+	axnet_dev_t *info = PRIV(dev);
 
 	if (link->open) {
+		if (info->active_low == 1)
+			pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
 		axnet_reset_8390(dev);
 		AX88190_init(dev, 1);
 		netif_device_attach(dev);
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
 	spin_unlock_irq(&tp->lock);
 
+	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
 	return 0;
 }

@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,

@@ -4588,7 +4588,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 		}
 
 		/* Work around for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver)) {
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
 			netif_stop_queue(dev);
 			rtl8169_tx_timeout(dev);
 			break;
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
 	/* device is off until link detection */
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	return dev;
 }
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 
 	ugeth_vdbg("%s: IN", __func__);
 
+	/*
+	 * Tell the kernel the link is down.
+	 * Must be done before disabling the controller
+	 * or deadlock may happen.
+	 */
+	phy_stop(phydev);
+
 	/* Disable the controller */
 	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 
-	/* Tell the kernel the link is down */
-	phy_stop(phydev);
-
 	/* Mask all interrupts */
 	out_be32(ugeth->uccf->p_uccm, 0x00000000);
 

@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 	/* Disable Rx and Tx */
 	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
 
-	phy_disconnect(ugeth->phydev);
-	ugeth->phydev = NULL;
-
 	ucc_geth_memclean(ugeth);
 }

@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
 
 	napi_disable(&ugeth->napi);
 
+	cancel_work_sync(&ugeth->timeout_work);
 	ucc_geth_stop(ugeth);
+	phy_disconnect(ugeth->phydev);
+	ugeth->phydev = NULL;
 
 	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 

@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
 		 * Must reset MAC *and* PHY. This is done by reopening
 		 * the device.
 		 */
-		ucc_geth_close(dev);
-		ucc_geth_open(dev);
+		netif_tx_stop_all_queues(dev);
+		ucc_geth_stop(ugeth);
+		ucc_geth_init_mac(ugeth);
+		/* Must start PHY here */
+		phy_start(ugeth->phydev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);

@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	netif_carrier_off(dev);
 	schedule_work(&ugeth->timeout_work);
 }
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 		goto unregister;
 	}
 
-	vi->status = VIRTIO_NET_S_LINK_UP;
-	virtnet_update_status(vi);
-	netif_carrier_on(dev);
+	/* Assume link up if device can't report link status,
+	   otherwise get link status from config. */
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+		netif_carrier_off(dev);
+		virtnet_update_status(vi);
+	} else {
+		vi->status = VIRTIO_NET_S_LINK_UP;
+		netif_carrier_on(dev);
+	}
 
 	pr_debug("virtnet: registered device %s\n", dev->name);
 	return 0;
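
The probe fix keys everything off VIRTIO_NET_F_STATUS: if the device can report link state, starting with the carrier off until the device confirms link-up avoids transmitting into a link the host reports as down; a device without the feature can only be assumed up. The shape of the check, isolated (the function name is hypothetical):

	#include <linux/virtio_config.h>
	#include <linux/virtio_net.h>
	#include <linux/netdevice.h>

	/* Hypothetical sketch: only trust the carrier to be up when the
	 * device cannot report link state at all. */
	static void init_carrier(struct virtio_device *vdev,
				 struct net_device *dev)
	{
		if (virtio_has_feature(vdev, VIRTIO_NET_F_STATUS))
			netif_carrier_off(dev);	/* real state read from config later */
		else
			netif_carrier_on(dev);	/* no status feature: assume link-up */
	}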
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
 		val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
 	}
 
+	if (AR_SREV_9280(ah))
+		val |= AR_WA_BIT22;
+
 	if (AR_SREV_9285E_20(ah))
 		val |= AR_WA_BIT23;
@@ -675,6 +675,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 }
 
 extern struct ieee80211_ops ath9k_ops;
+extern struct pm_qos_request_list ath9k_pm_qos_req;
 extern int modparam_nohwcrypt;
 extern int led_blink;
@@ -35,6 +35,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
 	{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
 	{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
 	{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+	{ USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
 	{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
 	{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
 	{ },

@@ -540,11 +541,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
 		return;
 	}
 
-	usb_fill_int_urb(urb, hif_dev->udev,
+	usb_fill_bulk_urb(urb, hif_dev->udev,
 			 usb_rcvbulkpipe(hif_dev->udev,
 					 USB_REG_IN_PIPE),
 			 nskb->data, MAX_REG_IN_BUF_SIZE,
-			 ath9k_hif_usb_reg_in_cb, nskb, 1);
+			 ath9k_hif_usb_reg_in_cb, nskb);
 
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 	if (ret) {

@@ -720,11 +721,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
 	if (!skb)
 		goto err;
 
-	usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+	usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
 			 usb_rcvbulkpipe(hif_dev->udev,
 					 USB_REG_IN_PIPE),
 			 skb->data, MAX_REG_IN_BUF_SIZE,
-			 ath9k_hif_usb_reg_in_cb, skb, 1);
+			 ath9k_hif_usb_reg_in_cb, skb);
 
 	if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
 		goto err;

@@ -843,14 +844,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
 		goto err_fw_req;
 	}
 
-	/* Alloc URBs */
-	ret = ath9k_hif_usb_alloc_urbs(hif_dev);
-	if (ret) {
-		dev_err(&hif_dev->udev->dev,
-			"ath9k_htc: Unable to allocate URBs\n");
-		goto err_urb;
-	}
-
 	/* Download firmware */
 	ret = ath9k_hif_usb_download_fw(hif_dev);
 	if (ret) {

@@ -866,16 +859,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
 	 */
 	for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
 		endp = &alt->endpoint[idx].desc;
-		if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
-				== 0x04) &&
-		    ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_INT)) {
+		if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				== USB_ENDPOINT_XFER_INT) {
 			endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
 			endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
 			endp->bInterval = 0;
 		}
 	}
 
+	/* Alloc URBs */
+	ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+	if (ret) {
+		dev_err(&hif_dev->udev->dev,
+			"ath9k_htc: Unable to allocate URBs\n");
+		goto err_urb;
+	}
+
 	return 0;
 
 err_fw_download:
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
 		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
 			  "Failed allocating banks for "
 			  "external radio\n");
+		ath9k_hw_rf_free_ext_banks(ah);
 		return ecode;
 	}
 

@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
 		REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
 		break;
 	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_MONITOR:
 		REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
 		break;
+	default:
+		if (ah->is_monitoring)
+			REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+		break;
 	}
 }
 

@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 
 	switch (ah->opmode) {
 	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_MONITOR:
 		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
 		REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
 		REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);

@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 			AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
 		break;
 	default:
+		if (ah->is_monitoring) {
+			REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
+				  TU_TO_USEC(next_beacon));
+			REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
+			REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
+			flags |= AR_TBTT_TIMER_EN;
+			break;
+		}
 		ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
 			  "%s: unsupported opmode: %d\n",
 			  __func__, ah->opmode);
@@ -622,6 +622,7 @@ struct ath_hw {
 
 	bool sw_mgmt_crypto;
 	bool is_pciexpress;
+	bool is_monitoring;
 	bool need_an_top2_fixup;
 	u16 tx_trig_level;
@@ -15,6 +15,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/pm_qos_params.h>
 
 #include "ath9k.h"
 

@@ -179,6 +180,8 @@ static const struct ath_ops ath9k_common_ops = {
 	.write = ath9k_iowrite32,
 };
 
+struct pm_qos_request_list ath9k_pm_qos_req;
+
 /**************************/
 /*     Initialization     */
 /**************************/

@@ -756,6 +759,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 	ath_init_leds(sc);
 	ath_start_rfkill_poll(sc);
 
+	pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
 	return 0;
 
 error_world:

@@ -811,6 +817,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
 
 	ath9k_ps_wakeup(sc);
 
+	pm_qos_remove_request(&ath9k_pm_qos_req);
+
 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
 	ath_deinit_leds(sc);
@@ -15,6 +15,7 @@
  */
 
 #include <linux/nl80211.h>
+#include <linux/pm_qos_params.h>
 #include "ath9k.h"
 #include "btcoex.h"
 

@@ -93,11 +94,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	unsigned long flags;
+	enum ath9k_power_mode power_mode;
 
 	spin_lock_irqsave(&sc->sc_pm_lock, flags);
 	if (++sc->ps_usecount != 1)
 		goto unlock;
 
+	power_mode = sc->sc_ah->power_mode;
 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
 
 	/*

@@ -105,10 +108,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
 	 * useful data. Better clear them now so that they don't mess up
 	 * survey data results.
 	 */
-	spin_lock(&common->cc_lock);
-	ath_hw_cycle_counters_update(common);
-	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
-	spin_unlock(&common->cc_lock);
+	if (power_mode != ATH9K_PM_AWAKE) {
+		spin_lock(&common->cc_lock);
+		ath_hw_cycle_counters_update(common);
+		memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+		spin_unlock(&common->cc_lock);
+	}
 
  unlock:
 	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

@@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 		ah->imask |= ATH9K_INT_CST;
 
 	sc->sc_flags &= ~SC_OP_INVALID;
+	sc->sc_ah->is_monitoring = false;
 
 	/* Disable BMISS interrupt when we're not associated */
 	ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);

@@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
 		ath9k_btcoex_timer_resume(sc);
 	}
 
+	pm_qos_update_request(&ath9k_pm_qos_req, 55);
+
 mutex_unlock:
 	mutex_unlock(&sc->mutex);
 

@@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
 	sc->sc_flags |= SC_OP_INVALID;
 
+	pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
 	mutex_unlock(&sc->mutex);
 
 	ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");

@@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 	ath9k_hw_set_interrupts(ah, ah->imask);
 
 	if (vif->type == NL80211_IFTYPE_AP    ||
-	    vif->type == NL80211_IFTYPE_ADHOC ||
-	    vif->type == NL80211_IFTYPE_MONITOR) {
+	    vif->type == NL80211_IFTYPE_ADHOC) {
 		sc->sc_flags |= SC_OP_ANI_RUN;
 		ath_start_ani(common);
 	}

@@ -1644,8 +1653,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
 		if (conf->flags & IEEE80211_CONF_MONITOR) {
 			ath_print(common, ATH_DBG_CONFIG,
-				  "HW opmode set to Monitor mode\n");
-			sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+				  "Monitor mode is enabled\n");
+			sc->sc_ah->is_monitoring = true;
+		} else {
+			ath_print(common, ATH_DBG_CONFIG,
+				  "Monitor mode is disabled\n");
+			sc->sc_ah->is_monitoring = false;
+		}
 	}
@@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 	 */
 	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
 	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
-	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
+	    (sc->sc_ah->is_monitoring))
 		rfilt |= ATH9K_RX_FILTER_PROM;
 
 	if (sc->rx.rxfilter & FIF_CONTROL)

@@ -897,7 +897,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
 	 * decryption and MIC failures. For monitor mode,
 	 * we also ignore the CRC error.
 	 */
-	if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+	if (ah->is_monitoring) {
 		if (rx_stats->rs_status &
 		    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
 		      ATH9K_RXERR_CRC))
@@ -703,6 +703,7 @@
 #define AR_WA_RESET_EN                  (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
 #define AR_WA_ANALOG_SHIFT              (1 << 20)
 #define AR_WA_POR_SHORT                 (1 << 21) /* PCI-E Phy reset control */
+#define AR_WA_BIT22                     (1 << 22)
 #define AR9285_WA_DEFAULT               0x004a050b
 #define AR9280_WA_DEFAULT               0x0040073b
 #define AR_WA_DEFAULT                   0x0000073f
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = {
 	{ USB_DEVICE(0x07d1, 0x3c10) },
 	/* D-Link DWA 160 A2 */
 	{ USB_DEVICE(0x07d1, 0x3a09) },
+	/* D-Link DWA 130 D */
+	{ USB_DEVICE(0x07d1, 0x3a0f) },
 	/* Netgear WNA1000 */
 	{ USB_DEVICE(0x0846, 0x9040) },
-	/* Netgear WNDA3100 */
+	/* Netgear WNDA3100 (v1) */
 	{ USB_DEVICE(0x0846, 0x9010) },
 	/* Netgear WN111 v2 */
 	{ USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	 * "the hard way", rather than using device's scan.
 	 */
 	if (iwl3945_mod_params.disable_hw_scan) {
-		IWL_ERR(priv, "sw scan support is deprecated\n");
+		dev_printk(KERN_DEBUG, &(pdev->dev),
+			   "sw scan support is deprecated\n");
 		iwl3945_hw_ops.hw_scan = NULL;
 	}
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work)
 
 	if (priv->scan_channel < priv->scan_req->n_channels) {
 		cancel_delayed_work(&priv->scan_work);
-		queue_delayed_work(priv->work_thread, &priv->scan_work,
-				   msecs_to_jiffies(300));
+		if (!priv->stopping)
+			queue_delayed_work(priv->work_thread, &priv->scan_work,
+					   msecs_to_jiffies(300));
 	}
 
 	/* This is the final data we are about to send */
@@ -36,6 +36,7 @@ struct lbs_private {
 	/* CFG80211 */
 	struct wireless_dev *wdev;
 	bool wiphy_registered;
+	bool stopping;
 	struct cfg80211_scan_request *scan_req;
 	u8 assoc_bss[ETH_ALEN];
 	u8 disassoc_reason;
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev)
 	lbs_deb_enter(LBS_DEB_NET);
 
 	spin_lock_irq(&priv->driver_lock);
+	priv->stopping = false;
 
 	if (priv->connect_status == LBS_CONNECTED)
 		netif_carrier_on(dev);

@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev)
 	lbs_deb_enter(LBS_DEB_NET);
 
 	spin_lock_irq(&priv->driver_lock);
+	priv->stopping = true;
 	netif_stop_queue(dev);
 	spin_unlock_irq(&priv->driver_lock);
 
 	schedule_work(&priv->mcast_work);
+	cancel_delayed_work_sync(&priv->scan_work);
+	if (priv->scan_req) {
+		cfg80211_scan_done(priv->scan_req, false);
+		priv->scan_req = NULL;
+	}
 
 	lbs_deb_leave(LBS_DEB_NET);
 	return 0;
@@ -440,7 +440,6 @@ struct qeth_qdio_out_q {
 	 * index of buffer to be filled by driver; state EMPTY or PACKING
 	 */
 	int next_buf_to_fill;
-	int sync_iqdio_error;
 	/*
 	 * number of buffers that are currently filled (PRIMED)
 	 * -> these buffers are hardware-owned

@@ -695,14 +694,6 @@ struct qeth_mc_mac {
 	int is_vmac;
 };
 
-struct qeth_skb_data {
-	__u32 magic;
-	int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
 struct qeth_rx {
 	int b_count;
 	int b_index;
@@ -877,8 +877,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 	return;
 }
 
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf)
 {
 	int i;
 	struct sk_buff *skb;

@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	if (buf->buffer->element[0].flags & 0x40)
 		atomic_dec(&queue->set_pci_flags_count);
 
-	if (!qeth_skip_skb) {
-		skb = skb_dequeue(&buf->skb_list);
-		while (skb) {
-			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
-			skb = skb_dequeue(&buf->skb_list);
-		}
+	skb = skb_dequeue(&buf->skb_list);
+	while (skb) {
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+		skb = skb_dequeue(&buf->skb_list);
 	}
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])

@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf)
-{
-	__qeth_clear_output_buffer(queue, buf, 0);
-}
-
 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;

@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		}
 	}
 
-	queue->sync_iqdio_error = 0;
 	queue->card->dev->trans_start = jiffies;
 	if (queue->card->options.performance_stats) {
 		queue->card->perf_stats.outbound_do_qdio_cnt++;

@@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		queue->card->perf_stats.outbound_do_qdio_time +=
 			qeth_get_micros() -
 			queue->card->perf_stats.outbound_do_qdio_start_time;
-	if (rc > 0) {
-		if (!(rc & QDIO_ERROR_SIGA_BUSY))
-			queue->sync_iqdio_error = rc & 3;
-	}
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */

@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (card->dev)
+	if (card->dev && (card->dev->flags & IFF_UP))
 		napi_schedule(&card->napi);
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);

@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
 	struct qeth_qdio_out_buffer *buffer;
 	int i;
-	unsigned qeth_send_err;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {

@@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
 		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
-		qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
-		__qeth_clear_output_buffer(queue, buffer,
-			(qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+		qeth_handle_send_error(card, buffer, qdio_error);
+		qeth_clear_output_buffer(queue, buffer);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */

@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 		int offset, int hd_len)
 {
 	struct qeth_qdio_out_buffer *buffer;
-	struct sk_buff *skb1;
-	struct qeth_skb_data *retry_ctrl;
 	int index;
-	int rc;
 
 	/* spin until we get the queue ... */
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,

@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
 	qeth_flush_buffers(queue, index, 1);
-	if (queue->sync_iqdio_error == 2) {
-		skb1 = skb_dequeue(&buffer->skb_list);
-		while (skb1) {
-			atomic_dec(&skb1->users);
-			skb1 = skb_dequeue(&buffer->skb_list);
-		}
-		retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
-		if (retry_ctrl->magic != QETH_SKB_MAGIC) {
-			retry_ctrl->magic = QETH_SKB_MAGIC;
-			retry_ctrl->count = 0;
-		}
-		if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
-			retry_ctrl->count++;
-			rc = dev_queue_xmit(skb);
-		} else {
-			dev_kfree_skb_any(skb);
-			QETH_CARD_TEXT(card, 2, "qrdrop");
-		}
-	}
 	return 0;
 out:
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -339,6 +339,31 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 	}
 }
 
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+	__be16 protocol = 0;
+
+	if (vlan_tx_tag_present(skb) ||
+	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
+		protocol = skb->protocol;
+	else {
+		__be16 proto, *protop;
+		protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
+						h_vlan_encapsulated_proto),
+						sizeof(proto), &proto);
+		if (likely(protop))
+			protocol = *protop;
+	}
+
+	return protocol;
+}
 #endif /* __KERNEL__ */
 
 /* VLAN IOCTLs are found in sockios.h */
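
Callers such as the ixgbe and bnx2x hunks earlier in this merge use the helper when choosing an offload path, because skb->protocol is ETH_P_8021Q for VLAN-tagged frames and the real EtherType sits inside the tag. A minimal caller sketch (tx_csum_type() is hypothetical):

	#include <linux/if_vlan.h>
	#include <linux/if_ether.h>

	/* Pick a checksum-offload path by the encapsulated EtherType. */
	static int tx_csum_type(struct sk_buff *skb)
	{
		__be16 protocol = vlan_get_protocol(skb);

		if (protocol == htons(ETH_P_IP))
			return 4;	/* IPv4 checksum path */
		if (protocol == htons(ETH_P_IPV6))
			return 6;	/* IPv6 checksum path */
		return 0;		/* no offload */
	}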
@@ -1554,6 +1554,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
+	if (WARN_ON(!dev_queue)) {
+		printk(KERN_INFO "netif_stop_queue() cannot be called before "
+		       "register_netdev()");
+		return;
+	}
 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
@@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
 	int ret;
 
 	if (!cond ||
-	    (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1))
+	    ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
 		ret = okfn(skb);
 	return ret;
 }
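
The only change here is a pair of parentheses: == binds tighter than =, so the old expression assigned the comparison's result to ret rather than nf_hook_thresh()'s return value. The bug in miniature:

	#include <assert.h>

	static int f(void) { return 42; }

	int main(void)
	{
		int ret;

		ret = f() == 1;			/* what the old code did: ret gets 0 */
		assert(ret == 0);

		int ok = ((ret = f()) == 1);	/* what was intended: ret gets 42 */
		assert(ret == 42 && !ok);
		return 0;
	}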
@@ -225,7 +225,7 @@ extern int decnet_di_count;
 extern int decnet_dr_count;
 extern int decnet_no_fc_max_cwnd;
 
-extern int sysctl_decnet_mem[3];
+extern long sysctl_decnet_mem[3];
 extern int sysctl_decnet_wmem[3];
 extern int sysctl_decnet_rmem[3];
@@ -2,6 +2,7 @@
 #define _NET_DST_OPS_H
 #include <linux/types.h>
 #include <linux/percpu_counter.h>
+#include <linux/cache.h>
 
 struct dst_entry;
 struct kmem_cachep;
@@ -762,7 +762,7 @@ struct proto {
 
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(struct sock *sk);
-	atomic_t		*memory_allocated;	/* Current allocated memory. */
+	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
 	/*
 	 * Pressure flag: try to collapse.

@@ -771,7 +771,7 @@ struct proto {
 	 * is strict, actions are advisory and have some latency.
 	 */
 	int			*memory_pressure;
-	int			*sysctl_mem;
+	long			*sysctl_mem;
 	int			*sysctl_wmem;
 	int			*sysctl_rmem;
 	int			max_header;
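
These fields hold page counts, and the matching sysctl limits are what outgrow a 32-bit int first: on a large machine an administrator can legitimately configure memory limits beyond INT_MAX pages. A quick arithmetic check (the machine size is illustrative):

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		long pages_per_gib = (1024L * 1024 * 1024) / 4096;  /* 262144 */
		long mem_gib = 16384;	/* a 16 TiB machine */
		long max = mem_gib * pages_per_gib;

		printf("limit in pages: %ld (INT_MAX = %d)\n", max, INT_MAX);
		printf("fits in int: %s\n", max <= INT_MAX ? "yes" : "no");
		return 0;
	}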
@@ -224,7 +224,7 @@ extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern int sysctl_tcp_mem[3];
+extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;

@@ -247,7 +247,7 @@ extern int sysctl_tcp_cookie_size;
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 
-extern atomic_t tcp_memory_allocated;
+extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 

@@ -280,7 +280,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 	}
 
 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
 		return true;
 	return false;
 }
@@ -105,10 +105,10 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
 
 extern struct proto udp_prot;
 
-extern atomic_t udp_memory_allocated;
+extern atomic_long_t udp_memory_allocated;
 
 /* sysctl variables for udp */
-extern int sysctl_udp_mem[3];
+extern long sysctl_udp_mem[3];
 extern int sysctl_udp_rmem_min;
 extern int sysctl_udp_wmem_min;
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 	ax25_cb *ax25;
 	int err = 0;
 
+	memset(fsa, 0, sizeof(fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 

@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 
 		fsa->fsa_ax25.sax25_family = AF_AX25;
 		fsa->fsa_ax25.sax25_call   = ax25->dest_addr;
-		fsa->fsa_ax25.sax25_ndigis = 0;
 
 		if (ax25->digipeat != NULL) {
 			ndigi = ax25->digipeat->ndigi;
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
 			hci_send_cmd(hdev,
 				     HCI_OP_READ_REMOTE_EXT_FEATURES,
 				     sizeof(cp), &cp);
+		} else if (!ev->status && conn->out &&
+				conn->sec_level == BT_SECURITY_HIGH) {
+			struct hci_cp_auth_requested cp;
+			cp.handle = ev->handle;
+			hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+				     sizeof(cp), &cp);
 		} else {
 			conn->state = BT_CONNECTED;
 			hci_proto_connect_cfm(conn, ev->status);
@@ -1,6 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && BT_L2CAP && INPUT
+	depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
 	select HID
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
 		break;
 
 	case 2:
-		*val = __le16_to_cpu(*((__le16 *) opt->val));
+		*val = get_unaligned_le16(opt->val);
 		break;
 
 	case 4:
-		*val = __le32_to_cpu(*((__le32 *) opt->val));
+		*val = get_unaligned_le32(opt->val);
 		break;
 
 	default:

@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
 		break;
 
 	case 2:
-		*((__le16 *) opt->val) = cpu_to_le16(val);
+		put_unaligned_le16(val, opt->val);
 		break;
 
 	case 4:
-		*((__le32 *) opt->val) = cpu_to_le32(val);
+		put_unaligned_le32(val, opt->val);
 		break;
 
 	default:
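
L2CAP configuration options are packed at arbitrary byte offsets inside the request, so casting opt->val to __le16/__le32 and dereferencing is an unaligned access that can trap on strict-alignment architectures; the get/put_unaligned helpers do safe byte-wise loads and stores. A parsing sketch in the same style (function name hypothetical):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u32 parse_opt(const u8 *val, u8 len)
	{
		switch (len) {
		case 1:
			return *val;
		case 2:
			return get_unaligned_le16(val);	/* safe at any offset */
		case 4:
			return get_unaligned_le32(val);
		default:
			return 0;
		}
	}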
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
 
 static void rfcomm_process_connect(struct rfcomm_session *s);
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err);
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+							bdaddr_t *dst,
+							u8 sec_level,
+							int *err);
 static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
 static void rfcomm_session_del(struct rfcomm_session *s);
 

@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
 
 	s = rfcomm_session_get(src, dst);
 	if (!s) {
-		s = rfcomm_session_create(src, dst, &err);
+		s = rfcomm_session_create(src, dst, d->sec_level, &err);
 		if (!s)
 			return err;
 	}

@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
 	rfcomm_session_put(s);
 }
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err)
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+							bdaddr_t *dst,
+							u8 sec_level,
+							int *err)
 {
 	struct rfcomm_session *s = NULL;
 	struct sockaddr_l2 addr;

@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
 	sk = sock->sk;
 	lock_sock(sk);
 	l2cap_pi(sk)->imtu = l2cap_mtu;
+	l2cap_pi(sk)->sec_level = sec_level;
 	if (l2cap_ertm)
 		l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
 	release_sock(sk);
@@ -125,7 +125,7 @@ struct bcm_sock {
 	struct list_head tx_ops;
 	unsigned long dropped_usr_msgs;
 	struct proc_dir_entry *bcm_proc_read;
-	char procname [9]; /* pointer printed in ASCII with \0 */
+	char procname [20]; /* pointer printed in ASCII with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
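
This is the "can-bcm: fix minor heap overflow" fix: the buffer stores a kernel pointer rendered as text, and char[9] only fits a 32-bit value, while a 64-bit address needs far more room. A user-space check of the sizing (the address value is illustrative):

	#include <stdio.h>
	#include <string.h>

	/* A 64-bit kernel address rendered in ASCII: 16 hex digits plus
	 * the NUL need 17 bytes, so the old char procname[9] overflowed. */
	int main(void)
	{
		unsigned long addr = 0xffff880012345678UL;
		char buf[20];

		snprintf(buf, sizeof(buf), "%lx", addr);
		printf("'%s' needs %zu bytes\n", buf, strlen(buf) + 1);
		return 0;
	}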
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
 	.notifier_call = dst_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
+	unsigned long memvalid = 0;
 	u32 tmp;
 	int k;
 	int pc;

+	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
 	/*
 	 * Process array of filter instructions.
 	 */
 	for (pc = 0; pc < flen; pc++) {
-		fentry = &filter[pc];
+		const struct sock_filter *fentry = &filter[pc];
+		u32 f_k = fentry->k;

 		switch (fentry->code) {
 		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
 		case BPF_S_ALU_ADD_K:
-			A += fentry->k;
+			A += f_k;
 			continue;
 		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
 		case BPF_S_ALU_SUB_K:
-			A -= fentry->k;
+			A -= f_k;
 			continue;
 		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
 		case BPF_S_ALU_MUL_K:
-			A *= fentry->k;
+			A *= f_k;
 			continue;
 		case BPF_S_ALU_DIV_X:
 			if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A /= fentry->k;
+			A /= f_k;
 			continue;
 		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
 		case BPF_S_ALU_AND_K:
-			A &= fentry->k;
+			A &= f_k;
 			continue;
 		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
 		case BPF_S_ALU_OR_K:
-			A |= fentry->k;
+			A |= f_k;
 			continue;
 		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
 		case BPF_S_ALU_LSH_K:
-			A <<= fentry->k;
+			A <<= f_k;
 			continue;
 		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
 		case BPF_S_ALU_RSH_K:
-			A >>= fentry->k;
+			A >>= f_k;
 			continue;
 		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
 		case BPF_S_JMP_JA:
-			pc += fentry->k;
+			pc += f_k;
 			continue;
 		case BPF_S_JMP_JGT_K:
-			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A > f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_K:
-			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A >= f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_K:
-			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A == f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_K:
-			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A & f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGT_X:
 			pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			pc += (A & X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_LD_W_ABS:
-			k = fentry->k;
+			k = f_k;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
@@ -218,7 +220,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			}
 			break;
 		case BPF_S_LD_H_ABS:
-			k = fentry->k;
+			k = f_k;
load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
@@ -227,7 +229,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			}
 			break;
 		case BPF_S_LD_B_ABS:
-			k = fentry->k;
+			k = f_k;
load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
 			if (ptr != NULL) {
@@ -242,32 +244,34 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			X = skb->len;
 			continue;
 		case BPF_S_LD_W_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_w;
 		case BPF_S_LD_H_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_h;
 		case BPF_S_LD_B_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_b;
 		case BPF_S_LDX_B_MSH:
-			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			ptr = load_pointer(skb, f_k, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
 		case BPF_S_LD_IMM:
-			A = fentry->k;
+			A = f_k;
 			continue;
 		case BPF_S_LDX_IMM:
-			X = fentry->k;
+			X = f_k;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[fentry->k];
+			A = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[fentry->k];
+			X = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -276,14 +280,16 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			A = X;
 			continue;
 		case BPF_S_RET_K:
-			return fentry->k;
+			return f_k;
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[fentry->k] = A;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = A;
 			continue;
 		case BPF_S_STX:
-			mem[fentry->k] = X;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = X;
 			continue;
 		default:
 			WARN_ON(1);
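The sk_run_filter() hunks above do two things: they cache fentry->k in a local (f_k), and they stop BPF_S_LD_MEM/BPF_S_LDX_MEM from reading scratch slots that no store has written, by keeping a one-bit-per-slot validity mask set in BPF_S_ST/BPF_S_STX. A minimal userspace sketch of that guard pattern, with hypothetical names rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

#define MEMWORDS 16	/* like BPF_MEMWORDS; must fit in the bits of 'valid' */

/* Reads of never-written slots yield 0 instead of stack garbage --
 * the same rule the patched interpreter enforces via 'memvalid'. */
static uint32_t mem_load(const uint32_t *mem, unsigned long valid, unsigned k)
{
	return (valid & (1UL << k)) ? mem[k] : 0;
}

int main(void)
{
	uint32_t mem[MEMWORDS];		/* deliberately left uninitialized */
	unsigned long valid = 0;

	mem[3] = 42;			/* a store into slot 3 ... */
	valid |= 1UL << 3;		/* ... marks that slot valid */

	printf("%u %u\n", mem_load(mem, valid, 3), mem_load(mem, valid, 5));
	return 0;			/* prints "42 0" */
}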
@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;

 	if (debug) {
-		size_t copy = min(count, 1023);
+		size_t copy = min_t(size_t, count, 1023);
 		char tb[copy + 1];
 		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;

 	datalen = (odev->hard_header_len + 16) & ~0xf;

@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;

 	skb = __netdev_alloc_skb(odev,
 			pkt_dev->cur_pkt_size + 64
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
 	if (!ops)
 		return 0;

-	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-	       nlmsg_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
+	size = nla_total_size(sizeof(struct nlattr)) +	/* IFLA_LINKINFO */
+	       nla_total_size(strlen(ops->kind) + 1);	/* IFLA_INFO_KIND */

 	if (ops->get_size)
 		/* IFLA_INFO_DATA + nested data */
-		size += nlmsg_total_size(sizeof(struct nlattr)) +
+		size += nla_total_size(sizeof(struct nlattr)) +
 			ops->get_size(dev);

 	if (ops->get_xstats_size)
-		size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+		/* IFLA_INFO_XSTATS */
+		size += nla_total_size(ops->get_xstats_size(dev));

 	return size;
 }
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 {
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
-	int allocated;
+	long allocated;

 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_add_return(amt, prot->memory_allocated);
+	allocated = atomic_long_add_return(amt, prot->memory_allocated);

 	/* Under limit. */
 	if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)

 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, prot->memory_allocated);
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;

-	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
 		   prot->memory_allocated);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

 	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)

 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
 		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
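The sock.c hunks above, together with the sysctl and per-protocol conversions that follow, widen socket memory accounting from atomic_t/int to atomic_long_t/long. The motivation is arithmetic: the counters are kept in SK_MEM_QUANTUM (page-sized) units, so a 32-bit counter saturates around INT_MAX units. A back-of-the-envelope check, assuming 4 KiB pages:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long long quantum = 4096;	/* assumed page size */

	/* ~8 TiB of accountable memory: reachable on large 64-bit
	 * machines, hence the move to long counters in these hunks. */
	printf("32-bit ceiling: %lld bytes\n", (long long)INT_MAX * quantum);
	return 0;
}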
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops;
 static DEFINE_RWLOCK(dn_hash_lock);
 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
 static struct hlist_head dn_wild_sk;
-static atomic_t decnet_memory_allocated;
+static atomic_long_t decnet_memory_allocated;

 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
@@ -38,7 +38,7 @@ int decnet_log_martians = 1;
 int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;

 /* Reasonable defaults, I hope, based on tcp's defaults */
-int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
+long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
 int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
 int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

@@ -324,7 +324,7 @@ static ctl_table dn_table[] = {
 		.data = &sysctl_decnet_mem,
 		.maxlen = sizeof(sysctl_decnet_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_doulongvec_minmax
 	},
 	{
 		.procname = "decnet_rmem",
@@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk)

 		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
-		if (in_dev != NULL) {
+		if (in_dev != NULL)
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
-			in_dev_put(in_dev);
-		}
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
 		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 	local_bh_enable();

 	socket_seq_show(seq);
-	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
+	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
 		   tcp_death_row.tw_count, sockets,
-		   atomic_read(&tcp_memory_allocated));
-	seq_printf(seq, "UDP: inuse %d mem %d\n",
+		   atomic_long_read(&tcp_memory_allocated));
+	seq_printf(seq, "UDP: inuse %d mem %ld\n",
 		   sock_prot_inuse_get(net, &udp_prot),
-		   atomic_read(&udp_memory_allocated));
+		   atomic_long_read(&udp_memory_allocated));
 	seq_printf(seq, "UDPLITE: inuse %d\n",
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
@@ -398,7 +398,7 @@ static struct ctl_table ipv4_table[] = {
 		.data = &sysctl_tcp_mem,
 		.maxlen = sizeof(sysctl_tcp_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec
+		.proc_handler = proc_doulongvec_minmax
 	},
 	{
 		.procname = "tcp_wmem",
@@ -602,8 +602,7 @@ static struct ctl_table ipv4_table[] = {
 		.data = &sysctl_udp_mem,
 		.maxlen = sizeof(sysctl_udp_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
-		.extra1 = &zero
+		.proc_handler = proc_doulongvec_minmax,
 	},
 	{
 		.procname = "udp_rmem_min",
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);

-int sysctl_tcp_mem[3] __read_mostly;
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;

@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);

-atomic_t tcp_memory_allocated;	/* Current allocated memory. */
+atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);

 /*
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Values greater than interface MTU won't take effect. However
 		 * at the point when this call is done we typically don't yet
 		 * know which interface is going to be used */
-		if (val < 8 || val > MAX_TCP_WINDOW) {
+		if (val < 64 || val > MAX_TCP_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
 	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
 		     sizeof(struct sk_buff);

-	if (sk->sk_sndbuf < 3 * sndmem)
-		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
+	if (sk->sk_sndbuf < 3 * sndmem) {
+		sk->sk_sndbuf = 3 * sndmem;
+		if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
+			sk->sk_sndbuf = sysctl_tcp_wmem[2];
+	}
 }

 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk)
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
 	    !tcp_memory_pressure &&
-	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
 	}
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
 		return 0;

 	/* If we are under soft global TCP memory pressure, do not expand. */
-	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
 		return 0;

 	/* If we filled the congestion window, do not expand. */
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		    !icsk->icsk_backoff)
 			break;

+		if (sock_owned_by_user(sk))
+			break;
+
 		icsk->icsk_backoff--;
 		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
 			icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (remaining) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
-		} else if (sock_owned_by_user(sk)) {
-			/* RTO revert clocked out retransmission,
-			 * but socket is locked. Will defer. */
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-						  HZ/20, TCP_RTO_MAX);
 		} else {
 			/* RTO revert clocked out retransmission.
 			 * Will retransmit now */
@@ -110,7 +110,7 @@
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);

-int sysctl_udp_mem[3] __read_mostly;
+long sysctl_udp_mem[3] __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_mem);

 int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
 int sysctl_udp_wmem_min __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_wmem_min);

-atomic_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);

 #define MAX_UDP_PORTS 65536
@@ -2740,10 +2740,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 				/* Flag it for later restoration when link comes up */
 				ifa->flags |= IFA_F_TENTATIVE;
 				ifa->state = INET6_IFADDR_STATE_DAD;
-
-				write_unlock_bh(&idev->lock);
-
-				in6_ifa_hold(ifa);
 			} else {
 				list_del(&ifa->if_list);

@@ -2758,19 +2754,15 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 			ifa->state = INET6_IFADDR_STATE_DEAD;
 			spin_unlock_bh(&ifa->state_lock);

-			if (state == INET6_IFADDR_STATE_DEAD)
-				goto put_ifa;
+			if (state == INET6_IFADDR_STATE_DEAD) {
+				in6_ifa_put(ifa);
+			} else {
+				__ipv6_ifa_notify(RTM_DELADDR, ifa);
+				atomic_notifier_call_chain(&inet6addr_chain,
+							   NETDEV_DOWN, ifa);
+			}
+			write_lock_bh(&idev->lock);
 		}

-			__ipv6_ifa_notify(RTM_DELADDR, ifa);
-			if (ifa->state == INET6_IFADDR_STATE_DEAD)
-				atomic_notifier_call_chain(&inet6addr_chain,
-							   NETDEV_DOWN, ifa);
-
-put_ifa:
-			in6_ifa_put(ifa);
-
-			write_lock_bh(&idev->lock);
-		}
-
 		list_splice(&keep_list, &idev->addr_list);
@@ -286,7 +286,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,

 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;

 	/* Look for overlap with succeeding segment. */
@@ -349,7 +349,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,

 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;

 	/* Look for overlap with succeeding segment. */
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
 	struct neighbour *neigh;

-	if (rt == NULL)
+	if (rt == NULL) {
+		if (net_ratelimit())
+			pr_warning("IPv6: Maximum number of routes reached,"
+				   " consider increasing route/max_size.\n");
 		return ERR_PTR(-ENOMEM);
+	}

 	dev_hold(net->loopback_dev);
 	in6_dev_hold(idev);
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	u32 hw_reconf_flags = 0;
 	int i;

+	if (local->scan_sdata == sdata)
+		ieee80211_scan_cancel(local);
+
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);

 	/*
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	synchronize_rcu();
 	skb_queue_purge(&sdata->skb_queue);

-	if (local->scan_sdata == sdata)
-		ieee80211_scan_cancel(local);
-
 	/*
 	 * Disable beaconing here for mesh only, AP and IBSS
 	 * are already taken care of.
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,

 		err = -EINVAL;
 		vnet_hdr_len = sizeof(vnet_hdr);
-		if ((len -= vnet_hdr_len) < 0)
+		if (len < vnet_hdr_len)
 			goto out_free;

+		len -= vnet_hdr_len;
+
 		if (skb_is_gso(skb)) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);

@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strlcpy(uaddr->sa_data, dev->name, 15);
+		strncpy(uaddr->sa_data, dev->name, 14);
 	else
 		memset(uaddr->sa_data, 0, 14);
 	rcu_read_unlock();
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
 	sll->sll_family = AF_PACKET;
 	sll->sll_ifindex = po->ifindex;
 	sll->sll_protocol = po->num;
+	sll->sll_pkttype = 0;
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
 	if (dev) {
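Two of the af_packet hunks above close information leaks to userland: sa_data is a fixed 14-byte array copied out whole, so strncpy()'s zero padding (unlike the previous strlcpy() call, which stops after the NUL terminator) keeps stale kernel bytes out of the tail, and sll_pkttype is now set explicitly for the same reason. A userspace sketch of the padding difference, with a hypothetical buffer standing in for sa_data:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char out[14];

	memset(out, 0xAA, sizeof(out));		/* simulate leftover stack bytes */
	strncpy(out, "eth0", sizeof(out));	/* copies "eth0", zero-fills the rest */

	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", (unsigned char)out[i]);
	putchar('\n');				/* no 0xaa bytes survive past the name */
	return 0;
}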
@@ -249,8 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
 	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
 	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-	if (!rm->data.op_sg)
+	if (!rm->data.op_sg) {
+		rds_message_put(rm);
 		return ERR_PTR(-ENOMEM);
+	}

 	for (i = 0; i < rm->data.op_nents; ++i) {
 		sg_set_page(&rm->data.op_sg[i],
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
 		goto nla_put_failure;

 	nla_nest_end(skb, nest);
+
+	if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
+		goto nla_put_failure;
+
 	return skb->len;

 nla_put_failure:
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific;
 struct kmem_cache *sctp_chunk_cachep __read_mostly;
 struct kmem_cache *sctp_bucket_cachep __read_mostly;

-int sysctl_sctp_mem[3];
+long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];

@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;

 extern struct kmem_cache *sctp_bucket_cachep;
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];

 static int sctp_memory_pressure;
-static atomic_t sctp_memory_allocated;
+static atomic_long_t sctp_memory_allocated;
 struct percpu_counter sctp_sockets_allocated;

 static void sctp_enter_memory_pressure(struct sock *sk)
@@ -54,7 +54,7 @@ static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;

-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];

@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = {
 		.data = &sysctl_sctp_mem,
 		.maxlen = sizeof(sysctl_sctp_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_doulongvec_minmax
 	},
 	{
 		.procname = "sctp_rmem",
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct tipc_sock *tsock = tipc_sk(sock->sk);

+	memset(addr, 0, sizeof(*addr));
 	if (peer) {
 		if ((sock->state != SS_CONNECTED) &&
 			((peer != 2) || (sock->state != SS_DISCONNECTING)))
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb,
 	}

 	*rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
-	if (IS_ERR(dev)) {
-		err = PTR_ERR(dev);
+	if (IS_ERR(*rdev)) {
+		err = PTR_ERR(*rdev);
 		goto out_rtnl;
 	}

@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 	while (len > 0) {
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
+			if (len < 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_REVERSE:
 				if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 2;
 			break;
 		case X25_FAC_CLASS_B:
+			if (len < 3)
+				return 0;
 			switch (*p) {
 			case X25_FAC_PACKET_SIZE:
 				facilities->pacsize_in = p[1];
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 3;
 			break;
 		case X25_FAC_CLASS_C:
+			if (len < 4)
+				return 0;
 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
 			       "values %02X, %02X, %02X\n",
 			       p[0], p[1], p[2], p[3]);
@@ -132,6 +138,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 4;
 			break;
 		case X25_FAC_CLASS_D:
+			if (len < p[1] + 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
@@ -149,9 +157,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 				break;
 			default:
 				printk(KERN_DEBUG "X.25: unknown facility %02X,"
-					"length %d, values %02X, %02X, "
-					"%02X, %02X\n",
-					p[0], p[1], p[2], p[3], p[4], p[5]);
+					"length %d\n", p[0], p[1]);
 				break;
 			}
 			len -= p[1] + 2;
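The facilities fix above checks that each class's encoding actually fits in the remaining buffer before touching p[1] and beyond, instead of trusting the advertised lengths. A minimal sketch of the same bounds-check-before-read discipline for a generic type-length-value parser (hypothetical record format, not the X.25 wire encoding):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Walk [type][len][value...] records, refusing to read past 'len'. */
static int parse_tlv(const uint8_t *p, size_t len)
{
	while (len >= 2) {			/* need type + length first */
		size_t rec = 2 + (size_t)p[1];
		if (rec > len)			/* truncated record: bail out */
			return -1;
		printf("type %02x, %u value bytes\n", p[0], p[1]);
		p += rec;
		len -= rec;
	}
	return len == 0 ? 0 : -1;
}

int main(void)
{
	const uint8_t ok[]  = { 0x01, 0x02, 0xaa, 0xbb };
	const uint8_t bad[] = { 0x01, 0x05, 0xaa };	/* claims 5 bytes, has 1 */

	printf("ok:  %d\n", parse_tlv(ok, sizeof(ok)));
	printf("bad: %d\n", parse_tlv(bad, sizeof(bad)));
	return 0;
}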