Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (52 commits)
  netxen: do_rom_fast_write error handling
  natsemi: Fix detection of vanilla natsemi cards
  net: remove a collection of unneeded #undef REALLY_SLOW_IO stuff
  chelsio: Fix non-NAPI compile
  cxgb3 - Feed Rx free list with pages
  cxgb3 - Recovery from HW starvation of response queue entries.
  cxgb3 - Unmap offload packets when they are freed
  cxgb3 - FW version update
  cxgb3 - private ioctl cleanup
  cxgb3 - manage sysfs attributes per port
  S2IO: Restoring the mac address in s2io_reset
  S2IO: Avoid printing the Enhanced statistics for Xframe I card.
  S2IO: Making LED off during LINK_DOWN notification.
  S2IO: Added a loadable parameter to enable or disable vlan stripping in frame.
  S2IO: Optimized the delay to wait for command completion
  S2IO: Fixes for MSI and MSIX
  qla3xxx: Bumping driver version number
  qla3xxx: Kernic Panic on pSeries under stress conditions
  qla3xxx: bugfix tx reset after stress conditions.
  qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()
  ...

commit ce27002078
37 changed files with 1709 additions and 844 deletions

@@ -3177,8 +3177,8 @@ L:	linux-kernel@vger.kernel.org ?
 S:	Supported

 SPIDERNET NETWORK DRIVER for CELL
-P:	Jim Lewis
-M:	jim@jklewis.com
+P:	Linas Vepstas
+M:	linas@austin.ibm.com
 L:	netdev@vger.kernel.org
 S:	Supported

@@ -2245,7 +2245,7 @@ config BNX2

 config SPIDER_NET
     tristate "Spider Gigabit Ethernet driver"
-    depends on PCI && PPC_IBM_CELL_BLADE
+    depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
     select FW_LOADER
     help
       This driver supports the Gigabit Ethernet chips present on the

@@ -1696,6 +1696,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie)
 {
     int work_done;
     struct adapter *adapter = cookie;
+    struct respQ *Q = &adapter->sge->respQ;

     spin_lock(&adapter->async_lock);

@@ -74,6 +74,11 @@ enum {		/* adapter flags */
 struct rx_desc;
 struct rx_sw_desc;

+struct sge_fl_page {
+    struct skb_frag_struct frag;
+    unsigned char *va;
+};
+
 struct sge_fl {			/* SGE per free-buffer list state */
     unsigned int buf_size;	/* size of each Rx buffer */
     unsigned int credits;	/* # of available Rx buffers */

@@ -81,11 +86,13 @@ struct sge_fl {			/* SGE per free-buffer list state */
     unsigned int cidx;		/* consumer index */
     unsigned int pidx;		/* producer index */
     unsigned int gen;		/* free list generation */
+    unsigned int cntxt_id;	/* SGE context id for the free list */
+    struct sge_fl_page page;
     struct rx_desc *desc;	/* address of HW Rx descriptor ring */
     struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
     dma_addr_t phys_addr;	/* physical address of HW ring start */
-    unsigned int cntxt_id;	/* SGE context id for the free list */
     unsigned long empty;	/* # of times queue ran out of buffers */
+    unsigned long alloc_failed;	/* # of times buffer allocation failed */
 };

 /*

@@ -121,6 +128,8 @@ struct sge_rspq {		/* state for an SGE response queue */
     unsigned long empty;	/* # of times queue ran out of credits */
     unsigned long nomem;	/* # of responses deferred due to no mem */
     unsigned long unhandled_irqs;	/* # of spurious intrs */
+    unsigned long starved;
+    unsigned long restarted;
 };

 struct tx_desc;

@@ -36,28 +36,17 @@
  * Ioctl commands specific to this driver.
  */
 enum {
-    CHELSIO_SETREG = 1024,
-    CHELSIO_GETREG,
-    CHELSIO_SETTPI,
-    CHELSIO_GETTPI,
-    CHELSIO_GETMTUTAB,
-    CHELSIO_SETMTUTAB,
-    CHELSIO_GETMTU,
-    CHELSIO_SET_PM,
-    CHELSIO_GET_PM,
-    CHELSIO_GET_TCAM,
-    CHELSIO_SET_TCAM,
-    CHELSIO_GET_TCB,
-    CHELSIO_GET_MEM,
-    CHELSIO_LOAD_FW,
-    CHELSIO_GET_PROTO,
-    CHELSIO_SET_PROTO,
-    CHELSIO_SET_TRACE_FILTER,
-    CHELSIO_SET_QSET_PARAMS,
-    CHELSIO_GET_QSET_PARAMS,
-    CHELSIO_SET_QSET_NUM,
-    CHELSIO_GET_QSET_NUM,
-    CHELSIO_SET_PKTSCHED,
+    CHELSIO_GETMTUTAB = 1029,
+    CHELSIO_SETMTUTAB = 1030,
+    CHELSIO_SET_PM = 1032,
+    CHELSIO_GET_PM = 1033,
+    CHELSIO_GET_MEM = 1038,
+    CHELSIO_LOAD_FW = 1041,
+    CHELSIO_SET_TRACE_FILTER = 1044,
+    CHELSIO_SET_QSET_PARAMS = 1045,
+    CHELSIO_GET_QSET_PARAMS = 1046,
+    CHELSIO_SET_QSET_NUM = 1047,
+    CHELSIO_GET_QSET_NUM = 1048,
 };

 struct ch_reg {

@@ -434,27 +434,25 @@ static int setup_sge_qsets(struct adapter *adap)

 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                          char *buf,
-                         ssize_t(*format) (struct adapter *, char *))
+                         ssize_t(*format) (struct net_device *, char *))
 {
     ssize_t len;
-    struct adapter *adap = to_net_dev(d)->priv;

     /* Synchronize with ioctls that may shut down the device */
     rtnl_lock();
-    len = (*format) (adap, buf);
+    len = (*format) (to_net_dev(d), buf);
     rtnl_unlock();
     return len;
 }

 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                           const char *buf, size_t len,
-                          ssize_t(*set) (struct adapter *, unsigned int),
+                          ssize_t(*set) (struct net_device *, unsigned int),
                           unsigned int min_val, unsigned int max_val)
 {
     char *endp;
     ssize_t ret;
     unsigned int val;
-    struct adapter *adap = to_net_dev(d)->priv;

     if (!capable(CAP_NET_ADMIN))
         return -EPERM;

@@ -464,7 +462,7 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
         return -EINVAL;

     rtnl_lock();
-    ret = (*set) (adap, val);
+    ret = (*set) (to_net_dev(d), val);
     if (!ret)
         ret = len;
     rtnl_unlock();

@@ -472,8 +470,9 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 }

 #define CXGB3_SHOW(name, val_expr) \
-static ssize_t format_##name(struct adapter *adap, char *buf) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
 { \
+    struct adapter *adap = dev->priv; \
     return sprintf(buf, "%u\n", val_expr); \
 } \
 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \

@@ -482,8 +481,10 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
     return attr_show(d, attr, buf, format_##name); \
 }

-static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
 {
+    struct adapter *adap = dev->priv;
+
     if (adap->flags & FULL_INIT_DONE)
         return -EBUSY;
     if (val && adap->params.rev == 0)

@@ -500,8 +501,10 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
     return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
 }

-static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
 {
+    struct adapter *adap = dev->priv;
+
     if (adap->flags & FULL_INIT_DONE)
         return -EBUSY;
     if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)

@@ -1549,32 +1552,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
         return -EFAULT;

     switch (cmd) {
-    case CHELSIO_SETREG:{
-        struct ch_reg edata;
-
-        if (!capable(CAP_NET_ADMIN))
-            return -EPERM;
-        if (copy_from_user(&edata, useraddr, sizeof(edata)))
-            return -EFAULT;
-        if ((edata.addr & 3) != 0
-            || edata.addr >= adapter->mmio_len)
-            return -EINVAL;
-        writel(edata.val, adapter->regs + edata.addr);
-        break;
-    }
-    case CHELSIO_GETREG:{
-        struct ch_reg edata;
-
-        if (copy_from_user(&edata, useraddr, sizeof(edata)))
-            return -EFAULT;
-        if ((edata.addr & 3) != 0
-            || edata.addr >= adapter->mmio_len)
-            return -EINVAL;
-        edata.val = readl(adapter->regs + edata.addr);
-        if (copy_to_user(useraddr, &edata, sizeof(edata)))
-            return -EFAULT;
-        break;
-    }
     case CHELSIO_SET_QSET_PARAMS:{
         int i;
         struct qset_params *q;

@@ -1838,10 +1815,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
             return -EINVAL;

         /*
-             * Version scheme:
-             * bits 0..9: chip version
-             * bits 10..15: chip revision
-             */
+         * Version scheme:
+         * bits 0..9: chip version
+         * bits 10..15: chip revision
+         */
         t.version = 3 | (adapter->params.rev << 10);
         if (copy_to_user(useraddr, &t, sizeof(t)))
             return -EFAULT;

@@ -1890,20 +1867,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                    t.trace_rx);
         break;
     }
-    case CHELSIO_SET_PKTSCHED:{
-        struct ch_pktsched_params p;
-
-        if (!capable(CAP_NET_ADMIN))
-            return -EPERM;
-        if (!adapter->open_device_map)
-            return -EAGAIN;	/* uP and SGE must be running */
-        if (copy_from_user(&p, useraddr, sizeof(p)))
-            return -EFAULT;
-        send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
-                          p.binding);
-        break;
-
-    }
     default:
         return -EOPNOTSUPP;
     }

@@ -45,9 +45,25 @@
 #define USE_GTS 0

 #define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
+ * be a multiple of the host page size).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their len is <=
+ */
 #define SGE_RX_COPY_THRES  256

-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16

 /*
  * Period of the Tx buffer reclaim timer.  This timer does not need to run

@@ -85,7 +101,10 @@ struct tx_sw_desc {		/* SW state per Tx descriptor */
 };

 struct rx_sw_desc {		/* SW state per Rx descriptor */
-    struct sk_buff *skb;
+    union {
+        struct sk_buff *skb;
+        struct sge_fl_page page;
+    } t;
     DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };

@@ -104,6 +123,15 @@ struct unmap_info {		/* packet unmapping info, overlays skb->cb */
     u32 len;			/* mapped length of skb main body */
 };

+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+    struct pci_dev *pdev;
+    dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
 /*
  * Maps a number of flits to the number of Tx descriptors that can hold them.
  * The formula is

@@ -252,10 +280,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
     struct pci_dev *pdev = adapter->pdev;
     unsigned int cidx = q->cidx;

+    const int need_unmap = need_skb_unmap() &&
+                           q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
     d = &q->sdesc[cidx];
     while (n--) {
         if (d->skb) {	/* an SGL is present */
-            if (need_skb_unmap())
+            if (need_unmap)
                 unmap_skb(d->skb, q, cidx, pdev);
             if (d->skb->priority == cidx)
                 kfree_skb(d->skb);

@@ -320,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)

         pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
                          q->buf_size, PCI_DMA_FROMDEVICE);
-        kfree_skb(d->skb);
-        d->skb = NULL;
+
+        if (q->buf_size != RX_PAGE_SIZE) {
+            kfree_skb(d->t.skb);
+            d->t.skb = NULL;
+        } else {
+            if (d->t.page.frag.page)
+                put_page(d->t.page.frag.page);
+            d->t.page.frag.page = NULL;
+        }
         if (++cidx == q->size)
             cidx = 0;
     }
+
+    if (q->page.frag.page)
+        put_page(q->page.frag.page);
+    q->page.frag.page = NULL;
 }

 /**
  *	add_one_rx_buf - add a packet buffer to a free-buffer list
- *	@skb: the buffer to add
+ *	@va: va of the buffer to add
  *	@len: the buffer length
  *	@d: the HW Rx descriptor to write
  *	@sd: the SW Rx descriptor to write

@@ -339,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
                                   struct rx_desc *d, struct rx_sw_desc *sd,
                                   unsigned int gen, struct pci_dev *pdev)
 {
     dma_addr_t mapping;

-    sd->skb = skb;
-    mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+    mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
     pci_unmap_addr_set(sd, dma_addr, mapping);

     d->addr_lo = cpu_to_be32(mapping);

@@ -371,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
     struct rx_sw_desc *sd = &q->sdesc[q->pidx];
     struct rx_desc *d = &q->desc[q->pidx];
+    struct sge_fl_page *p = &q->page;

     while (n--) {
-        struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+        unsigned char *va;

-        if (!skb)
-            break;
+        if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+            struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+            if (!skb) {
+                q->alloc_failed++;
+                break;
+            }
+            va = skb->data;
+            sd->t.skb = skb;
+        } else {
+            if (!p->frag.page) {
+                p->frag.page = alloc_pages(gfp, 0);
+                if (unlikely(!p->frag.page)) {
+                    q->alloc_failed++;
+                    break;
+                } else {
+                    p->frag.size = RX_PAGE_SIZE;
+                    p->frag.page_offset = 0;
+                    p->va = page_address(p->frag.page);
+                }
+            }
+
+            memcpy(&sd->t, p, sizeof(*p));
+            va = p->va;
+
+            p->frag.page_offset += RX_PAGE_SIZE;
+            BUG_ON(p->frag.page_offset > PAGE_SIZE);
+            p->va += RX_PAGE_SIZE;
+            if (p->frag.page_offset == PAGE_SIZE)
+                p->frag.page = NULL;
+            else
+                get_page(p->frag.page);
+        }
+
+        add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);

-        add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
         d++;
         sd++;
         if (++q->pidx == q->size) {

@@ -413,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
     struct rx_desc *from = &q->desc[idx];
     struct rx_desc *to = &q->desc[q->pidx];

-    q->sdesc[q->pidx] = q->sdesc[idx];
+    memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
     to->addr_lo = from->addr_lo;	/* already big endian */
     to->addr_hi = from->addr_hi;	/* likewise */
     wmb();

@@ -446,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
  *	of the SW ring.
  */
 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
-                        size_t sw_size, dma_addr_t *phys, void *metadata)
+                        size_t sw_size, dma_addr_t * phys, void *metadata)
 {
     size_t len = nelem * elem_size;
     void *s = NULL;

@@ -575,61 +649,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
     return flit_desc_map[n];
 }

-/**
- *	get_packet - return the next ingress packet buffer from a free list
- *	@adap: the adapter that received the packet
- *	@fl: the SGE free list holding the packet
- *	@len: the packet length including any SGE padding
- *	@drop_thres: # of remaining buffers before we start dropping packets
- *
- *	Get the next packet from a free list and complete setup of the
- *	sk_buff.  If the packet is small we make a copy and recycle the
- *	original buffer, otherwise we use the original buffer itself.  If a
- *	positive drop threshold is supplied packets are dropped and their
- *	buffers recycled if (a) the number of remaining buffers is under the
- *	threshold and the packet is too big to copy, or (b) the packet should
- *	be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-                                  unsigned int len, unsigned int drop_thres)
-{
-    struct sk_buff *skb = NULL;
-    struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-    prefetch(sd->skb->data);
-
-    if (len <= SGE_RX_COPY_THRES) {
-        skb = alloc_skb(len, GFP_ATOMIC);
-        if (likely(skb != NULL)) {
-            __skb_put(skb, len);
-            pci_dma_sync_single_for_cpu(adap->pdev,
-                                        pci_unmap_addr(sd, dma_addr),
-                                        len, PCI_DMA_FROMDEVICE);
-            memcpy(skb->data, sd->skb->data, len);
-            pci_dma_sync_single_for_device(adap->pdev,
-                                           pci_unmap_addr(sd, dma_addr),
-                                           len, PCI_DMA_FROMDEVICE);
-        } else if (!drop_thres)
-            goto use_orig_buf;
-recycle:
-        recycle_rx_buf(adap, fl, fl->cidx);
-        return skb;
-    }
-
-    if (unlikely(fl->credits < drop_thres))
-        goto recycle;
-
-use_orig_buf:
-    pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-                     fl->buf_size, PCI_DMA_FROMDEVICE);
-    skb = sd->skb;
-    skb_put(skb, len);
-    __refill_fl(adap, fl);
-    return skb;
-}
-
 /**
  *	get_imm_packet - return the next ingress packet buffer from a response
  *	@resp: the response descriptor containing the packet data

@@ -1226,6 +1245,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
     return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
 }

+/**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+    int i;
+    const dma_addr_t *p;
+    const struct skb_shared_info *si;
+    const struct deferred_unmap_info *dui;
+    const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+    dui = (struct deferred_unmap_info *)skb->head;
+    p = dui->addr;
+
+    if (ui->len)
+        pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+    si = skb_shinfo(skb);
+    for (i = 0; i < si->nr_frags; i++)
+        pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+                       PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+                                     const struct sg_ent *sgl, int sgl_flits)
+{
+    dma_addr_t *p;
+    struct deferred_unmap_info *dui;
+
+    dui = (struct deferred_unmap_info *)skb->head;
+    dui->pdev = pdev;
+    for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+        *p++ = be64_to_cpu(sgl->addr[0]);
+        *p++ = be64_to_cpu(sgl->addr[1]);
+    }
+    if (sgl_flits)
+        *p = be64_to_cpu(sgl->addr[0]);
+}
+
 /**
  *	write_ofld_wr - write an offload work request
  *	@adap: the adapter

@@ -1262,8 +1325,11 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
     sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
     sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
                          adap->pdev);
-    if (need_skb_unmap())
+    if (need_skb_unmap()) {
+        setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+        skb->destructor = deferred_unmap_destructor;
         ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+    }

     write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
                      gen, from->wr_hi, from->wr_lo);

@@ -1617,7 +1683,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
     struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
     struct port_info *pi;

-    rq->eth_pkts++;
     skb_pull(skb, sizeof(*p) + pad);
     skb->dev = adap->port[p->iff];
     skb->dev->last_rx = jiffies;

@@ -1645,6 +1710,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
     netif_rx(skb);
 }

+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+                          unsigned int len)
+{
+    skb->len = len;
+    if (len <= SKB_DATA_SIZE) {
+        memcpy(skb->data, p->va, len);
+        skb->tail += len;
+        put_page(p->frag.page);
+    } else {
+        memcpy(skb->data, p->va, SKB_DATA_SIZE);
+        skb_shinfo(skb)->frags[0].page = p->frag.page;
+        skb_shinfo(skb)->frags[0].page_offset =
+            p->frag.page_offset + SKB_DATA_SIZE;
+        skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+        skb_shinfo(skb)->nr_frags = 1;
+        skb->data_len = len - SKB_DATA_SIZE;
+        skb->tail += SKB_DATA_SIZE;
+        skb->truesize += skb->data_len;
+    }
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+                                  unsigned int len, unsigned int drop_thres)
+{
+    struct sk_buff *skb = NULL;
+    struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+    prefetch(sd->t.skb->data);
+
+    if (len <= SGE_RX_COPY_THRES) {
+        skb = alloc_skb(len, GFP_ATOMIC);
+        if (likely(skb != NULL)) {
+            struct rx_desc *d = &fl->desc[fl->cidx];
+            dma_addr_t mapping =
+                (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+                             be32_to_cpu(d->addr_lo));
+
+            __skb_put(skb, len);
+            pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+                                        PCI_DMA_FROMDEVICE);
+            memcpy(skb->data, sd->t.skb->data, len);
+            pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+                                           PCI_DMA_FROMDEVICE);
+        } else if (!drop_thres)
+            goto use_orig_buf;
+recycle:
+        recycle_rx_buf(adap, fl, fl->cidx);
+        return skb;
+    }
+
+    if (unlikely(fl->credits < drop_thres))
+        goto recycle;
+
+use_orig_buf:
+    pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+                     fl->buf_size, PCI_DMA_FROMDEVICE);
+    skb = sd->t.skb;
+    skb_put(skb, len);
+    __refill_fl(adap, fl);
+    return skb;
+}
+
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response

@@ -1767,7 +1911,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
     q->next_holdoff = q->holdoff_tmr;

     while (likely(budget_left && is_new_response(r, q))) {
-        int eth, ethpad = 0;
+        int eth, ethpad = 2;
         struct sk_buff *skb = NULL;
         u32 len, flags = ntohl(r->flags);
         u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;

@@ -1794,18 +1938,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
                 break;
             }
             q->imm_data++;
+            ethpad = 0;
         } else if ((len = ntohl(r->len_cq)) != 0) {
-            struct sge_fl *fl;
+            struct sge_fl *fl =
+                (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+            if (fl->buf_size == RX_PAGE_SIZE) {
+                struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+                struct sge_fl_page *p = &sd->t.page;
+
+                prefetch(p->va);
+                prefetch(p->va + L1_CACHE_BYTES);
+
+                __refill_fl(adap, fl);
+
+                pci_unmap_single(adap->pdev,
+                                 pci_unmap_addr(sd, dma_addr),
+                                 fl->buf_size,
+                                 PCI_DMA_FROMDEVICE);
+
+                if (eth) {
+                    if (unlikely(fl->credits <
+                                 SGE_RX_DROP_THRES))
+                        goto eth_recycle;
+
+                    skb = alloc_skb(SKB_DATA_SIZE,
+                                    GFP_ATOMIC);
+                    if (unlikely(!skb)) {
+eth_recycle:
+                        q->rx_drops++;
+                        recycle_rx_buf(adap, fl,
+                                       fl->cidx);
+                        goto eth_done;
+                    }
+                } else {
+                    skb = alloc_skb(SKB_DATA_SIZE,
+                                    GFP_ATOMIC);
+                    if (unlikely(!skb))
+                        goto no_mem;
+                }
+
+                skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+                fl->credits--;
+                q->eth_pkts++;
+            } else {
+                fl->credits--;
+                skb = get_packet(adap, fl, G_RSPD_LEN(len),
+                                 eth ? SGE_RX_DROP_THRES : 0);
+            }

-            fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-            fl->credits--;
-            skb = get_packet(adap, fl, G_RSPD_LEN(len),
-                             eth ? SGE_RX_DROP_THRES : 0);
-            if (!skb)
-                q->rx_drops++;
-            else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
-                __skb_pull(skb, 2);
-            ethpad = 2;
             if (++fl->cidx == fl->size)
                 fl->cidx = 0;
         } else

@@ -1829,18 +2011,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
             q->credits = 0;
         }

-        if (likely(skb != NULL)) {
-            /* Preserve the RSS info in csum & priority */
-            skb->csum = rss_hi;
-            skb->priority = rss_lo;
-
+        if (skb) {
             if (eth)
                 rx_eth(adap, q, skb, ethpad);
             else {
-                ngathered = rx_offload(&adap->tdev, q, skb,
-                                       offload_skbs, ngathered);
+                /* Preserve the RSS info in csum & priority */
+                skb->csum = rss_hi;
+                skb->priority = rss_lo;
+
+                if (unlikely(r->rss_hdr.opcode ==
+                             CPL_TRACE_PKT))
+                    __skb_pull(skb, ethpad);
+
+                ngathered = rx_offload(&adap->tdev, q,
+                                       skb, offload_skbs,
+                                       ngathered);
             }
         }

         --budget_left;
     }

@@ -2320,10 +2507,23 @@ static void sge_timer_cb(unsigned long data)
         &adap->sge.qs[0].rspq.lock;
     if (spin_trylock_irq(lock)) {
         if (!napi_is_scheduled(qs->netdev)) {
+            u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
             if (qs->fl[0].credits < qs->fl[0].size)
                 __refill_fl(adap, &qs->fl[0]);
             if (qs->fl[1].credits < qs->fl[1].size)
                 __refill_fl(adap, &qs->fl[1]);
+
+            if (status & (1 << qs->rspq.cntxt_id)) {
+                qs->rspq.starved++;
+                if (qs->rspq.credits) {
+                    refill_rspq(adap, &qs->rspq, 1);
+                    qs->rspq.credits--;
+                    qs->rspq.restarted++;
+                    t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+                                 1 << qs->rspq.cntxt_id);
+                }
+            }
         }
         spin_unlock_irq(lock);
     }

@@ -2432,13 +2632,21 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

     if (ntxq == 1) {
+#ifdef USE_RX_PAGE
+        q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
         q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
             sizeof(struct cpl_rx_pkt);
+#endif
         q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
             sizeof(struct cpl_rx_pkt);
     } else {
+#ifdef USE_RX_PAGE
+        q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
         q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
             sizeof(struct cpl_rx_data);
+#endif
         q->fl[1].buf_size = (16 * 1024) -
             SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
     }

@@ -2632,7 +2840,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
         q->polling = adap->params.rev > 0;
         q->coalesce_usecs = 5;
         q->rspq_size = 1024;
-        q->fl_size = 4096;
+        q->fl_size = 1024;
         q->jumbo_size = 512;
         q->txq_size[TXQ_ETH] = 1024;
         q->txq_size[TXQ_OFLD] = 1024;

@@ -884,11 +884,13 @@ int t3_check_fw_version(struct adapter *adapter)
     major = G_FW_VERSION_MAJOR(vers);
     minor = G_FW_VERSION_MINOR(vers);

-    if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+    if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+        minor == FW_VERSION_MINOR)
         return 0;

     CH_ERR(adapter, "found wrong FW version(%u.%u), "
-           "driver needs version 3.1\n", major, minor);
+           "driver needs version %u.%u\n", major, minor,
+           FW_VERSION_MAJOR, FW_VERSION_MINOR);
     return -EINVAL;
 }

@@ -36,4 +36,6 @@
 #define DRV_NAME "cxgb3"
 /* Driver version */
 #define DRV_VERSION "1.0"
+#define FW_VERSION_MAJOR 3
+#define FW_VERSION_MINOR 2
 #endif				/* __CHELSIO_VERSION_H */

@@ -38,12 +38,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $,  Bjorn Ekwall (bj
 /* Add more time here if your adapter won't work OK: */
 #define DE600_SLOW_DOWN	udelay(delay_time)

-/*
- * If you still have trouble reading/writing to the adapter,
- * modify the following "#define": (see <asm/io.h> for more info)
- #define REALLY_SLOW_IO
- */
-
 /* use 0 for production, 1 for verification, >2 for debug */
 #ifdef DE600_DEBUG
 #define PRINTK(x) if (de600_debug >= 2) printk x

@@ -839,7 +839,7 @@ enum {
     NV_MSIX_INT_DISABLED,
     NV_MSIX_INT_ENABLED
 };
-static int msix = NV_MSIX_INT_ENABLED;
+static int msix = NV_MSIX_INT_DISABLED;

 /*
  * DMA 64bit

@@ -3104,13 +3104,17 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
     struct fe_priv *np = netdev_priv(dev);
     u8 __iomem *base = get_hwbase(dev);
     unsigned long flags;
+    int retcode;

-    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
         pkts = nv_rx_process(dev, limit);
-    else
+        retcode = nv_alloc_rx(dev);
+    } else {
         pkts = nv_rx_process_optimized(dev, limit);
+        retcode = nv_alloc_rx_optimized(dev);
+    }

-    if (nv_alloc_rx(dev)) {
+    if (retcode) {
         spin_lock_irqsave(&np->lock, flags);
         if (!np->in_shutdown)
             mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

@@ -5370,19 +5374,19 @@ static struct pci_device_id pci_tbl[] = {
     },
     {	/* MCP65 Ethernet Controller */
         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
     },
     {	/* MCP65 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
     },
     {	/* MCP65 Ethernet Controller */
         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
     },
     {	/* MCP65 Ethernet Controller */
         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
     },
     {	/* MCP67 Ethernet Controller */
         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),

@@ -147,13 +147,13 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
     int unaligned;

     while (mp->rx_desc_count < mp->rx_ring_size) {
-        skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
+        skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
         if (!skb)
             break;
         mp->rx_desc_count++;
-        unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
+        unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
         if (unaligned)
-            skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
+            skb_reserve(skb, dma_get_cache_alignment() - unaligned);
         pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
         pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
         pkt_info.buf_ptr = dma_map_single(NULL, skb->data,

@@ -42,17 +42,6 @@
 #define MAX_DESCS_PER_SKB	1
 #endif

-/*
- * The MV643XX HW requires 8-byte alignment.  However, when I/O
- * is non-cache-coherent, we need to ensure that the I/O buffers
- * we use don't share cache lines with other data.
- */
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
-#define ETH_DMA_ALIGN		L1_CACHE_BYTES
-#else
-#define ETH_DMA_ALIGN		8
-#endif
-
 #define ETH_VLAN_HLEN		4
 #define ETH_FCS_LEN		4
 #define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

@@ -195,6 +195,10 @@ struct myri10ge_priv {
     char *fw_name;
     char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
     char fw_version[128];
+    int fw_ver_major;
+    int fw_ver_minor;
+    int fw_ver_tiny;
+    int adopted_rx_filter_bug;
     u8 mac_addr[6];		/* eeprom mac address */
     unsigned long serial_number;
     int vendor_specific_offset;

@@ -447,7 +451,6 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
                            struct mcp_gen_header *hdr)
 {
     struct device *dev = &mgp->pdev->dev;
-    int major, minor;

     /* check firmware type */
     if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {

|
|||
/* save firmware version for ethtool */
|
||||
strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
|
||||
|
||||
sscanf(mgp->fw_version, "%d.%d", &major, &minor);
|
||||
sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
|
||||
&mgp->fw_ver_minor, &mgp->fw_ver_tiny);
|
||||
|
||||
if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
|
||||
if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR
|
||||
&& mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
|
||||
dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
|
||||
dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
|
||||
MXGEFW_VERSION_MINOR);
|
||||
|
@@ -561,6 +566,18 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
     memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
     status = myri10ge_validate_firmware(mgp, hdr);
     kfree(hdr);
+
+    /* check to see if adopted firmware has bug where adopting
+     * it will cause broadcasts to be filtered unless the NIC
+     * is kept in ALLMULTI mode */
+    if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
+        mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
+        mgp->adopted_rx_filter_bug = 1;
+        dev_warn(dev, "Adopting fw %d.%d.%d: "
+                 "working around rx filter bug\n",
+                 mgp->fw_ver_major, mgp->fw_ver_minor,
+                 mgp->fw_ver_tiny);
+    }
     return status;
 }

@@ -794,6 +811,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
     status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
     myri10ge_change_promisc(mgp, 0, 0);
     myri10ge_change_pause(mgp, mgp->pause);
+    if (mgp->adopted_rx_filter_bug)
+        (void)myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
     return status;
 }

|
|||
myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
|
||||
|
||||
/* This firmware is known to not support multicast */
|
||||
if (!mgp->fw_multicast_support)
|
||||
if (!mgp->fw_multicast_support || mgp->adopted_rx_filter_bug)
|
||||
return;
|
||||
|
||||
/* Disable multicast filtering */
|
||||
|
|
|
@@ -260,7 +260,7 @@ static const struct {

 static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
     { PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
-    { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+    { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
     { }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);

@@ -242,10 +242,11 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
     adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
                                           sizeof(struct netxen_ring_ctx));

-    addr = pci_alloc_consistent(adapter->ahw.pdev,
-                                sizeof(struct cmd_desc_type0) *
-                                adapter->max_tx_desc_count,
-                                (dma_addr_t *) & hw->cmd_desc_phys_addr);
+    addr = netxen_alloc(adapter->ahw.pdev,
+                        sizeof(struct cmd_desc_type0) *
+                        adapter->max_tx_desc_count,
+                        (dma_addr_t *) & hw->cmd_desc_phys_addr,
+                        &adapter->ahw.cmd_desc_pdev);
     printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);

     if (addr == NULL) {

@@ -499,7 +499,10 @@ static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
         while(1) {
             int data1;

-            do_rom_fast_read(adapter, addridx, &data1);
+            ret = do_rom_fast_read(adapter, addridx, &data1);
+            if (ret < 0)
+                return ret;
+
             if (data1 == data)
                 break;

@@ -525,6 +525,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
     if (adapter == NULL)
         return;

+    if (adapter->irq)
+        free_irq(adapter->irq, adapter);
     netxen_nic_stop_all_ports(adapter);
     /* leave the hw in the same state as reboot */
     netxen_pinit_from_rom(adapter, 0);

@@ -672,8 +674,6 @@ static int netxen_nic_close(struct net_device *netdev)

     if (!adapter->active_ports) {
         netxen_nic_disable_int(adapter);
-        if (adapter->irq)
-            free_irq(adapter->irq, adapter);
         cmd_buff = adapter->cmd_buf_arr;
         for (i = 0; i < adapter->max_tx_desc_count; i++) {
             buffrag = cmd_buff->frag_array;

@@ -1155,8 +1155,8 @@ static void __exit netxen_exit_module(void)
     /*
      * Wait for some time to allow the dma to drain, if any.
      */
-    destroy_workqueue(netxen_workq);
     pci_unregister_driver(&netxen_driver);
+    destroy_workqueue(netxen_workq);
 }

 module_exit(netxen_exit_module);

@@ -104,8 +104,6 @@ static int automatic_resume; /* experimental .. better should be zero */
 static int rfdadd;	/* rfdadd=1 may be better for 8K MEM cards */
 static int fifo=0x8;	/* don't change */

-/* #define REALLY_SLOW_IO */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>

@@ -39,7 +39,7 @@

 #define DRV_NAME	"qla3xxx"
 #define DRV_STRING	"QLogic ISP3XXX Network Driver"
-#define DRV_VERSION	"v2.02.00-k36"
+#define DRV_VERSION	"v2.03.00-k3"
 #define PFX		DRV_NAME " "

 static const char ql3xxx_driver_name[] = DRV_NAME;

@@ -276,7 +276,8 @@ static void ql_enable_interrupts(struct ql3_adapter *qdev)
 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                             struct ql_rcv_buf_cb *lrg_buf_cb)
 {
-    u64 map;
+    dma_addr_t map;
+    int err;
     lrg_buf_cb->next = NULL;

     if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */

@@ -287,9 +288,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
     }

     if (!lrg_buf_cb->skb) {
-        lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+        lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+                                           qdev->lrg_buffer_len);
         if (unlikely(!lrg_buf_cb->skb)) {
-            printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+            printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
                    qdev->ndev->name);
             qdev->lrg_buf_skb_check++;
         } else {

@@ -303,6 +305,17 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                  qdev->lrg_buffer_len -
                                  QL_HEADER_SPACE,
                                  PCI_DMA_FROMDEVICE);
+            err = pci_dma_mapping_error(map);
+            if(err) {
+                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                       qdev->ndev->name, err);
+                dev_kfree_skb(lrg_buf_cb->skb);
+                lrg_buf_cb->skb = NULL;
+
+                qdev->lrg_buf_skb_check++;
+                return;
+            }
+
             lrg_buf_cb->buf_phy_addr_low =
                 cpu_to_le32(LS_64BITS(map));
             lrg_buf_cb->buf_phy_addr_high =

@@ -1387,6 +1400,8 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
         printk(KERN_INFO PFX
                "%s: Reset in progress, skip processing link "
                "state.\n", qdev->ndev->name);
+
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         return;
     }

@@ -1518,8 +1533,10 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
     spin_lock_irqsave(&qdev->hw_lock, hw_flags);
     if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7))
+                        2) << 7)) {
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         return 0;
+    }
     status = ql_is_auto_cfg(qdev);
     ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
     spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

@@ -1533,8 +1550,10 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
     spin_lock_irqsave(&qdev->hw_lock, hw_flags);
     if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7))
+                        2) << 7)) {
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         return 0;
+    }
     status = ql_get_link_speed(qdev);
     ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
     spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

@@ -1548,8 +1567,10 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
     spin_lock_irqsave(&qdev->hw_lock, hw_flags);
     if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7))
+                        2) << 7)) {
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         return 0;
+    }
     status = ql_is_link_full_dup(qdev);
     ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
     spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

@@ -1615,14 +1636,16 @@ static const struct ethtool_ops ql3xxx_ethtool_ops = {
 static int ql_populate_free_queue(struct ql3_adapter *qdev)
 {
     struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
-    u64 map;
+    dma_addr_t map;
+    int err;

     while (lrg_buf_cb) {
         if (!lrg_buf_cb->skb) {
-            lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+            lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+                                               qdev->lrg_buffer_len);
             if (unlikely(!lrg_buf_cb->skb)) {
                 printk(KERN_DEBUG PFX
-                       "%s: Failed dev_alloc_skb().\n",
+                       "%s: Failed netdev_alloc_skb().\n",
                        qdev->ndev->name);
                 break;
             } else {

@@ -1636,6 +1659,17 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                                      qdev->lrg_buffer_len -
                                      QL_HEADER_SPACE,
                                      PCI_DMA_FROMDEVICE);
+
+                err = pci_dma_mapping_error(map);
+                if(err) {
+                    printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                           qdev->ndev->name, err);
+                    dev_kfree_skb(lrg_buf_cb->skb);
+                    lrg_buf_cb->skb = NULL;
+                    break;
+                }
+
+
                 lrg_buf_cb->buf_phy_addr_low =
                     cpu_to_le32(LS_64BITS(map));
                 lrg_buf_cb->buf_phy_addr_high =

@@ -1690,11 +1724,11 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)

         qdev->lrg_buf_q_producer_index++;

-        if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
+        if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
             qdev->lrg_buf_q_producer_index = 0;

         if (qdev->lrg_buf_q_producer_index ==
-            (NUM_LBUFQ_ENTRIES - 1)) {
+            (qdev->num_lbufq_entries - 1)) {
             lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
         }
     }

@@ -1713,8 +1747,31 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 {
     struct ql_tx_buf_cb *tx_cb;
     int i;
+    int retval = 0;

+    if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+        printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
+    }
+
     tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+    /*  Check the transmit response flags for any errors */
+    if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+        printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+
+        qdev->stats.tx_errors++;
+        retval = -EIO;
+        goto frame_not_sent;
+    }
+
+    if(tx_cb->seg_count == 0) {
+        printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+
+        qdev->stats.tx_errors++;
+        retval = -EIO;
+        goto invalid_seg_count;
+    }
+
     pci_unmap_single(qdev->pdev,
                      pci_unmap_addr(&tx_cb->map[0], mapaddr),
                      pci_unmap_len(&tx_cb->map[0], maplen),

@@ -1731,11 +1788,32 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
     }
     qdev->stats.tx_packets++;
     qdev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
     dev_kfree_skb_irq(tx_cb->skb);
     tx_cb->skb = NULL;
+
+invalid_seg_count:
     atomic_inc(&qdev->tx_count);
 }

+void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+    if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+        qdev->small_buf_index = 0;
+    qdev->small_buf_release_cnt++;
+}
+
+struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+    struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+
+    lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+    qdev->lrg_buf_release_cnt++;
+    if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+        qdev->lrg_buf_index = 0;
+    return(lrg_buf_cb);
+}
+
 /*
  * The difference between 3022 and 3032 for inbound completions:
  * 3022 uses two buffers per completion.  The first buffer contains

||||
* 3022 uses two buffers per completion. The first buffer contains
|
||||
|
@ -1751,47 +1829,21 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
|
|||
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
|
||||
struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
|
||||
{
|
||||
long int offset;
|
||||
u32 lrg_buf_phy_addr_low = 0;
|
||||
struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
|
||||
struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
|
||||
u32 *curr_ial_ptr;
|
||||
struct sk_buff *skb;
|
||||
u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
|
||||
|
||||
/*
|
||||
* Get the inbound address list (small buffer).
|
||||
*/
|
||||
offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
|
||||
if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
|
||||
qdev->small_buf_index = 0;
|
||||
ql_get_sbuf(qdev);
|
||||
|
||||
curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
|
||||
qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
|
||||
qdev->small_buf_release_cnt++;
|
||||
|
||||
if (qdev->device_id == QL3022_DEVICE_ID) {
|
||||
/* start of first buffer (3022 only) */
|
||||
lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
|
||||
lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
|
||||
qdev->lrg_buf_release_cnt++;
|
||||
if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
|
||||
qdev->lrg_buf_index = 0;
|
||||
}
|
||||
curr_ial_ptr++; /* 64-bit pointers require two incs. */
|
||||
curr_ial_ptr++;
|
||||
}
|
||||
if (qdev->device_id == QL3022_DEVICE_ID)
|
||||
lrg_buf_cb1 = ql_get_lbuf(qdev);
|
||||
|
||||
/* start of second buffer */
|
||||
lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
|
||||
lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
|
||||
|
||||
/*
|
||||
* Second buffer gets sent up the stack.
|
||||
*/
|
||||
qdev->lrg_buf_release_cnt++;
|
||||
if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
|
||||
qdev->lrg_buf_index = 0;
|
||||
lrg_buf_cb2 = ql_get_lbuf(qdev);
|
||||
skb = lrg_buf_cb2->skb;
|
||||
|
||||
qdev->stats.rx_packets++;
|
||||
|
@@ -1819,11 +1871,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                                      struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
 {
-    long int offset;
-    u32 lrg_buf_phy_addr_low = 0;
     struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
     struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
-    u32 *curr_ial_ptr;
     struct sk_buff *skb1 = NULL, *skb2;
     struct net_device *ndev = qdev->ndev;
     u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);

@@ -1833,35 +1882,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
      * Get the inbound address list (small buffer).
      */

-    offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
-    if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
-        qdev->small_buf_index = 0;
-    curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
-    qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
-    qdev->small_buf_release_cnt++;
+    ql_get_sbuf(qdev);

     if (qdev->device_id == QL3022_DEVICE_ID) {
         /* start of first buffer on 3022 */
-        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-        lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-        qdev->lrg_buf_release_cnt++;
-        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-            qdev->lrg_buf_index = 0;
+        lrg_buf_cb1 = ql_get_lbuf(qdev);
         skb1 = lrg_buf_cb1->skb;
-        curr_ial_ptr++;	/* 64-bit pointers require two incs. */
-        curr_ial_ptr++;
         size = ETH_HLEN;
         if (*((u16 *) skb1->data) != 0xFFFF)
             size += VLAN_ETH_HLEN - ETH_HLEN;
     }

     /* start of second buffer */
-    lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-    lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+    lrg_buf_cb2 = ql_get_lbuf(qdev);
     skb2 = lrg_buf_cb2->skb;
-    qdev->lrg_buf_release_cnt++;
-    if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-        qdev->lrg_buf_index = 0;

     skb_put(skb2, length);	/* Just the second buffer length here. */
     pci_unmap_single(qdev->pdev,

@@ -1914,10 +1948,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
     struct net_rsp_iocb *net_rsp;
     struct net_device *ndev = qdev->ndev;
     unsigned long hw_flags;
+    int work_done = 0;
+
+    u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));

     /* While there are entries in the completion queue. */
-    while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-            qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+    while ((rsp_producer_index !=
+            qdev->rsp_consumer_index) && (work_done < work_to_do)) {

         net_rsp = qdev->rsp_current;
         switch (net_rsp->opcode) {

@@ -1968,37 +2005,34 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
         } else {
             qdev->rsp_current++;
         }
+
+        work_done = *tx_cleaned + *rx_cleaned;
     }

-    spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+    if(work_done) {
+        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

-    ql_update_lrg_bufq_prod_index(qdev);
+        ql_update_lrg_bufq_prod_index(qdev);

-    if (qdev->small_buf_release_cnt >= 16) {
-        while (qdev->small_buf_release_cnt >= 16) {
-            qdev->small_buf_q_producer_index++;
+        if (qdev->small_buf_release_cnt >= 16) {
+            while (qdev->small_buf_release_cnt >= 16) {
+                qdev->small_buf_q_producer_index++;

-            if (qdev->small_buf_q_producer_index ==
-                NUM_SBUFQ_ENTRIES)
-                qdev->small_buf_q_producer_index = 0;
-            qdev->small_buf_release_cnt -= 8;
-        }
+                if (qdev->small_buf_q_producer_index ==
+                    NUM_SBUFQ_ENTRIES)
+                    qdev->small_buf_q_producer_index = 0;
+                qdev->small_buf_release_cnt -= 8;
+            }

-        ql_write_common_reg(qdev,
-                            &port_regs->CommonRegs.
-                            rxSmallQProducerIndex,
-                            qdev->small_buf_q_producer_index);
-    }
+            wmb();
+            ql_write_common_reg(qdev,
+                                &port_regs->CommonRegs.
+                                rxSmallQProducerIndex,
+                                qdev->small_buf_q_producer_index);
+        }

-    ql_write_common_reg(qdev,
-                        &port_regs->CommonRegs.rspQConsumerIndex,
-                        qdev->rsp_consumer_index);
-    spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
-
-    if (unlikely(netif_queue_stopped(qdev->ndev))) {
         if (netif_queue_stopped(qdev->ndev) &&
             (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
             netif_wake_queue(qdev->ndev);
+
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
     }

     return *tx_cleaned + *rx_cleaned;

@@ -2009,6 +2043,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
     struct ql3_adapter *qdev = netdev_priv(ndev);
     int work_to_do = min(*budget, ndev->quota);
     int rx_cleaned = 0, tx_cleaned = 0;
+    unsigned long hw_flags;
+    struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

     if (!netif_carrier_ok(ndev))
         goto quit_polling;

@@ -2017,9 +2053,17 @@ static int ql_poll(struct net_device *ndev, int *budget)
     *budget -= rx_cleaned;
     ndev->quota -= rx_cleaned;

-    if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+    if( tx_cleaned + rx_cleaned != work_to_do ||
+        !netif_running(ndev)) {
 quit_polling:
         netif_rx_complete(ndev);
+
+        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+        ql_write_common_reg(qdev,
+                            &port_regs->CommonRegs.rspQConsumerIndex,
+                            qdev->rsp_consumer_index);
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
         ql_enable_interrupts(qdev);
         return 0;
     }

@@ -2073,10 +2117,9 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
         spin_unlock(&qdev->adapter_lock);
     } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
         ql_disable_interrupts(qdev);
-        if (likely(netif_rx_schedule_prep(ndev)))
+        if (likely(netif_rx_schedule_prep(ndev))) {
             __netif_rx_schedule(ndev);
-        else
-            ql_enable_interrupts(qdev);
+        }
     } else {
         return IRQ_NONE;
     }

@ -2093,8 +2136,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
|
|||
* the next AOL if more frags are coming.
|
||||
* That is why the frags:segment count ratio is not linear.
|
||||
*/
|
||||
static int ql_get_seg_count(unsigned short frags)
|
||||
static int ql_get_seg_count(struct ql3_adapter *qdev,
|
||||
unsigned short frags)
|
||||
{
|
||||
if (qdev->device_id == QL3022_DEVICE_ID)
|
||||
return 1;
|
||||
|
||||
switch(frags) {
|
||||
case 0: return 1; /* just the skb->data seg */
|
||||
case 1: return 2; /* skb->data + 1 frag */
|
||||
|
@@ -2139,17 +2186,169 @@ static void ql_hw_csum_setup(struct sk_buff *skb,

	if (ip) {
		if (ip->protocol == IPPROTO_TCP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
				OB_3032MAC_IOCB_REQ_IC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		} else if (ip->protocol == IPPROTO_UDP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
				OB_3032MAC_IOCB_REQ_IC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		}
	}
}

/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb->len;
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
			(skb_shinfo(skb)->nr_frags));
	if(seg_cnt == -1) {
		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
		return NETDEV_TX_BUSY;
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(map);
	if(err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);

		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(map);
				if(err) {

					printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  len);
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map =
			    pci_map_page(qdev->pdev, frag->page,
					 frag->page_offset, frag->size,
					 PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;

map_error:
	/* A PCI mapping failed and now we will need to back out
	 * We need to traverse through the oal's and associated pages which
	 * have been mapped and now we must unmap them to clean up properly
	 */

	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i=0; i<completed_segs; i++,seg++) {
		oal_entry++;

		if((seg == 2 && seg_cnt > 3) ||		/* Check for continuation */
		   (seg == 7 && seg_cnt > 8) ||		/* requirements. It's strange */
		   (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
		   (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
				pci_unmap_addr(&tx_cb->map[seg], mapaddr),
				pci_unmap_len(&tx_cb->map[seg], maplen),
				PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
			       pci_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_addr(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;

}

/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
@@ -2167,92 +2366,35 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	u64 map;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
	seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
	if(seg_cnt == -1) {
	if((tx_cb->seg_count = ql_get_seg_count(qdev,
						(skb_shinfo(skb)->nr_frags))) == -1) {
		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
		return NETDEV_TX_OK;

	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);
	len = skb_headlen(skb);
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (!skb_shinfo(skb)->nr_frags) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		int i;
		oal = tx_cb->oal;
		for (i=0; i<frag_cnt; i++,seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);
				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  len);
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map =
			    pci_map_page(qdev->pdev, frag->page,
					 frag->page_offset, frag->size,
					 PCI_DMA_TODEVICE);
			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);

	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2338,12 +2480,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	    qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
	if (qdev->lrg_buf == NULL) {
		printk(KERN_ERR PFX
		       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,

@@ -2393,7 +2542,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	if(qdev->lrg_buf) kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,

@@ -2438,8 +2587,6 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =

@@ -2476,7 +2623,7 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);

@@ -2497,7 +2644,7 @@ static void ql_init_large_buffers(struct ql3_adapter *qdev)
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;

@@ -2512,10 +2659,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	u64 map;
	dma_addr_t map;
	int err;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		skb = dev_alloc_skb(qdev->lrg_buffer_len);
	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX

@@ -2541,6 +2690,15 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
				       qdev->lrg_buffer_len -
				       QL_HEADER_SPACE,
				       PCI_DMA_FROMDEVICE);

		err = pci_dma_mapping_error(map);
		if(err) {
			printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
			       qdev->ndev->name, err);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		}

		pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
		pci_unmap_len_set(lrg_buf_cb, maplen,
				  qdev->lrg_buffer_len -

@@ -2592,9 +2750,15 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)

static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	}
	else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX

@@ -2602,6 +2766,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
@@ -2834,7 +2999,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,

@@ -2856,7 +3021,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
@@ -3292,6 +3457,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {

@@ -3343,27 +3509,6 @@ static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
	return &qdev->stats;
}

static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
	if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
		printk(KERN_ERR PFX
		       "%s: mtu size of %d is not valid.  Use exactly %d or "
		       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
		       JUMBO_MTU_SIZE);
		return -EINVAL;
	}

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	ndev->mtu = new_mtu;
	return ql_cycle_adapter(qdev,QL_DO_RESET);
}

static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*

@@ -3609,8 +3754,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev)
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

@@ -3639,6 +3788,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

@@ -3650,7 +3800,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->change_mtu = ql3xxx_change_mtu;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;

@@ -3667,6 +3816,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

@@ -3674,9 +3824,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}

@@ -1014,13 +1014,15 @@ struct eeprom_data {

/* Transmit and Receive Buffers */
#define NUM_LBUFQ_ENTRIES   	128
#define JUMBO_NUM_LBUFQ_ENTRIES \
	(NUM_LBUFQ_ENTRIES/(JUMBO_MTU_SIZE/NORMAL_MTU_SIZE))
#define NUM_SBUFQ_ENTRIES   	64
#define QL_SMALL_BUFFER_SIZE    32
#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
	(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
	/* Each send has at least one control block.  This is how many we keep. */
#define NUM_SMALL_BUFFERS	NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
#define NUM_LARGE_BUFFERS	NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY

#define QL_HEADER_SPACE 32	/* make header space at top of skb. */
/*
 * Large & Small Buffers for Receives

@@ -1092,7 +1094,6 @@ struct oal_entry {
	u32 len;
#define OAL_LAST_ENTRY 0x80000000	/* Last valid buffer in list. */
#define OAL_CONT_ENTRY 0x40000000	/* points to an OAL. (continuation) */
	u32 reserved;
};

struct oal {

@@ -1193,7 +1194,7 @@ struct ql3_adapter {
	struct net_rsp_iocb *rsp_current;
	u16 rsp_consumer_index;
	u16 reserved_06;
	u32 *prsp_producer_index;
	volatile u32 *prsp_producer_index;
	u32 rsp_producer_index_phy_addr_high;
	u32 rsp_producer_index_phy_addr_low;

@@ -1207,9 +1208,11 @@ struct ql3_adapter {
	u32 lrg_buf_q_producer_index;
	u32 lrg_buf_release_cnt;
	struct bufq_addr_element *lrg_buf_next_free;
	u32 num_large_buffers;
	u32 num_lbufq_entries;

	/* Large (Receive) Buffers */
	struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
	struct ql_rcv_buf_cb *lrg_buf;
	struct ql_rcv_buf_cb *lrg_buf_free_head;
	struct ql_rcv_buf_cb *lrg_buf_free_tail;
	u32 lrg_buf_free_count;

@@ -430,6 +430,7 @@ struct XENA_dev_config {
#define TX_PA_CFG_IGNORE_SNAP_OUI          BIT(2)
#define TX_PA_CFG_IGNORE_LLC_CTRL          BIT(3)
#define TX_PA_CFG_IGNORE_L2_ERR            BIT(6)
#define RX_PA_CFG_STRIP_VLAN_TAG           BIT(15)

	/* Recent add, used only debug purposes. */
	u64 pcc_enable;

@@ -42,6 +42,14 @@
 * Possible values '1' for enable '0' for disable. Default is '0'
 * lro_max_pkts: This parameter defines maximum number of packets can be
 *     aggregated as a single large packet
 * napi: This parameter used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
 *      Possible values '1' for enable and '0' for disable. Default is '0'
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *                 Possible values '1' for enable , '0' for disable.
 *                 Default is '2' - which means disable in promisc mode
 *                 and enable in non-promiscuous mode.
 ************************************************************************/

#include <linux/module.h>

@@ -76,7 +84,7 @@
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.16.1"
#define DRV_VERSION "2.0.17.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";

@@ -131,7 +139,7 @@ static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},

@@ -225,7 +233,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"},
	{"rxf_wr_cnt"}
};

static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},

@@ -241,7 +252,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"},
	{"link_fault_cnt"}
};

static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},

@@ -269,8 +283,16 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	("lro_avg_aggr_pkts"),
};

#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
					ETH_GSTRING_LEN
#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )

#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )

#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN

@@ -293,6 +315,9 @@ static void s2io_vlan_rx_register(struct net_device *dev,
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
int vlan_strip_flag;

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{

@@ -404,6 +429,7 @@ S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -1371,6 +1397,16 @@ static int init_nic(struct s2io_nic *nic)
				   &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return FAILURE;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

@@ -1943,6 +1979,13 @@ static int start_nic(struct s2io_nic *nic)
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
@@ -3195,26 +3238,37 @@ static void alarm_intr_handler(struct s2io_nic *nic)
 * SUCCESS on success and FAILURE on failure.
 */

static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
				int bit_state)
{
	int ret = FAILURE, cnt = 0;
	int ret = FAILURE, cnt = 0, delay = 1;
	u64 val64;

	while (TRUE) {
	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
		return FAILURE;

	do {
		val64 = readq(addr);
		if (!(val64 & busy_bit)) {
			ret = SUCCESS;
			break;
		if (bit_state == S2IO_BIT_RESET) {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		} else {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		}

		if(in_interrupt())
			mdelay(50);
			mdelay(delay);
		else
			msleep(50);
			msleep(delay);

		if (cnt++ > 10)
			break;
	}
		if (++cnt >= 10)
			delay = 50;
	} while (cnt < 20);
	return ret;
}
/*
@@ -3340,6 +3394,9 @@ static void s2io_reset(struct s2io_nic * sp)
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}

@@ -4087,6 +4144,11 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp)
				val64 &= ~GPIO_INT_MASK_LINK_UP;
				val64 |= GPIO_INT_MASK_LINK_DOWN;
				writeq(val64, &bar0->gpio_int_mask);

				/* turn off LED */
				val64 = readq(&bar0->adapter_control);
				val64 = val64 &(~ADAPTER_LED_ON);
				writeq(val64, &bar0->adapter_control);
			}
		}
		val64 = readq(&bar0->gpio_int_mask);
@@ -4296,7 +4358,8 @@ static void s2io_set_multicast(struct net_device *dev)
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;

@@ -4312,7 +4375,8 @@ static void s2io_set_multicast(struct net_device *dev)
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;

@@ -4329,6 +4393,13 @@ static void s2io_set_multicast(struct net_device *dev)
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",

@@ -4344,6 +4415,13 @@ static void s2io_set_multicast(struct net_device *dev)
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",

@@ -4378,7 +4456,8 @@ static void s2io_set_multicast(struct net_device *dev)

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");

@@ -4409,7 +4488,8 @@ static void s2io_set_multicast(struct net_device *dev)

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");

@@ -4435,6 +4515,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64, mac_addr = 0;
	int i;
	u64 old_mac_addr = 0;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
@@ -4444,6 +4525,22 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		old_mac_addr <<= 8;
		old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	if(0 == mac_addr)
		return SUCCESS;

	/* Update the internal structure with this new mac address */
	if(mac_addr != old_mac_addr) {
		memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
		sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
		sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
		sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
		sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
		sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
		sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
	}

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),

@@ -4455,7 +4552,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
		DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
		return FAILURE;
	}

@@ -4546,7 +4643,11 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
	info->testinfo_len = S2IO_TEST_LEN;
	info->n_stats = S2IO_STAT_LEN;

	if (sp->device_type == XFRAME_I_DEVICE)
		info->n_stats = XFRAME_I_STAT_LEN;
	else
		info->n_stats = XFRAME_II_STAT_LEN;
}

/**
@@ -5568,22 +5669,30 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
@@ -5663,18 +5772,42 @@ static int s2io_ethtool_self_test_count(struct net_device *dev)
static void s2io_ethtool_get_strings(struct net_device *dev,
				     u32 stringset, u8 * data)
{
	int stat_size = 0;
	struct s2io_nic *sp = dev->priv;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
		break;
	case ETH_SS_STATS:
		memcpy(data, &ethtool_stats_keys,
		       sizeof(ethtool_stats_keys));
		stat_size = sizeof(ethtool_xena_stats_keys);
		memcpy(data, &ethtool_xena_stats_keys,stat_size);
		if(sp->device_type == XFRAME_II_DEVICE) {
			memcpy(data + stat_size,
				&ethtool_enhanced_stats_keys,
				sizeof(ethtool_enhanced_stats_keys));
			stat_size += sizeof(ethtool_enhanced_stats_keys);
		}

		memcpy(data + stat_size, &ethtool_driver_stats_keys,
			sizeof(ethtool_driver_stats_keys));
	}
}
static int s2io_ethtool_get_stats_count(struct net_device *dev)
{
	return (S2IO_STAT_LEN);
	struct s2io_nic *sp = dev->priv;
	int stat_count = 0;
	switch(sp->device_type) {
	case XFRAME_I_DEVICE:
		stat_count = XFRAME_I_STAT_LEN;
		break;

	case XFRAME_II_DEVICE:
		stat_count = XFRAME_II_STAT_LEN;
		break;
	}

	return stat_count;
}

static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
@@ -5909,7 +6042,7 @@ static void s2io_set_link(struct work_struct *work)
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_lock();
	rtnl_unlock();
}

static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,

@@ -6066,10 +6199,13 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
					rx_blocks[j].rxds[k].virt_addr;
				if(sp->rxd_mode >= RXD_MODE_3A)
					ba = &mac_control->rings[i].ba[j][k];
				set_rxd_buffer_pointer(sp, rxdp, ba,
				if (set_rxd_buffer_pointer(sp, rxdp, ba,
						       &skb,(u64 *)&temp0_64,
						       (u64 *)&temp1_64,
						       (u64 *)&temp2_64, size);
						       (u64 *)&temp2_64,
						       size) == ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				wmb();
@@ -6112,7 +6248,7 @@ static int s2io_add_isr(struct s2io_nic * sp)
		}
	}
	if (sp->intr_type == MSI_X) {
		int i;
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {

@@ -6121,16 +6257,36 @@ static int s2io_add_isr(struct s2io_nic * sp)
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
				(unsigned long long)sp->msix_info[i].addr);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
				(unsigned long long)sp->msix_info[i].addr);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "

@@ -6140,6 +6296,8 @@ static int s2io_add_isr(struct s2io_nic * sp)
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
@@ -6567,7 +6725,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)

	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,

@@ -6704,8 +6863,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
	if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
		napi = 0;

	if (rx_ring_mode > 3) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6714,6 +6872,37 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
	return SUCCESS;
}

/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * Description: The function configures the receive steering to
 * desired receive ring.
 * Return Value:  SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	if (ds_codepoint > 63)
		return FAILURE;

	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}

/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.

@@ -7008,13 +7197,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);

@@ -32,7 +32,8 @@
#define FAILURE -1
#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100

#define S2IO_BIT_RESET 1
#define S2IO_BIT_SET 2
#define CHECKBIT(value, nbit) (value & (1 << nbit))

/* Maximum time to flicker LED when asked to identify NIC using ethtool */

@@ -296,6 +297,9 @@ struct stat_block {
	struct xpakStat xpak_stat;
};

/* Default value for 'vlan_strip_tag' configuration parameter */
#define NO_STRIP_IN_PROMISC 2

/*
 * Structures representing different init time configuration
 * parameters of the NIC.

@@ -1005,7 +1009,8 @@ static int s2io_set_swapper(struct s2io_nic * sp);
static void s2io_card_down(struct s2io_nic *nic);
static int s2io_card_up(struct s2io_nic *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit);
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
					int bit_state);
static int s2io_add_isr(struct s2io_nic * sp);
static void s2io_rem_isr(struct s2io_nic * sp);

@@ -1019,6 +1024,7 @@ static void queue_rx_frame(struct sk_buff *skb);
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len);
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);

#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size

@@ -12,26 +12,15 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgialib.h>

#include "sgiseeq.h"

@@ -73,7 +73,7 @@ static const char * const cfm_events[] = {
/*
 * map from state to downstream port type
 */
static const u_char cf_to_ptype[] = {
static const unsigned char cf_to_ptype[] = {
	TNONE,TNONE,TNONE,TNONE,TNONE,
	TNONE,TB,TB,TS,
	TA,TB,TS,TB

@@ -77,13 +77,13 @@ static const struct pci_device_id skge_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -2767,6 +2767,17 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
	return err;
}

static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

static void genesis_add_filter(u8 filter[8], const u8 *addr)
{
	u32 crc, bit;

	crc = ether_crc_le(ETH_ALEN, addr);
	bit = ~crc & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

@@ -2788,24 +2799,33 @@ static void genesis_set_multicast(struct net_device *dev)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));
		for (i = 0; list && i < count; i++, list = list->next) {
			u32 crc, bit;
			crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
			bit = ~crc & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}

		if (skge->flow_status == FLOW_STAT_REM_SEND
		    || skge->flow_status == FLOW_STAT_SYMMETRIC)
			genesis_add_filter(filter, pause_mc_addr);

		for (i = 0; list && i < count; i++, list = list->next)
			genesis_add_filter(filter, list->dmi_addr);
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}

static void yukon_add_filter(u8 filter[8], const u8 *addr)
{
	u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list = dev->mc_list;
	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND
			|| skge->flow_status == FLOW_STAT_SYMMETRIC);
	u16 reg;
	u8 filter[8];

@@ -2818,16 +2838,17 @@ static void yukon_set_multicast(struct net_device *dev)
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)		/* no multicast */
	else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;
		reg |= GM_RXCR_MCF_ENA;

		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
		if (rx_pause)
			yukon_add_filter(filter, pause_mc_addr);

		for (i = 0; list && i < dev->mc_count; i++, list = list->next)
			yukon_add_filter(filter, list->dmi_addr);
	}

@@ -1849,8 +1849,7 @@ enum {
			   GMR_FS_JABBER,
/* Rx GMAC FIFO Flush Mask (default) */
	RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
			   GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
			   GMR_FS_JABBER,
			   GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
};

/* RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */

@@ -1,7 +1,8 @@
/*
 * Network device driver for Cell Processor-Based Blade
 * Network device driver for Cell Processor-Based Blade and Celleb platform
 *
 * (C) Copyright IBM Corp. 2005
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
 *           Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
@@ -165,6 +166,41 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
	return readvalue;
}

/**
 * spider_net_setup_aneg - initial auto-negotiation setup
 * @card: device structure
 **/
static void
spider_net_setup_aneg(struct spider_net_card *card)
{
	struct mii_phy *phy = &card->phy;
	u32 advertise = 0;
	u16 bmcr, bmsr, stat1000, estat;

	bmcr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMCR);
	bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
	stat1000 = spider_net_read_phy(card->netdev, phy->mii_id, MII_STAT1000);
	estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);

	if (bmsr & BMSR_10HALF)
		advertise |= ADVERTISED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		advertise |= ADVERTISED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		advertise |= ADVERTISED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		advertise |= ADVERTISED_100baseT_Full;

	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
		advertise |= SUPPORTED_1000baseT_Full;
	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
		advertise |= SUPPORTED_1000baseT_Half;

	mii_phy_probe(phy, phy->mii_id);
	phy->def->ops->setup_aneg(phy, advertise);

}

/**
 * spider_net_rx_irq_off - switch off rx irq on this spider card
 * @card: device structure
@@ -263,9 +299,9 @@ spider_net_get_mac_address(struct net_device *netdev)
 * returns the status as in the dmac_cmd_status field of the descriptor
 */
static inline int
spider_net_get_descr_status(struct spider_net_descr *descr)
spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
{
	return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
	return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}

/**

@@ -283,12 +319,12 @@ spider_net_free_chain(struct spider_net_card *card,
	descr = chain->ring;
	do {
		descr->bus_addr = 0;
		descr->next_descr_addr = 0;
		descr->hwdescr->next_descr_addr = 0;
		descr = descr->next;
	} while (descr != chain->ring);

	dma_free_coherent(&card->pdev->dev, chain->num_desc,
	    chain->ring, chain->dma_addr);
	    chain->hwring, chain->dma_addr);
}

/**
@@ -307,31 +343,34 @@ spider_net_init_chain(struct spider_net_card *card,
{
	int i;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	size_t alloc_size;

	alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
	alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);

	chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
	chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
		&chain->dma_addr, GFP_KERNEL);

	if (!chain->ring)
	if (!chain->hwring)
		return -ENOMEM;

	descr = chain->ring;
	memset(descr, 0, alloc_size);
	memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));

	/* Set up the hardware pointers in each descriptor */
	descr = chain->ring;
	hwdescr = chain->hwring;
	buf = chain->dma_addr;
	for (i=0; i < chain->num_desc; i++, descr++) {
		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
		hwdescr->next_descr_addr = 0;

		descr->hwdescr = hwdescr;
		descr->bus_addr = buf;
		descr->next_descr_addr = 0;
		descr->next = descr + 1;
		descr->prev = descr - 1;

		buf += sizeof(struct spider_net_descr);
		buf += sizeof(struct spider_net_hw_descr);
	}
	/* do actual circular list */
	(descr-1)->next = chain->ring;
@@ -357,10 +396,11 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
	descr = card->rx_chain.head;
	do {
		if (descr->skb) {
			dev_kfree_skb(descr->skb);
			pci_unmap_single(card->pdev, descr->buf_addr,
			pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
					 SPIDER_NET_MAX_FRAME,
					 PCI_DMA_BIDIRECTIONAL);
			dev_kfree_skb(descr->skb);
			descr->skb = NULL;
		}
		descr = descr->next;
	} while (descr != card->rx_chain.head);

@@ -380,6 +420,7 @@ static int
spider_net_prepare_rx_descr(struct spider_net_card *card,
			    struct spider_net_descr *descr)
{
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	dma_addr_t buf;
	int offset;
	int bufsize;
@@ -398,11 +439,11 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
		card->spider_stats.alloc_rx_skb_error++;
		return -ENOMEM;
	}
	descr->buf_size = bufsize;
	descr->result_size = 0;
	descr->valid_size = 0;
	descr->data_status = 0;
	descr->data_error = 0;
	hwdescr->buf_size = bufsize;
	hwdescr->result_size = 0;
	hwdescr->valid_size = 0;
	hwdescr->data_status = 0;
	hwdescr->data_error = 0;

	offset = ((unsigned long)descr->skb->data) &
		(SPIDER_NET_RXBUF_ALIGN - 1);

@@ -411,21 +452,22 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
	/* iommu-map the skb */
	buf = pci_map_single(card->pdev, descr->skb->data,
			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
	descr->buf_addr = buf;
	if (pci_dma_mapping_error(buf)) {
		dev_kfree_skb_any(descr->skb);
		descr->skb = NULL;
		if (netif_msg_rx_err(card) && net_ratelimit())
			pr_err("Could not iommu-map rx buffer\n");
		card->spider_stats.rx_iommu_map_error++;
		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	} else {
		descr->next_descr_addr = 0;
		hwdescr->buf_addr = buf;
		hwdescr->next_descr_addr = 0;
		wmb();
		descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
			SPIDER_NET_DMAC_NOINTR_COMPLETE;

		wmb();
		descr->prev->next_descr_addr = descr->bus_addr;
		descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
	}

	return 0;
@ -481,7 +523,7 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
|
|||
if (!spin_trylock_irqsave(&chain->lock, flags))
|
||||
return;
|
||||
|
||||
while (spider_net_get_descr_status(chain->head) ==
|
||||
while (spider_net_get_descr_status(chain->head->hwdescr) ==
|
||||
SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
if (spider_net_prepare_rx_descr(card, chain->head))
|
||||
break;
|
||||
|
@ -642,7 +684,9 @@ static int
|
|||
spider_net_prepare_tx_descr(struct spider_net_card *card,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct spider_net_descr_chain *chain = &card->tx_chain;
|
||||
struct spider_net_descr *descr;
|
||||
struct spider_net_hw_descr *hwdescr;
|
||||
dma_addr_t buf;
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -655,32 +699,39 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&card->tx_chain.lock, flags);
|
||||
spin_lock_irqsave(&chain->lock, flags);
|
||||
descr = card->tx_chain.head;
|
||||
card->tx_chain.head = descr->next;
|
||||
if (descr->next == chain->tail->prev) {
|
||||
spin_unlock_irqrestore(&chain->lock, flags);
|
||||
pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
|
||||
return -ENOMEM;
|
||||
}
|
||||
hwdescr = descr->hwdescr;
|
||||
chain->head = descr->next;
|
||||
|
||||
descr->buf_addr = buf;
|
||||
descr->buf_size = skb->len;
|
||||
descr->next_descr_addr = 0;
|
||||
descr->skb = skb;
|
||||
descr->data_status = 0;
|
||||
hwdescr->buf_addr = buf;
|
||||
hwdescr->buf_size = skb->len;
|
||||
hwdescr->next_descr_addr = 0;
|
||||
hwdescr->data_status = 0;
|
||||
|
||||
descr->dmac_cmd_status =
|
||||
hwdescr->dmac_cmd_status =
|
||||
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
|
||||
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
|
||||
spin_unlock_irqrestore(&chain->lock, flags);
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IP))
|
||||
switch (skb->nh.iph->protocol) {
|
||||
case IPPROTO_TCP:
|
||||
descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
|
||||
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
|
||||
break;
|
||||
case IPPROTO_UDP:
|
||||
descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
|
||||
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Chain the bus address, so that the DMA engine finds this descr. */
|
||||
descr->prev->next_descr_addr = descr->bus_addr;
|
||||
wmb();
|
||||
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
|
||||
|
||||
card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
|
||||
return 0;
|
||||
|
@ -689,16 +740,17 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
|
|||
static int
|
||||
spider_net_set_low_watermark(struct spider_net_card *card)
|
||||
{
|
||||
struct spider_net_descr *descr = card->tx_chain.tail;
|
||||
struct spider_net_hw_descr *hwdescr;
|
||||
unsigned long flags;
|
||||
int status;
|
||||
int cnt=0;
|
||||
int i;
|
||||
struct spider_net_descr *descr = card->tx_chain.tail;
|
||||
|
||||
/* Measure the length of the queue. Measurement does not
|
||||
* need to be precise -- does not need a lock. */
|
||||
while (descr != card->tx_chain.head) {
|
||||
status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
|
||||
break;
|
||||
descr = descr->next;
|
||||
|
@ -717,10 +769,12 @@ spider_net_set_low_watermark(struct spider_net_card *card)
|
|||
|
||||
/* Set the new watermark, clear the old watermark */
|
||||
spin_lock_irqsave(&card->tx_chain.lock, flags);
|
||||
descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
|
||||
if (card->low_watermark && card->low_watermark != descr)
|
||||
card->low_watermark->dmac_cmd_status =
|
||||
card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
|
||||
descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
|
||||
if (card->low_watermark && card->low_watermark != descr) {
|
||||
hwdescr = card->low_watermark->hwdescr;
|
||||
hwdescr->dmac_cmd_status =
|
||||
hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
|
||||
}
|
||||
card->low_watermark = descr;
|
||||
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
|
||||
return cnt;
|
||||
|
@ -743,16 +797,22 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
|
|||
{
|
||||
struct spider_net_descr_chain *chain = &card->tx_chain;
|
||||
struct spider_net_descr *descr;
|
||||
struct spider_net_hw_descr *hwdescr;
|
||||
struct sk_buff *skb;
|
||||
u32 buf_addr;
|
||||
unsigned long flags;
|
||||
int status;
|
||||
|
||||
while (chain->tail != chain->head) {
|
||||
while (1) {
|
||||
spin_lock_irqsave(&chain->lock, flags);
|
||||
if (chain->tail == chain->head) {
|
||||
spin_unlock_irqrestore(&chain->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
descr = chain->tail;
|
||||
hwdescr = descr->hwdescr;
|
||||
|
||||
status = spider_net_get_descr_status(descr);
|
||||
status = spider_net_get_descr_status(hwdescr);
|
||||
switch (status) {
|
||||
case SPIDER_NET_DESCR_COMPLETE:
|
||||
card->netdev_stats.tx_packets++;
|
||||
|
@ -788,9 +848,10 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
|
|||
}
|
||||
|
||||
chain->tail = descr->next;
|
||||
descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
skb = descr->skb;
|
||||
buf_addr = descr->buf_addr;
|
||||
descr->skb = NULL;
|
||||
buf_addr = hwdescr->buf_addr;
|
||||
spin_unlock_irqrestore(&chain->lock, flags);
|
||||
|
||||
/* unmap the skb */
|
||||
|
@ -826,7 +887,7 @@ spider_net_kick_tx_dma(struct spider_net_card *card)
|
|||
|
||||
descr = card->tx_chain.tail;
|
||||
for (;;) {
|
||||
if (spider_net_get_descr_status(descr) ==
|
||||
if (spider_net_get_descr_status(descr->hwdescr) ==
|
||||
SPIDER_NET_DESCR_CARDOWNED) {
|
||||
spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
|
||||
descr->bus_addr);
|
||||
|
@ -855,13 +916,10 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
{
|
||||
int cnt;
|
||||
struct spider_net_card *card = netdev_priv(netdev);
|
||||
struct spider_net_descr_chain *chain = &card->tx_chain;
|
||||
|
||||
spider_net_release_tx_chain(card, 0);
|
||||
|
||||
if ((chain->head->next == chain->tail->prev) ||
|
||||
(spider_net_prepare_tx_descr(card, skb) != 0)) {
|
||||
|
||||
if (spider_net_prepare_tx_descr(card, skb) != 0) {
|
||||
card->netdev_stats.tx_dropped++;
|
||||
netif_stop_queue(netdev);
|
||||
return NETDEV_TX_BUSY;
|
||||
|
@ -922,17 +980,18 @@ static void
|
|||
spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
struct spider_net_card *card)
|
||||
{
|
||||
struct spider_net_hw_descr *hwdescr= descr->hwdescr;
|
||||
struct sk_buff *skb;
|
||||
struct net_device *netdev;
|
||||
u32 data_status, data_error;
|
||||
|
||||
data_status = descr->data_status;
|
||||
data_error = descr->data_error;
|
||||
data_status = hwdescr->data_status;
|
||||
data_error = hwdescr->data_error;
|
||||
netdev = card->netdev;
|
||||
|
||||
skb = descr->skb;
|
||||
skb->dev = netdev;
|
||||
skb_put(skb, descr->valid_size);
|
||||
skb_put(skb, hwdescr->valid_size);
|
||||
|
||||
/* the card seems to add 2 bytes of junk in front
|
||||
* of the ethernet frame */
|
||||
|
@ -994,23 +1053,25 @@ static void show_rx_chain(struct spider_net_card *card)
|
|||
#endif
|
||||
|
||||
/**
|
||||
* spider_net_decode_one_descr - processes an rx descriptor
|
||||
* spider_net_decode_one_descr - processes an RX descriptor
|
||||
* @card: card structure
|
||||
*
|
||||
* Returns 1 if a packet has been sent to the stack, otherwise 0
|
||||
* Returns 1 if a packet has been sent to the stack, otherwise 0.
|
||||
*
|
||||
* Processes an rx descriptor by iommu-unmapping the data buffer and passing
|
||||
* the packet up to the stack. This function is called in softirq
|
||||
* context, e.g. either bottom half from interrupt or NAPI polling context
|
||||
* Processes an RX descriptor by iommu-unmapping the data buffer
|
||||
* and passing the packet up to the stack. This function is called
|
||||
* in softirq context, e.g. either bottom half from interrupt or
|
||||
* NAPI polling context.
|
||||
*/
|
||||
static int
|
||||
spider_net_decode_one_descr(struct spider_net_card *card)
|
||||
{
|
||||
struct spider_net_descr_chain *chain = &card->rx_chain;
|
||||
struct spider_net_descr *descr = chain->tail;
|
||||
struct spider_net_hw_descr *hwdescr = descr->hwdescr;
|
||||
int status;
|
||||
|
||||
status = spider_net_get_descr_status(descr);
|
||||
status = spider_net_get_descr_status(hwdescr);
|
||||
|
||||
/* Nothing in the descriptor, or ring must be empty */
|
||||
if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
|
||||
|
@ -1021,7 +1082,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
|||
chain->tail = descr->next;
|
||||
|
||||
/* unmap descriptor */
|
||||
pci_unmap_single(card->pdev, descr->buf_addr,
|
||||
pci_unmap_single(card->pdev, hwdescr->buf_addr,
|
||||
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
|
||||
|
||||
if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
|
||||
|
@ -1037,34 +1098,33 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
|||
if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
|
||||
(status != SPIDER_NET_DESCR_FRAME_END) ) {
|
||||
if (netif_msg_rx_err(card))
|
||||
pr_err("%s: RX descriptor with unkown state %d\n",
|
||||
pr_err("%s: RX descriptor with unknown state %d\n",
|
||||
card->netdev->name, status);
|
||||
card->spider_stats.rx_desc_unk_state++;
|
||||
goto bad_desc;
|
||||
}
|
||||
|
||||
/* The cases we'll throw away the packet immediately */
|
||||
if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
|
||||
if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
|
||||
if (netif_msg_rx_err(card))
|
||||
pr_err("%s: error in received descriptor found, "
|
||||
"data_status=x%08x, data_error=x%08x\n",
|
||||
card->netdev->name,
|
||||
descr->data_status, descr->data_error);
|
||||
hwdescr->data_status, hwdescr->data_error);
|
||||
goto bad_desc;
|
||||
}
|
||||
|
||||
if (descr->dmac_cmd_status & 0xfefe) {
|
||||
if (hwdescr->dmac_cmd_status & 0xfefe) {
|
||||
pr_err("%s: bad status, cmd_status=x%08x\n",
|
||||
card->netdev->name,
|
||||
descr->dmac_cmd_status);
|
||||
pr_err("buf_addr=x%08x\n", descr->buf_addr);
|
||||
pr_err("buf_size=x%08x\n", descr->buf_size);
|
||||
pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
|
||||
pr_err("result_size=x%08x\n", descr->result_size);
|
||||
pr_err("valid_size=x%08x\n", descr->valid_size);
|
||||
pr_err("data_status=x%08x\n", descr->data_status);
|
||||
pr_err("data_error=x%08x\n", descr->data_error);
|
||||
pr_err("bus_addr=x%08x\n", descr->bus_addr);
|
||||
hwdescr->dmac_cmd_status);
|
||||
pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
|
||||
pr_err("buf_size=x%08x\n", hwdescr->buf_size);
|
||||
pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
|
||||
pr_err("result_size=x%08x\n", hwdescr->result_size);
|
||||
pr_err("valid_size=x%08x\n", hwdescr->valid_size);
|
||||
pr_err("data_status=x%08x\n", hwdescr->data_status);
|
||||
pr_err("data_error=x%08x\n", hwdescr->data_error);
|
||||
pr_err("which=%ld\n", descr - card->rx_chain.ring);
|
||||
|
||||
card->spider_stats.rx_desc_error++;
|
||||
|
@ -1073,12 +1133,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
|||
|
||||
/* Ok, we've got a packet in descr */
|
||||
spider_net_pass_skb_up(descr, card);
|
||||
descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
return 1;
|
||||
|
||||
bad_desc:
|
||||
dev_kfree_skb_irq(descr->skb);
|
||||
descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
descr->skb = NULL;
|
||||
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1247,6 +1308,33 @@ spider_net_set_mac(struct net_device *netdev, void *p)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_link_reset
|
||||
* @netdev: net device structure
|
||||
*
|
||||
* This is called when the PHY_LINK signal is asserted. For the blade this is
|
||||
* not connected so we should never get here.
|
||||
*
|
||||
*/
|
||||
static void
|
||||
spider_net_link_reset(struct net_device *netdev)
|
||||
{
|
||||
|
||||
struct spider_net_card *card = netdev_priv(netdev);
|
||||
|
||||
del_timer_sync(&card->aneg_timer);
|
||||
|
||||
/* clear interrupt, block further interrupts */
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACST,
|
||||
spider_net_read_reg(card, SPIDER_NET_GMACST));
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
|
||||
|
||||
/* reset phy and setup aneg */
|
||||
spider_net_setup_aneg(card);
|
||||
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_handle_error_irq - handles errors raised by an interrupt
|
||||
* @card: card structure
|
||||
|
@ -1359,8 +1447,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
|
|||
switch (i)
|
||||
{
|
||||
case SPIDER_NET_GTMFLLINT:
|
||||
if (netif_msg_intr(card) && net_ratelimit())
|
||||
pr_err("Spider TX RAM full\n");
|
||||
/* TX RAM full may happen on a usual case.
|
||||
* Logging is not needed. */
|
||||
show_error = 0;
|
||||
break;
|
||||
case SPIDER_NET_GRFDFLLINT: /* fallthrough */
|
||||
|
@ -1500,6 +1588,9 @@ spider_net_interrupt(int irq, void *ptr)
|
|||
if (status_reg & SPIDER_NET_TXINT)
|
||||
netif_rx_schedule(netdev);
|
||||
|
||||
if (status_reg & SPIDER_NET_LINKINT)
|
||||
spider_net_link_reset(netdev);
|
||||
|
||||
if (status_reg & SPIDER_NET_ERRINT )
|
||||
spider_net_handle_error_irq(card, status_reg);
|
||||
|
||||
|
@ -1540,6 +1631,11 @@ spider_net_init_card(struct spider_net_card *card)
|
|||
|
||||
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
|
||||
SPIDER_NET_CKRCTRL_RUN_VALUE);
|
||||
|
||||
/* trigger ETOMOD signal */
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
|
||||
spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1624,8 +1720,6 @@ spider_net_enable_card(struct spider_net_card *card)
|
|||
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
|
||||
SPIDER_NET_LENLMT_VALUE);
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACMODE,
|
||||
SPIDER_NET_MACMODE_VALUE);
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
|
||||
SPIDER_NET_OPMODE_VALUE);
|
||||
|
||||
|
@ -1641,98 +1735,6 @@ spider_net_enable_card(struct spider_net_card *card)
|
|||
SPIDER_NET_GDTBSTA);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_open - called upon ifonfig up
|
||||
* @netdev: interface device structure
|
||||
*
|
||||
* returns 0 on success, <0 on failure
|
||||
*
|
||||
* spider_net_open allocates all the descriptors and memory needed for
|
||||
* operation, sets up multicast list and enables interrupts
|
||||
*/
|
||||
int
|
||||
spider_net_open(struct net_device *netdev)
|
||||
{
|
||||
struct spider_net_card *card = netdev_priv(netdev);
|
||||
int result;
|
||||
|
||||
result = spider_net_init_chain(card, &card->tx_chain);
|
||||
if (result)
|
||||
goto alloc_tx_failed;
|
||||
card->low_watermark = NULL;
|
||||
|
||||
result = spider_net_init_chain(card, &card->rx_chain);
|
||||
if (result)
|
||||
goto alloc_rx_failed;
|
||||
|
||||
/* Allocate rx skbs */
|
||||
if (spider_net_alloc_rx_skbs(card))
|
||||
goto alloc_skbs_failed;
|
||||
|
||||
spider_net_set_multi(netdev);
|
||||
|
||||
/* further enhancement: setup hw vlan, if needed */
|
||||
|
||||
result = -EBUSY;
|
||||
if (request_irq(netdev->irq, spider_net_interrupt,
|
||||
IRQF_SHARED, netdev->name, netdev))
|
||||
goto register_int_failed;
|
||||
|
||||
spider_net_enable_card(card);
|
||||
|
||||
netif_start_queue(netdev);
|
||||
netif_carrier_on(netdev);
|
||||
netif_poll_enable(netdev);
|
||||
|
||||
return 0;
|
||||
|
||||
register_int_failed:
|
||||
spider_net_free_rx_chain_contents(card);
|
||||
alloc_skbs_failed:
|
||||
spider_net_free_chain(card, &card->rx_chain);
|
||||
alloc_rx_failed:
|
||||
spider_net_free_chain(card, &card->tx_chain);
|
||||
alloc_tx_failed:
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_setup_phy - setup PHY
|
||||
* @card: card structure
|
||||
*
|
||||
* returns 0 on success, <0 on failure
|
||||
*
|
||||
* spider_net_setup_phy is used as part of spider_net_probe. Sets
|
||||
* the PHY to 1000 Mbps
|
||||
**/
|
||||
static int
|
||||
spider_net_setup_phy(struct spider_net_card *card)
|
||||
{
|
||||
struct mii_phy *phy = &card->phy;
|
||||
|
||||
spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
|
||||
SPIDER_NET_DMASEL_VALUE);
|
||||
spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
|
||||
SPIDER_NET_PHY_CTRL_VALUE);
|
||||
phy->mii_id = 1;
|
||||
phy->dev = card->netdev;
|
||||
phy->mdio_read = spider_net_read_phy;
|
||||
phy->mdio_write = spider_net_write_phy;
|
||||
|
||||
mii_phy_probe(phy, phy->mii_id);
|
||||
|
||||
if (phy->def->ops->setup_forced)
|
||||
phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
|
||||
|
||||
phy->def->ops->enable_fiber(phy);
|
||||
|
||||
phy->def->ops->read_link(phy);
|
||||
pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
|
||||
phy->speed, phy->duplex==1 ? "Full" : "Half");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_download_firmware - loads firmware into the adapter
|
||||
* @card: card structure
|
||||
|
@ -1851,6 +1853,179 @@ spider_net_init_firmware(struct spider_net_card *card)
|
|||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_open - called upon ifonfig up
|
||||
* @netdev: interface device structure
|
||||
*
|
||||
* returns 0 on success, <0 on failure
|
||||
*
|
||||
* spider_net_open allocates all the descriptors and memory needed for
|
||||
* operation, sets up multicast list and enables interrupts
|
||||
*/
|
||||
int
|
||||
spider_net_open(struct net_device *netdev)
|
||||
{
|
||||
struct spider_net_card *card = netdev_priv(netdev);
|
||||
int result;
|
||||
|
||||
result = spider_net_init_firmware(card);
|
||||
if (result)
|
||||
goto init_firmware_failed;
|
||||
|
||||
/* start probing with copper */
|
||||
spider_net_setup_aneg(card);
|
||||
if (card->phy.def->phy_id)
|
||||
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
|
||||
|
||||
result = spider_net_init_chain(card, &card->tx_chain);
|
||||
if (result)
|
||||
goto alloc_tx_failed;
|
||||
card->low_watermark = NULL;
|
||||
|
||||
result = spider_net_init_chain(card, &card->rx_chain);
|
||||
if (result)
|
||||
goto alloc_rx_failed;
|
||||
|
||||
/* Allocate rx skbs */
|
||||
if (spider_net_alloc_rx_skbs(card))
|
||||
goto alloc_skbs_failed;
|
||||
|
||||
spider_net_set_multi(netdev);
|
||||
|
||||
/* further enhancement: setup hw vlan, if needed */
|
||||
|
||||
result = -EBUSY;
|
||||
if (request_irq(netdev->irq, spider_net_interrupt,
|
||||
IRQF_SHARED, netdev->name, netdev))
|
||||
goto register_int_failed;
|
||||
|
||||
spider_net_enable_card(card);
|
||||
|
||||
netif_start_queue(netdev);
|
||||
netif_carrier_on(netdev);
|
||||
netif_poll_enable(netdev);
|
||||
|
||||
return 0;
|
||||
|
||||
register_int_failed:
|
||||
spider_net_free_rx_chain_contents(card);
|
||||
alloc_skbs_failed:
|
||||
spider_net_free_chain(card, &card->rx_chain);
|
||||
alloc_rx_failed:
|
||||
spider_net_free_chain(card, &card->tx_chain);
|
||||
alloc_tx_failed:
|
||||
del_timer_sync(&card->aneg_timer);
|
||||
init_firmware_failed:
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_link_phy
|
||||
* @data: used for pointer to card structure
|
||||
*
|
||||
*/
|
||||
static void spider_net_link_phy(unsigned long data)
|
||||
{
|
||||
struct spider_net_card *card = (struct spider_net_card *)data;
|
||||
struct mii_phy *phy = &card->phy;
|
||||
|
||||
/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
|
||||
if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
|
||||
|
||||
pr_info("%s: link is down trying to bring it up\n", card->netdev->name);
|
||||
|
||||
switch (card->medium) {
|
||||
case BCM54XX_COPPER:
|
||||
/* enable fiber with autonegotiation first */
|
||||
if (phy->def->ops->enable_fiber)
|
||||
phy->def->ops->enable_fiber(phy, 1);
|
||||
card->medium = BCM54XX_FIBER;
|
||||
break;
|
||||
|
||||
case BCM54XX_FIBER:
|
||||
/* fiber didn't come up, try to disable fiber autoneg */
|
||||
if (phy->def->ops->enable_fiber)
|
||||
phy->def->ops->enable_fiber(phy, 0);
|
||||
card->medium = BCM54XX_UNKNOWN;
|
||||
break;
|
||||
|
||||
case BCM54XX_UNKNOWN:
|
||||
/* copper, fiber with and without failed,
|
||||
* retry from beginning */
|
||||
spider_net_setup_aneg(card);
|
||||
card->medium = BCM54XX_COPPER;
|
||||
break;
|
||||
}
|
||||
|
||||
card->aneg_count = 0;
|
||||
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
|
||||
return;
|
||||
}
|
||||
|
||||
/* link still not up, try again later */
|
||||
if (!(phy->def->ops->poll_link(phy))) {
|
||||
card->aneg_count++;
|
||||
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
|
||||
return;
|
||||
}
|
||||
|
||||
/* link came up, get abilities */
|
||||
phy->def->ops->read_link(phy);
|
||||
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACST,
|
||||
spider_net_read_reg(card, SPIDER_NET_GMACST));
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
|
||||
|
||||
if (phy->speed == 1000)
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
|
||||
else
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
|
||||
|
||||
card->aneg_count = 0;
|
||||
|
||||
pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
|
||||
phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half",
|
||||
phy->autoneg==1 ? "" : "no ");
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_setup_phy - setup PHY
|
||||
* @card: card structure
|
||||
*
|
||||
* returns 0 on success, <0 on failure
|
||||
*
|
||||
* spider_net_setup_phy is used as part of spider_net_probe.
|
||||
**/
|
||||
static int
|
||||
spider_net_setup_phy(struct spider_net_card *card)
|
||||
{
|
||||
struct mii_phy *phy = &card->phy;
|
||||
|
||||
spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
|
||||
SPIDER_NET_DMASEL_VALUE);
|
||||
spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
|
||||
SPIDER_NET_PHY_CTRL_VALUE);
|
||||
|
||||
phy->dev = card->netdev;
|
||||
phy->mdio_read = spider_net_read_phy;
|
||||
phy->mdio_write = spider_net_write_phy;
|
||||
|
||||
for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
|
||||
unsigned short id;
|
||||
id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
|
||||
if (id != 0x0000 && id != 0xffff) {
|
||||
if (!mii_phy_probe(phy, phy->mii_id)) {
|
||||
pr_info("Found %s.\n", phy->def->name);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_workaround_rxramfull - work around firmware bug
|
||||
* @card: card structure
|
||||
|
@ -1900,14 +2075,15 @@ spider_net_stop(struct net_device *netdev)
|
|||
netif_carrier_off(netdev);
|
||||
netif_stop_queue(netdev);
|
||||
del_timer_sync(&card->tx_timer);
|
||||
del_timer_sync(&card->aneg_timer);
|
||||
|
||||
/* disable/mask all interrupts */
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
|
||||
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
|
||||
|
||||
/* free_irq(netdev->irq, netdev);*/
|
||||
free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev);
|
||||
free_irq(netdev->irq, netdev);
|
||||
|
||||
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
|
||||
SPIDER_NET_DMA_TX_FEND_VALUE);
|
||||
|
@ -1919,8 +2095,6 @@ spider_net_stop(struct net_device *netdev)
|
|||
spider_net_release_tx_chain(card, 1);
|
||||
spider_net_free_rx_chain_contents(card);
|
||||
|
||||
spider_net_free_rx_chain_contents(card);
|
||||
|
||||
spider_net_free_chain(card, &card->tx_chain);
|
||||
spider_net_free_chain(card, &card->rx_chain);
|
||||
|
||||
|
@ -1952,8 +2126,6 @@ spider_net_tx_timeout_task(struct work_struct *work)
|
|||
|
||||
if (spider_net_setup_phy(card))
|
||||
goto out;
|
||||
if (spider_net_init_firmware(card))
|
||||
goto out;
|
||||
|
||||
spider_net_open(netdev);
|
||||
spider_net_kick_tx_dma(card);
|
||||
|
@ -2046,10 +2218,12 @@ spider_net_setup_netdev(struct spider_net_card *card)
|
|||
card->tx_timer.data = (unsigned long) card;
|
||||
netdev->irq = card->pdev->irq;
|
||||
|
||||
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
|
||||
card->aneg_count = 0;
|
||||
init_timer(&card->aneg_timer);
|
||||
card->aneg_timer.function = spider_net_link_phy;
|
||||
card->aneg_timer.data = (unsigned long) card;
|
||||
|
||||
card->tx_chain.num_desc = tx_descriptors;
|
||||
card->rx_chain.num_desc = rx_descriptors;
|
||||
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
|
||||
|
||||
spider_net_setup_netdev_ops(netdev);
|
||||
|
||||
|
@ -2098,8 +2272,11 @@ spider_net_alloc_card(void)
|
|||
{
|
||||
struct net_device *netdev;
|
||||
struct spider_net_card *card;
|
||||
size_t alloc_size;
|
||||
|
||||
netdev = alloc_etherdev(sizeof(struct spider_net_card));
|
||||
alloc_size = sizeof(struct spider_net_card) +
|
||||
(tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
|
||||
netdev = alloc_etherdev(alloc_size);
|
||||
if (!netdev)
|
||||
return NULL;
|
||||
|
||||
|
@ -2110,6 +2287,11 @@ spider_net_alloc_card(void)
|
|||
init_waitqueue_head(&card->waitq);
|
||||
atomic_set(&card->tx_timeout_task_counter, 0);
|
||||
|
||||
card->rx_chain.num_desc = rx_descriptors;
|
||||
card->rx_chain.ring = card->darray;
|
||||
card->tx_chain.num_desc = tx_descriptors;
|
||||
card->tx_chain.ring = card->darray + rx_descriptors;
|
||||
|
||||
return card;
|
||||
}
|
||||
|
||||
|
@ -2220,10 +2402,6 @@ spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
if (err)
|
||||
goto out_undo_pci;
|
||||
|
||||
err = spider_net_init_firmware(card);
|
||||
if (err)
|
||||
goto out_undo_pci;
|
||||
|
||||
err = spider_net_setup_netdev(card);
|
||||
if (err)
|
||||
goto out_undo_pci;
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
/*
|
||||
* Network device driver for Cell Processor-Based Blade
|
||||
* Network device driver for Cell Processor-Based Blade and Celleb platform
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2005
|
||||
* (C) Copyright 2006 TOSHIBA CORPORATION
|
||||
*
|
||||
* Authors : Utz Bacher <utz.bacher@de.ibm.com>
|
||||
* Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
|
||||
|
@ -24,7 +25,7 @@
|
|||
#ifndef _SPIDER_NET_H
|
||||
#define _SPIDER_NET_H
|
||||
|
||||
#define VERSION "1.6 B"
|
||||
#define VERSION "2.0 A"
|
||||
|
||||
#include "sungem_phy.h"
|
||||
|
||||
|
@ -50,6 +51,8 @@ extern char spider_net_driver_name[];
|
|||
#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
|
||||
|
||||
#define SPIDER_NET_TX_TIMER (HZ/5)
|
||||
#define SPIDER_NET_ANEG_TIMER (HZ)
|
||||
#define SPIDER_NET_ANEG_TIMEOUT 2
|
||||
|
||||
#define SPIDER_NET_RX_CSUM_DEFAULT 1
|
||||
|
||||
|
@ -104,6 +107,7 @@ extern char spider_net_driver_name[];
|
|||
|
||||
#define SPIDER_NET_GMACOPEMD 0x00000100
|
||||
#define SPIDER_NET_GMACLENLMT 0x00000108
|
||||
#define SPIDER_NET_GMACST 0x00000110
|
||||
#define SPIDER_NET_GMACINTEN 0x00000118
|
||||
#define SPIDER_NET_GMACPHYCTRL 0x00000120
|
||||
|
||||
|
@ -181,7 +185,8 @@ extern char spider_net_driver_name[];
|
|||
|
||||
/* pause frames: automatic, no upper retransmission count */
|
||||
/* outside loopback mode: ETOMOD signal dont matter, not connected */
|
||||
#define SPIDER_NET_OPMODE_VALUE 0x00000063
|
||||
/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
|
||||
#define SPIDER_NET_OPMODE_VALUE 0x00000067
|
||||
/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
|
||||
#define SPIDER_NET_LENLMT_VALUE 0x00000908
|
||||
|
||||
|
@ -333,9 +338,12 @@ enum spider_net_int2_status {
|
|||
/* We rely on flagged descriptor interrupts */
|
||||
#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
|
||||
|
||||
#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
|
||||
|
||||
#define SPIDER_NET_ERRINT ( 0xffffffff & \
|
||||
(~SPIDER_NET_TXINT) & \
|
||||
(~SPIDER_NET_RXINT) )
|
||||
(~SPIDER_NET_RXINT) & \
|
||||
(~SPIDER_NET_LINKINT) )
|
||||
|
||||
#define SPIDER_NET_GPREXEC 0x80000000
|
||||
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
|
||||
|
@ -356,8 +364,8 @@ enum spider_net_int2_status {
|
|||
#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
|
||||
#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
|
||||
|
||||
struct spider_net_descr {
|
||||
/* as defined by the hardware */
|
||||
/* Descriptor, as defined by the hardware */
|
||||
struct spider_net_hw_descr {
|
||||
u32 buf_addr;
|
||||
u32 buf_size;
|
||||
u32 next_descr_addr;
|
||||
|
@ -366,13 +374,15 @@ struct spider_net_descr {
|
|||
u32 valid_size; /* all zeroes for tx */
|
||||
u32 data_status;
|
||||
u32 data_error; /* all zeroes for tx */
|
||||
} __attribute__((aligned(32)));
|
||||
|
||||
/* used in the driver */
|
||||
struct spider_net_descr {
|
||||
struct spider_net_hw_descr *hwdescr;
|
||||
struct sk_buff *skb;
|
||||
u32 bus_addr;
|
||||
struct spider_net_descr *next;
|
||||
struct spider_net_descr *prev;
|
||||
} __attribute__((aligned(32)));
|
||||
};
|
||||
|
||||
struct spider_net_descr_chain {
|
||||
spinlock_t lock;
|
||||
|
@ -380,6 +390,7 @@ struct spider_net_descr_chain {
|
|||
struct spider_net_descr *tail;
|
||||
struct spider_net_descr *ring;
|
||||
int num_desc;
|
||||
struct spider_net_hw_descr *hwring;
|
||||
dma_addr_t dma_addr;
|
||||
};
|
||||
|
||||
|
@ -436,12 +447,16 @@ struct spider_net_card {
|
|||
struct pci_dev *pdev;
|
||||
struct mii_phy phy;
|
||||
|
||||
int medium;
|
||||
|
||||
void __iomem *regs;
|
||||
|
||||
struct spider_net_descr_chain tx_chain;
|
||||
struct spider_net_descr_chain rx_chain;
|
||||
struct spider_net_descr *low_watermark;
|
||||
|
||||
int aneg_count;
|
||||
struct timer_list aneg_timer;
|
||||
struct timer_list tx_timer;
|
||||
struct work_struct tx_timeout_task;
|
||||
atomic_t tx_timeout_task_counter;
|
||||
|
@ -452,6 +467,9 @@ struct spider_net_card {
|
|||
struct net_device_stats netdev_stats;
|
||||
struct spider_net_extra_stats spider_stats;
|
||||
struct spider_net_options options;
|
||||
|
||||
/* Must be last item in struct */
|
||||
struct spider_net_descr darray[0];
|
||||
};
|
||||
|
||||
#define pr_err(fmt,arg...) \
|
||||
|
|
|
@ -28,8 +28,6 @@ static int automatic_resume = 0; /* experimental .. better should be zero */
|
|||
static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
|
||||
static int fifo=0x8; /* don't change */
|
||||
|
||||
/* #define REALLY_SLOW_IO */
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
|
|
@ -310,6 +310,107 @@ static int bcm5411_init(struct mii_phy* phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
|
||||
{
|
||||
u16 ctl, adv;
|
||||
|
||||
phy->autoneg = 1;
|
||||
phy->speed = SPEED_10;
|
||||
phy->duplex = DUPLEX_HALF;
|
||||
phy->pause = 0;
|
||||
phy->advertising = advertise;
|
||||
|
||||
/* Setup standard advertise */
|
||||
adv = phy_read(phy, MII_ADVERTISE);
|
||||
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
|
||||
if (advertise & ADVERTISED_10baseT_Half)
|
||||
adv |= ADVERTISE_10HALF;
|
||||
if (advertise & ADVERTISED_10baseT_Full)
|
||||
adv |= ADVERTISE_10FULL;
|
||||
if (advertise & ADVERTISED_100baseT_Half)
|
||||
adv |= ADVERTISE_100HALF;
|
||||
if (advertise & ADVERTISED_100baseT_Full)
|
||||
adv |= ADVERTISE_100FULL;
|
||||
phy_write(phy, MII_ADVERTISE, adv);
|
||||
|
||||
/* Start/Restart aneg */
|
||||
ctl = phy_read(phy, MII_BMCR);
|
||||
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
|
||||
phy_write(phy, MII_BMCR, ctl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
|
||||
{
|
||||
u16 ctl;
|
||||
|
||||
phy->autoneg = 0;
|
||||
phy->speed = speed;
|
||||
phy->duplex = fd;
|
||||
phy->pause = 0;
|
||||
|
||||
ctl = phy_read(phy, MII_BMCR);
|
||||
ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
|
||||
|
||||
/* First reset the PHY */
|
||||
phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
|
||||
|
||||
/* Select speed & duplex */
|
||||
switch(speed) {
|
||||
case SPEED_10:
|
||||
break;
|
||||
case SPEED_100:
|
||||
ctl |= BMCR_SPEED100;
|
||||
break;
|
||||
case SPEED_1000:
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (fd == DUPLEX_FULL)
|
||||
ctl |= BMCR_FULLDPLX;
|
||||
phy_write(phy, MII_BMCR, ctl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_poll_link(struct mii_phy *phy)
|
||||
{
|
||||
u16 status;
|
||||
|
||||
(void)phy_read(phy, MII_BMSR);
|
||||
status = phy_read(phy, MII_BMSR);
|
||||
if ((status & BMSR_LSTATUS) == 0)
|
||||
return 0;
|
||||
if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int genmii_read_link(struct mii_phy *phy)
|
||||
{
|
||||
u16 lpa;
|
||||
|
||||
if (phy->autoneg) {
|
||||
lpa = phy_read(phy, MII_LPA);
|
||||
|
||||
if (lpa & (LPA_10FULL | LPA_100FULL))
|
||||
phy->duplex = DUPLEX_FULL;
|
||||
else
|
||||
phy->duplex = DUPLEX_HALF;
|
||||
if (lpa & (LPA_100FULL | LPA_100HALF))
|
||||
phy->speed = SPEED_100;
|
||||
else
|
||||
phy->speed = SPEED_10;
|
||||
phy->pause = 0;
|
||||
}
|
||||
/* On non-aneg, we assume what we put in BMCR is the speed,
|
||||
* though magic-aneg shouldn't prevent this case from occurring
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int generic_suspend(struct mii_phy* phy)
|
||||
{
|
||||
phy_write(phy, MII_BMCR, BMCR_PDOWN);
|
||||
|
@ -364,30 +465,6 @@ static int bcm5421_init(struct mii_phy* phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int bcm5421_enable_fiber(struct mii_phy* phy)
|
||||
{
|
||||
/* enable fiber mode */
|
||||
phy_write(phy, MII_NCONFIG, 0x9020);
|
||||
/* LEDs active in both modes, autosense prio = fiber */
|
||||
phy_write(phy, MII_NCONFIG, 0x945f);
|
||||
|
||||
/* switch off fibre autoneg */
|
||||
phy_write(phy, MII_NCONFIG, 0xfc01);
|
||||
phy_write(phy, 0x0b, 0x0004);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bcm5461_enable_fiber(struct mii_phy* phy)
|
||||
{
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0c);
|
||||
phy_write(phy, MII_BMCR, 0x4140);
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0b);
|
||||
phy_write(phy, MII_BMCR, 0x0140);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
|
||||
{
|
||||
u16 ctl, adv;
|
||||
|
@ -515,6 +592,155 @@ static int marvell88e1111_init(struct mii_phy* phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define BCM5421_MODE_MASK (1 << 5)
|
||||
|
||||
static int bcm5421_poll_link(struct mii_phy* phy)
|
||||
{
|
||||
u32 phy_reg;
|
||||
int mode;
|
||||
|
||||
/* find out in what mode we are */
|
||||
phy_write(phy, MII_NCONFIG, 0x1000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
|
||||
|
||||
if ( mode == BCM54XX_COPPER)
|
||||
return genmii_poll_link(phy);
|
||||
|
||||
/* try to find out wether we have a link */
|
||||
phy_write(phy, MII_NCONFIG, 0x2000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
if (phy_reg & 0x0020)
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int bcm5421_read_link(struct mii_phy* phy)
|
||||
{
|
||||
u32 phy_reg;
|
||||
int mode;
|
||||
|
||||
/* find out in what mode we are */
|
||||
phy_write(phy, MII_NCONFIG, 0x1000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
|
||||
|
||||
if ( mode == BCM54XX_COPPER)
|
||||
return bcm54xx_read_link(phy);
|
||||
|
||||
phy->speed = SPEED_1000;
|
||||
|
||||
/* find out wether we are running half- or full duplex */
|
||||
phy_write(phy, MII_NCONFIG, 0x2000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
if ( (phy_reg & 0x0080) >> 7)
|
||||
phy->duplex |= DUPLEX_HALF;
|
||||
else
|
||||
phy->duplex |= DUPLEX_FULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
|
||||
{
|
||||
/* enable fiber mode */
|
||||
phy_write(phy, MII_NCONFIG, 0x9020);
|
||||
/* LEDs active in both modes, autosense prio = fiber */
|
||||
phy_write(phy, MII_NCONFIG, 0x945f);
|
||||
|
||||
if (!autoneg) {
|
||||
/* switch off fibre autoneg */
|
||||
phy_write(phy, MII_NCONFIG, 0xfc01);
|
||||
phy_write(phy, 0x0b, 0x0004);
|
||||
}
|
||||
|
||||
phy->autoneg = autoneg;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define BCM5461_FIBER_LINK (1 << 2)
|
||||
#define BCM5461_MODE_MASK (3 << 1)
|
||||
|
||||
static int bcm5461_poll_link(struct mii_phy* phy)
|
||||
{
|
||||
u32 phy_reg;
|
||||
int mode;
|
||||
|
||||
/* find out in what mode we are */
|
||||
phy_write(phy, MII_NCONFIG, 0x7c00);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
|
||||
|
||||
if ( mode == BCM54XX_COPPER)
|
||||
return genmii_poll_link(phy);
|
||||
|
||||
/* find out wether we have a link */
|
||||
phy_write(phy, MII_NCONFIG, 0x7000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
if (phy_reg & BCM5461_FIBER_LINK)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define BCM5461_FIBER_DUPLEX (1 << 3)
|
||||
|
||||
static int bcm5461_read_link(struct mii_phy* phy)
|
||||
{
|
||||
u32 phy_reg;
|
||||
int mode;
|
||||
|
||||
/* find out in what mode we are */
|
||||
phy_write(phy, MII_NCONFIG, 0x7c00);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
|
||||
|
||||
if ( mode == BCM54XX_COPPER) {
|
||||
return bcm54xx_read_link(phy);
|
||||
}
|
||||
|
||||
phy->speed = SPEED_1000;
|
||||
|
||||
/* find out wether we are running half- or full duplex */
|
||||
phy_write(phy, MII_NCONFIG, 0x7000);
|
||||
phy_reg = phy_read(phy, MII_NCONFIG);
|
||||
|
||||
if (phy_reg & BCM5461_FIBER_DUPLEX)
|
||||
phy->duplex |= DUPLEX_FULL;
|
||||
else
|
||||
phy->duplex |= DUPLEX_HALF;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
|
||||
{
|
||||
/* select fiber mode, enable 1000 base-X registers */
|
||||
phy_write(phy, MII_NCONFIG, 0xfc0b);
|
||||
|
||||
if (autoneg) {
|
||||
/* enable fiber with no autonegotiation */
|
||||
phy_write(phy, MII_ADVERTISE, 0x01e0);
|
||||
phy_write(phy, MII_BMCR, 0x1140);
|
||||
} else {
|
||||
/* enable fiber with autonegotiation */
|
||||
phy_write(phy, MII_BMCR, 0x0140);
|
||||
}
|
||||
|
||||
phy->autoneg = autoneg;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
|
||||
{
|
||||
u16 ctl, adv;
|
||||
|
@ -645,113 +871,6 @@ static int marvell_read_link(struct mii_phy *phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
|
||||
{
|
||||
u16 ctl, adv;
|
||||
|
||||
phy->autoneg = 1;
|
||||
phy->speed = SPEED_10;
|
||||
phy->duplex = DUPLEX_HALF;
|
||||
phy->pause = 0;
|
||||
phy->advertising = advertise;
|
||||
|
||||
/* Setup standard advertise */
|
||||
adv = phy_read(phy, MII_ADVERTISE);
|
||||
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
|
||||
if (advertise & ADVERTISED_10baseT_Half)
|
||||
adv |= ADVERTISE_10HALF;
|
||||
if (advertise & ADVERTISED_10baseT_Full)
|
||||
adv |= ADVERTISE_10FULL;
|
||||
if (advertise & ADVERTISED_100baseT_Half)
|
||||
adv |= ADVERTISE_100HALF;
|
||||
if (advertise & ADVERTISED_100baseT_Full)
|
||||
adv |= ADVERTISE_100FULL;
|
||||
if (advertise & ADVERTISED_Pause)
|
||||
adv |= ADVERTISE_PAUSE_CAP;
|
||||
if (advertise & ADVERTISED_Asym_Pause)
|
||||
adv |= ADVERTISE_PAUSE_ASYM;
|
||||
phy_write(phy, MII_ADVERTISE, adv);
|
||||
|
||||
/* Start/Restart aneg */
|
||||
ctl = phy_read(phy, MII_BMCR);
|
||||
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
|
||||
phy_write(phy, MII_BMCR, ctl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
|
||||
{
|
||||
u16 ctl;
|
||||
|
||||
phy->autoneg = 0;
|
||||
phy->speed = speed;
|
||||
phy->duplex = fd;
|
||||
phy->pause = 0;
|
||||
|
||||
ctl = phy_read(phy, MII_BMCR);
|
||||
ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
|
||||
|
||||
/* First reset the PHY */
|
||||
phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
|
||||
|
||||
/* Select speed & duplex */
|
||||
switch(speed) {
|
||||
case SPEED_10:
|
||||
break;
|
||||
case SPEED_100:
|
||||
ctl |= BMCR_SPEED100;
|
||||
break;
|
||||
case SPEED_1000:
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (fd == DUPLEX_FULL)
|
||||
ctl |= BMCR_FULLDPLX;
|
||||
phy_write(phy, MII_BMCR, ctl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int genmii_poll_link(struct mii_phy *phy)
|
||||
{
|
||||
u16 status;
|
||||
|
||||
(void)phy_read(phy, MII_BMSR);
|
||||
status = phy_read(phy, MII_BMSR);
|
||||
if ((status & BMSR_LSTATUS) == 0)
|
||||
return 0;
|
||||
if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int genmii_read_link(struct mii_phy *phy)
|
||||
{
|
||||
u16 lpa;
|
||||
|
||||
if (phy->autoneg) {
|
||||
lpa = phy_read(phy, MII_LPA);
|
||||
|
||||
if (lpa & (LPA_10FULL | LPA_100FULL))
|
||||
phy->duplex = DUPLEX_FULL;
|
||||
else
|
||||
phy->duplex = DUPLEX_HALF;
|
||||
if (lpa & (LPA_100FULL | LPA_100HALF))
|
||||
phy->speed = SPEED_100;
|
||||
else
|
||||
phy->speed = SPEED_10;
|
||||
phy->pause = (phy->duplex == DUPLEX_FULL) &&
|
||||
((lpa & LPA_PAUSE) != 0);
|
||||
}
|
||||
/* On non-aneg, we assume what we put in BMCR is the speed,
|
||||
* though magic-aneg shouldn't prevent this case from occurring
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#define MII_BASIC_FEATURES \
|
||||
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
|
||||
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
|
||||
|
@ -885,8 +1004,8 @@ static struct mii_phy_ops bcm5421_phy_ops = {
|
|||
.suspend = generic_suspend,
|
||||
.setup_aneg = bcm54xx_setup_aneg,
|
||||
.setup_forced = bcm54xx_setup_forced,
|
||||
.poll_link = genmii_poll_link,
|
||||
.read_link = bcm54xx_read_link,
|
||||
.poll_link = bcm5421_poll_link,
|
||||
.read_link = bcm5421_read_link,
|
||||
.enable_fiber = bcm5421_enable_fiber,
|
||||
};
|
||||
|
||||
|
@ -923,8 +1042,8 @@ static struct mii_phy_ops bcm5461_phy_ops = {
|
|||
.suspend = generic_suspend,
|
||||
.setup_aneg = bcm54xx_setup_aneg,
|
||||
.setup_forced = bcm54xx_setup_forced,
|
||||
.poll_link = genmii_poll_link,
|
||||
.read_link = bcm54xx_read_link,
|
||||
.poll_link = bcm5461_poll_link,
|
||||
.read_link = bcm5461_read_link,
|
||||
.enable_fiber = bcm5461_enable_fiber,
|
||||
};
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ struct mii_phy_ops
|
|||
int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
|
||||
int (*poll_link)(struct mii_phy *phy);
|
||||
int (*read_link)(struct mii_phy *phy);
|
||||
int (*enable_fiber)(struct mii_phy *phy);
|
||||
int (*enable_fiber)(struct mii_phy *phy, int autoneg);
|
||||
};
|
||||
|
||||
/* Structure used to statically define an mii/gii based PHY */
|
||||
|
@ -26,6 +26,14 @@ struct mii_phy_def
|
|||
const struct mii_phy_ops* ops;
|
||||
};
|
||||
|
||||
enum {
|
||||
BCM54XX_COPPER,
|
||||
BCM54XX_FIBER,
|
||||
BCM54XX_GBIC,
|
||||
BCM54XX_SGMII,
|
||||
BCM54XX_UNKNOWN,
|
||||
};
|
||||
|
||||
/* An instance of a PHY, partially borrowed from mii_if_info */
|
||||
struct mii_phy
|
||||
{
|
||||
|
|
|
@ -657,7 +657,7 @@ tc35815_init_queues(struct net_device *dev)
|
|||
dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
|
||||
#endif
|
||||
} else {
|
||||
clear_page(lp->fd_buf);
|
||||
memset(lp->fd_buf, 0, PAGE_SIZE * FD_PAGE_NUM);
|
||||
#ifdef __mips__
|
||||
dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
|
||||
#endif
|
||||
|
@ -1732,6 +1732,11 @@ static void __exit tc35815_cleanup_module(void)
|
|||
{
|
||||
struct net_device *next_dev;
|
||||
|
||||
/*
|
||||
* TODO: implement a tc35815_driver.remove hook, and
|
||||
* move this code into that function. Then, delete
|
||||
* all root_tc35815_dev list handling code.
|
||||
*/
|
||||
while (root_tc35815_dev) {
|
||||
struct net_device *dev = root_tc35815_dev;
|
||||
next_dev = ((struct tc35815_local *)dev->priv)->next_module;
|
||||
|
@ -1740,6 +1745,9 @@ static void __exit tc35815_cleanup_module(void)
|
|||
free_netdev(dev);
|
||||
root_tc35815_dev = next_dev;
|
||||
}
|
||||
|
||||
pci_unregister_driver(&tc35815_driver);
|
||||
}
|
||||
|
||||
module_init(tc35815_init_module);
|
||||
module_exit(tc35815_cleanup_module);
|
||||
|
|
|
@ -4199,9 +4199,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
|
|||
ugeth->ug_info = ug_info;
|
||||
ugeth->dev = dev;
|
||||
|
||||
mac_addr = get_property(np, "mac-address", NULL);
|
||||
if (mac_addr == NULL)
|
||||
mac_addr = get_property(np, "local-mac-address", NULL);
|
||||
mac_addr = of_get_mac_address(np);
|
||||
if (mac_addr)
|
||||
memcpy(dev->dev_addr, mac_addr, 6);
|
||||
|
||||
|
|
|
@ -94,7 +94,6 @@
|
|||
#include <linux/device.h>
|
||||
|
||||
#undef COSA_SLOW_IO /* for testing purposes only */
|
||||
#undef REALLY_SLOW_IO
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/dma.h>
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
* Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
|
||||
* with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
|
||||
*/
|
||||
#undef REALLY_SLOW_IO /* most systems can safely undef this */
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/types.h>
|
||||
|
|