Merge branch 'master' of ../netdev-next/
commit 986eaa9041
20 changed files with 81 additions and 91 deletions
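The hunks below all convert drivers from open-coded access to skb fragment internals (frag->page, frag->page_offset) to the frag accessors: skb_frag_dma_map() for DMA mapping, skb_frag_address()/skb_frag_page() for CPU access, and skb_frag_set_page()/__skb_frag_ref() on the receive side. A minimal before/after sketch of the mapping pattern, not taken from any driver in this merge; the helpers and frag fields are the kernel API of this era, the function names and context are illustrative only:

    #include <linux/pci.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    /* Old style: the driver digs the page and page offset out of the frag itself. */
    static dma_addr_t map_frag_old(struct pci_dev *pdev, skb_frag_t *frag)
    {
            return pci_map_page(pdev, frag->page, frag->page_offset,
                                frag->size, PCI_DMA_TODEVICE);
    }

    /* New style: skb_frag_dma_map() hides the frag layout.  Its offset
     * argument is relative to the fragment, so 0 means "map the whole
     * frag starting at its own page_offset".
     */
    static dma_addr_t map_frag_new(struct pci_dev *pdev, const skb_frag_t *frag)
    {
            return skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
                                    DMA_TO_DEVICE);
    }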
@@ -2179,9 +2179,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

 			vp->tx_ring[entry].frag[i+1].addr =
-					cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
-							(void*)page_address(frag->page) + frag->page_offset,
-							frag->size, PCI_DMA_TODEVICE));
+					cpu_to_le32(pci_map_single(
+						VORTEX_PCI(vp),
+						(void *)skb_frag_address(frag),
+						frag->size, PCI_DMA_TODEVICE));

 			if (i == skb_shinfo(skb)->nr_frags-1)
 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
@@ -113,9 +113,8 @@ static void greth_print_tx_packet(struct sk_buff *skb)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

 		print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
-			       phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
-			       skb_shinfo(skb)->frags[i].page_offset,
-			       length, true);
+			       skb_frag_address(&skb_shinfo(skb)->frags[i]),
+			       skb_shinfo(skb)->frags[i].size, true);
 	}
 }

@@ -528,11 +527,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)

 		greth_write_bd(&bdp->stat, status);

-		dma_addr = dma_map_page(greth->dev,
-					frag->page,
-					frag->page_offset,
-					frag->size,
-					DMA_TO_DEVICE);
+		dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+					    DMA_TO_DEVICE);

 		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
 			goto frag_map_error;
@@ -2485,9 +2485,9 @@ static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
 			info = ap->skb->tx_skbuff + idx;
 			desc = ap->tx_ring + idx;

-			mapping = pci_map_page(ap->pdev, frag->page,
-					       frag->page_offset, frag->size,
+			mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
+						   frag->size,
 					       PCI_DMA_TODEVICE);

 			flagsize = (frag->size << 16);
 			if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -2180,11 +2180,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,

 		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
 		buffer_info->length = frag->size;
-		buffer_info->dma =
-			pci_map_page(adapter->pdev, frag->page,
-					frag->page_offset,
-					buffer_info->length,
-					PCI_DMA_TODEVICE);
+		buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+						    frag, 0,
+						    buffer_info->length,
+						    PCI_DMA_TODEVICE);
 		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
 			ATL1C_PCIMAP_TODEVICE);
@@ -1765,12 +1765,11 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 				MAX_TX_BUF_LEN : buf_len;
 			buf_len -= tx_buffer->length;

-			tx_buffer->dma =
-				pci_map_page(adapter->pdev, frag->page,
-						frag->page_offset +
-						(i * MAX_TX_BUF_LEN),
-						tx_buffer->length,
-						PCI_DMA_TODEVICE);
+			tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
+							  frag,
+							  (i * MAX_TX_BUF_LEN),
+							  tx_buffer->length,
+							  PCI_DMA_TODEVICE);
 			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
 			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -2283,9 +2283,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
 				ATL1_MAX_TX_BUF_LEN : buf_len;
 			buf_len -= buffer_info->length;
-			buffer_info->dma = pci_map_page(adapter->pdev,
-				frag->page,
-				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
+			buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+				frag, i * ATL1_MAX_TX_BUF_LEN,
 				buffer_info->length, PCI_DMA_TODEVICE);

 			if (++next_to_use == tpd_ring->count)
@@ -2753,8 +2753,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)

 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 		txqent->vector[vect_id].length = htons(size);
-		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
-					frag->page_offset, size, DMA_TO_DEVICE);
+		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
+					    0, size, DMA_TO_DEVICE);
 		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -591,9 +591,9 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= frag->size;
 		enic_queue_wq_desc_cont(wq, skb,
-			pci_map_page(enic->pdev, frag->page,
-				frag->page_offset, frag->size,
+			skb_frag_dma_map(&enic->pdev->dev,
+					 frag, 0, frag->size,
 				PCI_DMA_TODEVICE),
 			frag->size,
 			(len_left == 0),	/* EOP? */
 			loopback);
@@ -705,14 +705,14 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= frag->size;
 		frag_len_left = frag->size;
-		offset = frag->page_offset;
+		offset = 0;

 		while (frag_len_left) {
 			len = min(frag_len_left,
 				(unsigned int)WQ_ENET_MAX_DESC_LEN);
-			dma_addr = pci_map_page(enic->pdev, frag->page,
+			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 				offset, len,
 				PCI_DMA_TODEVICE);
 			enic_queue_wq_desc_cont(wq, skb,
 				dma_addr,
 				len,
@@ -638,8 +638,8 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
-		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+		busaddr = skb_frag_dma_map(dev, frag, 0,
				       frag->size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		wrb = queue_head_node(txq);
@@ -1066,7 +1066,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
 		skb->tail += curr_frag_len;
 	} else {
 		skb_shinfo(skb)->nr_frags = 1;
-		skb_shinfo(skb)->frags[0].page = page_info->page;
+		skb_frag_set_page(skb, 0, page_info->page);
 		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
 		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
@@ -1091,7 +1091,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
 		if (page_info->page_offset == 0) {
 			/* Fresh page */
 			j++;
-			skb_shinfo(skb)->frags[j].page = page_info->page;
+			skb_frag_set_page(skb, j, page_info->page);
 			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
 			skb_shinfo(skb)->frags[j].size = 0;
@@ -1173,7 +1173,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 		if (i == 0 || page_info->page_offset == 0) {
 			/* First frag or Fresh page */
 			j++;
-			skb_shinfo(skb)->frags[j].page = page_info->page;
+			skb_frag_set_page(skb, j, page_info->page);
 			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
 			skb_shinfo(skb)->frags[j].size = 0;
@@ -2140,11 +2140,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			if (i == nr_frags - 1)
 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

-			bufaddr = dma_map_page(&priv->ofdev->dev,
-					skb_shinfo(skb)->frags[i].page,
-					skb_shinfo(skb)->frags[i].page_offset,
+			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+					&skb_shinfo(skb)->frags[i],
+					0,
					length,
					DMA_TO_DEVICE);

 			/* set the TxBD length and buffer pointer */
 			txbdp->bufPtr = bufaddr;
@@ -2911,9 +2911,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,

 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = frag->page_offset;
+		offset = 0;

 		while (len) {
+			unsigned long bufend;
 			i++;
 			if (unlikely(i == tx_ring->count))
 				i = 0;
@@ -2927,18 +2928,19 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			/* Workaround for potential 82544 hang in PCI-X.
 			 * Avoid terminating buffers within evenly-aligned
 			 * dwords. */
+			bufend = (unsigned long)
+				page_to_phys(skb_frag_page(frag));
+			bufend += offset + size - 1;
 			if (unlikely(adapter->pcix_82544 &&
-			    !((unsigned long)(page_to_phys(frag->page) + offset
-					      + size - 1) & 4) &&
-			    size > 4))
+				     !(bufend & 4) &&
+				     size > 4))
 				size -= 4;

 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->mapped_as_page = true;
-			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
-							offset, size,
-							DMA_TO_DEVICE);
+			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+						offset, size, DMA_TO_DEVICE);
 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 			buffer_info->next_to_watch = i;
@@ -4677,7 +4677,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,

 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = frag->page_offset;
+		offset = 0;

 		while (len) {
 			i++;
@@ -4690,9 +4690,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->next_to_watch = i;
-			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
-							offset, size,
-							DMA_TO_DEVICE);
+			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+						offset, size, DMA_TO_DEVICE);
 			buffer_info->mapped_as_page = true;
 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
@@ -4174,10 +4174,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = dma_map_page(dev,
-						frag->page,
-						frag->page_offset,
-						len,
+		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
						DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, buffer_info->dma))
 			goto dma_error;
@@ -2061,10 +2061,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = dma_map_page(&pdev->dev,
-						frag->page,
-						frag->page_offset,
-						len,
+		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						DMA_TO_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
@@ -1341,7 +1341,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,

 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = frag->page_offset;
+		offset = 0;

 		while (len) {
 			i++;
@@ -1361,8 +1361,8 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 			buffer_info->time_stamp = jiffies;
 			buffer_info->mapped_as_page = true;
 			buffer_info->dma =
-				dma_map_page(&pdev->dev, frag->page,
-					     offset, size, DMA_TO_DEVICE);
+				skb_frag_dma_map(&pdev->dev, frag, offset, size,
+						 DMA_TO_DEVICE);
 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 			buffer_info->next_to_watch = 0;
@@ -6494,8 +6494,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 			offset = 0;
 			tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;

-			dma = dma_map_page(dev, frag->page, frag->page_offset,
-					   size, DMA_TO_DEVICE);
+			dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 			if (dma_mapping_error(dev, dma))
 				goto dma_error;

@@ -2918,18 +2918,16 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,

 		frag = &skb_shinfo(skb)->frags[f];
 		len = min((unsigned int)frag->size, total);
-		offset = frag->page_offset;
+		offset = 0;

 		while (len) {
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
-							   frag->page,
-							   offset,
-							   size,
-							   DMA_TO_DEVICE);
+			tx_buffer_info->dma =
+				skb_frag_dma_map(&adapter->pdev->dev, frag,
+						 offset, size, DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
 			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
@@ -2146,8 +2146,11 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		prev_tx = put_tx;
 		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-		np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
-						   PCI_DMA_TODEVICE);
+		np->put_tx_ctx->dma = skb_frag_dma_map(
+						&np->pci_dev->dev,
+						frag, offset,
+						bcnt,
+						PCI_DMA_TODEVICE);
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 0;
 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2257,8 +2260,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		prev_tx = put_tx;
 		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-		np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
-						   PCI_DMA_TODEVICE);
+		np->put_tx_ctx->dma = skb_frag_dma_map(
+						&np->pci_dev->dev,
+						frag, offset,
+						bcnt,
+						PCI_DMA_TODEVICE);
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 0;
 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -784,8 +784,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,

 			len = this_frag->size;
 			mapping = dma_map_single(&cp->pdev->dev,
-						 ((void *) page_address(this_frag->page) +
-						  this_frag->page_offset),
+						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

@@ -2048,8 +2048,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		skb->truesize += hlen - swivel;
 		skb->len += hlen - swivel;

-		get_page(page->buffer);
-		frag->page = page->buffer;
+		__skb_frag_set_page(frag, page->buffer);
+		__skb_frag_ref(frag);
 		frag->page_offset = off;
 		frag->size = hlen - swivel;

@@ -2072,8 +2072,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		skb->len += hlen;
 		frag++;

-		get_page(page->buffer);
-		frag->page = page->buffer;
+		__skb_frag_set_page(frag, page->buffer);
+		__skb_frag_ref(frag);
 		frag->page_offset = 0;
 		frag->size = hlen;
 		RX_USED_ADD(page, hlen + cp->crc_size);
@@ -2830,9 +2830,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

 		len = fragp->size;
-		mapping = pci_map_page(cp->pdev, fragp->page,
-				       fragp->page_offset, len,
-				       PCI_DMA_TODEVICE);
+		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
+					   PCI_DMA_TODEVICE);

 		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
 		if (unlikely(tabort)) {
@@ -2843,7 +2842,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 				     ctrl, 0);
 			entry = TX_DESC_NEXT(ring, entry);

-			addr = cas_page_map(fragp->page);
+			addr = cas_page_map(skb_frag_page(fragp));
 			memcpy(tx_tiny_buf(cp, ring, entry),
 			       addr + fragp->page_offset + len - tabort,
 			       tabort);
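The receive-side hunks above (be2net, cassini) stop writing frag->page directly and go through the frag page setters instead. A hedged sketch of that pattern; attach_rx_page() and its arguments are hypothetical, only the __skb_frag_set_page()/__skb_frag_ref() calls and the direct page_offset/size stores mirror the hunks:

    #include <linux/skbuff.h>

    /* Hypothetical helper: attach a driver-owned page to frag slot i of an
     * skb.  __skb_frag_set_page() replaces the direct frag->page store and
     * __skb_frag_ref() replaces the open-coded get_page() on that page;
     * page_offset and size are still plain fields at this point.
     */
    static void attach_rx_page(struct sk_buff *skb, int i, struct page *page,
                               unsigned int off, unsigned int len)
    {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            __skb_frag_set_page(frag, page);
            __skb_frag_ref(frag);
            frag->page_offset = off;
            frag->size = len;
    }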