Merge branch 'skb_alloc_pages'
Alexander Duyck says:

====================
Replace __skb_alloc_pages with simpler function

This patch series replaces __skb_alloc_pages with a much simpler function,
__dev_alloc_pages. The main difference between the two is that
__skb_alloc_pages had an sk_buff pointer that was being passed as NULL in
all places where it was called. In a couple of cases the NULL was passed by
variable, and this led to unnecessary code being run. As such, in order to
simplify things, the __dev_alloc_pages call only takes a mask and the page
order being requested. In addition, it takes advantage of several behaviors
already built into the page allocator so that it can just set GFP flags
unconditionally.

v2: Renamed functions to dev_alloc_page(s) instead of netdev_alloc_page(s)
    Removed __GFP_COLD flag from usb code as it was redundant

v3: Update patch descriptions and subjects to match changes in v2
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit ee47ad42c5

8 changed files with 48 additions and 43 deletions
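Before the per-file hunks, here is a minimal, illustrative sketch of the call-site conversion the series performs. It is not part of the diff: the function example_rx_refill() and its scaffolding are hypothetical, while the helper names (__skb_alloc_pages, __dev_alloc_pages, dev_alloc_pages, dev_alloc_page) are the ones introduced or removed by the patches below.

/* Illustrative sketch only -- not taken from any file in this diff.
 * example_rx_refill() is a hypothetical driver Rx refill helper.
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>

static inline struct page *example_rx_refill(unsigned int order)
{
	struct page *page;

	/* Old style: an sk_buff pointer (almost always NULL) had to be
	 * passed, and callers OR'ed in __GFP_COLD/__GFP_COMP themselves:
	 *
	 *	page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
	 *				 NULL, order);
	 */

	/* New style: only a gfp mask and an order are passed; __GFP_COLD,
	 * __GFP_COMP and __GFP_MEMALLOC are set unconditionally inside
	 * __dev_alloc_pages(), relying on existing page-allocator behavior
	 * for the order-0 and __GFP_NOMEMALLOC cases.
	 */
	page = __dev_alloc_pages(GFP_ATOMIC, order);

	/* Convenience wrappers cover the common GFP_ATOMIC case:
	 *	page = dev_alloc_pages(order);	compound page of given order
	 *	page = dev_alloc_page();	single order-0 page
	 */
	return page;	/* NULL if no memory was available */
}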
@@ -576,7 +576,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 	__be64 *d = &q->desc[q->pidx];
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-	gfp |= __GFP_NOWARN | __GFP_COLD;
+	gfp |= __GFP_NOWARN;
 
 	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;

@@ -585,7 +585,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 	 * Prefer large buffers
 	 */
 	while (n) {
-		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
+		pg = __dev_alloc_pages(gfp, s->fl_pg_order);
 		if (unlikely(!pg)) {
 			q->large_alloc_failed++;
 			break;	/* fall back to single pages */

@@ -615,7 +615,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
 alloc_small_pages:
 	while (n--) {
-		pg = __skb_alloc_page(gfp, NULL);
+		pg = __dev_alloc_page(gfp);
 		if (unlikely(!pg)) {
 			q->alloc_failed++;
 			break;
@@ -602,6 +602,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 */
 	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
 
+	gfp |= __GFP_NOWARN;
+
 	/*
 	 * If we support large pages, prefer large buffers and fail over to
 	 * small pages if we can't allocate large pages to satisfy the refill.

@@ -612,8 +614,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 		goto alloc_small_pages;
 
 	while (n) {
-		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
+		page = __dev_alloc_pages(gfp, FL_PG_ORDER);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed inour attempt to allocate a "large

@@ -657,7 +658,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 alloc_small_pages:
 	while (n--) {
-		page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
+		page = __dev_alloc_page(gfp);
 		if (unlikely(!page)) {
 			fl->alloc_failed++;
 			break;
@@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+	page = dev_alloc_page();
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
@@ -6988,7 +6988,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+	page = dev_alloc_page();
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
@@ -1444,8 +1444,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
 	/* alloc new page for storage */
 	if (likely(!page)) {
-		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
-					 bi->skb, ixgbe_rx_pg_order(rx_ring));
+		page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
 		if (unlikely(!page)) {
 			rx_ring->rx_stats.alloc_rx_page_failed++;
 			return false;
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
 	struct page *page;
 	int err;
 
-	page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
+	page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
 	if (!page)
 		return -ENOMEM;
 

@@ -212,7 +212,7 @@ static void rx_complete(struct urb *req)
 	if (page)
 		put_page(page);
 	if (req)
-		rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
+		rx_submit(pnd, req, GFP_ATOMIC);
 }
 
 static int usbpn_close(struct net_device *dev);

@@ -231,7 +231,7 @@ static int usbpn_open(struct net_device *dev)
 	for (i = 0; i < rxq_size; i++) {
 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
 			usb_free_urb(req);
 			usbpn_close(dev);
 			return -ENOMEM;
@@ -303,7 +303,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 	struct page *page;
 	int err;
 
-	page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
+	page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
 	if (!page)
 		return -ENOMEM;
 

@@ -377,7 +377,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
 	if (page)
 		put_page(page);
 	if (req)
-		pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
+		pn_rx_submit(fp, req, GFP_ATOMIC);
 }
 
 /*-------------------------------------------------------------------------*/

@@ -437,7 +437,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
 		netif_carrier_on(dev);
 		for (i = 0; i < phonet_rxq_size; i++)
-			pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
+			pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
 	}
 	spin_unlock(&port->lock);
 	return 0;
@@ -2185,46 +2185,51 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 }
 
 /**
- * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
- * @order: size of the allocation
+ * __dev_alloc_pages - allocate page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
+ * @order: size of the allocation
  *
- * Allocate a new page.
+ * Allocate a new page.
  *
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
  */
-static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
-					      struct sk_buff *skb,
-					      unsigned int order)
+static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+					     unsigned int order)
 {
-	struct page *page;
+	/* This piece of code contains several assumptions.
+	 * 1.  This is for device Rx, therefor a cold page is preferred.
+	 * 2.  The expectation is the user wants a compound page.
+	 * 3.  If requesting a order 0 page it will not be compound
+	 *     due to the check to see if order has a value in prep_new_page
+	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
+	 *     code in gfp_to_alloc_flags that should be enforcing this.
+	 */
+	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
 
-	gfp_mask |= __GFP_COLD;
+	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+}
 
-	if (!(gfp_mask & __GFP_NOMEMALLOC))
-		gfp_mask |= __GFP_MEMALLOC;
-
-	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
-	if (skb && page && page->pfmemalloc)
-		skb->pfmemalloc = true;
-
-	return page;
+static inline struct page *dev_alloc_pages(unsigned int order)
+{
+	return __dev_alloc_pages(GFP_ATOMIC, order);
 }
 
 /**
- * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ * __dev_alloc_page - allocate a page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
  *
- * Allocate a new page.
+ * Allocate a new page.
  *
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
  */
-static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
-					    struct sk_buff *skb)
+static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
 {
-	return __skb_alloc_pages(gfp_mask, skb, 0);
+	return __dev_alloc_pages(gfp_mask, 0);
+}
+
+static inline struct page *dev_alloc_page(void)
+{
+	return __dev_alloc_page(GFP_ATOMIC);
 }
 
 /**