[PATCH] slab: remove SLAB_DMA
SLAB_DMA is an alias of GFP_DMA. This is the last user, so we remove the leftover comment too.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
e94b176609
commit
441e143e95
6 changed files with 8 additions and 11 deletions
|
@ -820,7 +820,7 @@ he_init_group(struct he_dev *he_dev, int group)
|
||||||
void *cpuaddr;
|
void *cpuaddr;
|
||||||
|
|
||||||
#ifdef USE_RBPS_POOL
|
#ifdef USE_RBPS_POOL
|
||||||
cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
|
cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
|
||||||
if (cpuaddr == NULL)
|
if (cpuaddr == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
#else
|
#else
|
||||||
|
@ -884,7 +884,7 @@ he_init_group(struct he_dev *he_dev, int group)
|
||||||
void *cpuaddr;
|
void *cpuaddr;
|
||||||
|
|
||||||
#ifdef USE_RBPL_POOL
|
#ifdef USE_RBPL_POOL
|
||||||
cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
|
cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
|
||||||
if (cpuaddr == NULL)
|
if (cpuaddr == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
#else
|
#else
|
||||||
|
@ -1724,7 +1724,7 @@ __alloc_tpd(struct he_dev *he_dev)
|
||||||
struct he_tpd *tpd;
|
struct he_tpd *tpd;
|
||||||
dma_addr_t dma_handle;
|
dma_addr_t dma_handle;
|
||||||
|
|
||||||
tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|SLAB_DMA, &dma_handle);
|
tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
|
||||||
if (tpd == NULL)
|
if (tpd == NULL)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
|
|
@ -1215,7 +1215,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
|
||||||
dst = page_address(bv->bv_page) + bv->bv_offset;
|
dst = page_address(bv->bv_page) + bv->bv_offset;
|
||||||
if (dasd_page_cache) {
|
if (dasd_page_cache) {
|
||||||
char *copy = kmem_cache_alloc(dasd_page_cache,
|
char *copy = kmem_cache_alloc(dasd_page_cache,
|
||||||
SLAB_DMA | __GFP_NOWARN);
|
GFP_DMA | __GFP_NOWARN);
|
||||||
if (copy && rq_data_dir(req) == WRITE)
|
if (copy && rq_data_dir(req) == WRITE)
|
||||||
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
|
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
|
||||||
if (copy)
|
if (copy)
|
||||||
|
|
|
@ -308,7 +308,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
|
||||||
dst = page_address(bv->bv_page) + bv->bv_offset;
|
dst = page_address(bv->bv_page) + bv->bv_offset;
|
||||||
if (dasd_page_cache) {
|
if (dasd_page_cache) {
|
||||||
char *copy = kmem_cache_alloc(dasd_page_cache,
|
char *copy = kmem_cache_alloc(dasd_page_cache,
|
||||||
SLAB_DMA | __GFP_NOWARN);
|
GFP_DMA | __GFP_NOWARN);
|
||||||
if (copy && rq_data_dir(req) == WRITE)
|
if (copy && rq_data_dir(req) == WRITE)
|
||||||
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
|
memcpy(copy + bv->bv_offset, dst, bv->bv_len);
|
||||||
if (copy)
|
if (copy)
|
||||||
|
|
|
@ -93,7 +93,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
|
/* sometimes alloc/free could use kmalloc with GFP_DMA, for
|
||||||
* better sharing and to leverage mm/slab.c intelligence.
|
* better sharing and to leverage mm/slab.c intelligence.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|
|
@ -18,9 +18,6 @@ typedef struct kmem_cache kmem_cache_t;
|
||||||
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
|
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
|
||||||
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
|
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
|
||||||
|
|
||||||
/* flags for kmem_cache_alloc() */
|
|
||||||
#define SLAB_DMA GFP_DMA
|
|
||||||
|
|
||||||
/* flags to pass to kmem_cache_create().
|
/* flags to pass to kmem_cache_create().
|
||||||
* The first 3 are only valid when the allocator has been built with
|
* The first 3 are only valid when the allocator has been built with
|
||||||
* SLAB_DEBUG_SUPPORT.
|
* SLAB_DEBUG_SUPPORT.
|
||||||
|
|
|
@ -2637,7 +2637,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
|
||||||
|
|
||||||
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
|
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
|
||||||
{
|
{
|
||||||
if (flags & SLAB_DMA)
|
if (flags & GFP_DMA)
|
||||||
BUG_ON(!(cachep->gfpflags & GFP_DMA));
|
BUG_ON(!(cachep->gfpflags & GFP_DMA));
|
||||||
else
|
else
|
||||||
BUG_ON(cachep->gfpflags & GFP_DMA);
|
BUG_ON(cachep->gfpflags & GFP_DMA);
|
||||||
|
@ -2721,7 +2721,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
|
||||||
* Be lazy and only check for valid flags here, keeping it out of the
|
* Be lazy and only check for valid flags here, keeping it out of the
|
||||||
* critical path in kmem_cache_alloc().
|
* critical path in kmem_cache_alloc().
|
||||||
*/
|
*/
|
||||||
BUG_ON(flags & ~(SLAB_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
|
BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
|
||||||
if (flags & __GFP_NO_GROW)
|
if (flags & __GFP_NO_GROW)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue