[PATCH] mempool: use common mempool kmalloc allocator
This patch changes several mempool users, all of which are basically just
wrappers around kmalloc(), to use the common mempool_kmalloc/mempool_kfree
allocators rather than their own wrapper functions, removing a bunch of
duplicated code.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 0eaae62aba
parent 53184082b0

7 changed files with 34 additions and 129 deletions
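For reference, the common allocator pair this patch switches to was introduced by the parent commit (53184082b0). The following is a minimal sketch of those helpers as the mempool API of that era defined them; exact comments and formatting may differ from the tree:

/*
 * A mempool_alloc_t/mempool_free_t pair backed by kmalloc()/kfree().
 * The object size travels through the opaque pool_data pointer.
 * (Sketch of the helpers in mm/mempool.c from the parent commit.)
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;
        return kmalloc(size, gfp_mask);
}

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}

/* Convenience wrapper (include/linux/mempool.h) used in every hunk below. */
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
{
        return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
                              (void *) size);
}

With these in place, each conversion below collapses a per-driver alloc/free wrapper pair plus a four-argument mempool_create() call into a single mempool_create_kmalloc_pool(min_nr, size) call.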
drivers/block/pktcdvd.c

@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
 	return 1;
 }
 
-static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
-{
-	return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
-}
-
-static void pkt_rb_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
-
 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
 {
 	struct rb_node *n = rb_next(&node->rb_node);
@@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file)
 }
 
 
-static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
-}
-
-static void psd_pool_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
-
 static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
 {
 	struct packet_stacked_data *psd = bio->bi_private;
@@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
 	if (!pd)
 		return ret;
 
-	pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
+						sizeof(struct pkt_rb_node));
 	if (!pd->rb_pool)
 		goto out_mem;
 
@@ -2639,7 +2620,8 @@ static int __init pkt_init(void)
 {
 	int ret;
 
-	psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
+					sizeof(struct packet_stacked_data));
 	if (!psd_pool)
 		return -ENOMEM;
 
drivers/md/bitmap.c

@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
 }
 
 #define WRITE_POOL_SIZE 256
-/* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct page_list), gfp_flags);
-}
-
-static void write_pool_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
 
 /*
  * just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
 	spin_lock_init(&bitmap->write_lock);
 	INIT_LIST_HEAD(&bitmap->complete_pages);
 	init_waitqueue_head(&bitmap->write_wait);
-	bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
-				write_pool_free, NULL);
+	bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
+						sizeof(struct page_list));
 	err = -ENOMEM;
 	if (!bitmap->write_pool)
 		goto error;
drivers/md/dm-io.c

@@ -32,16 +32,6 @@ struct io {
 static unsigned _num_ios;
 static mempool_t *_io_pool;
 
-static void *alloc_io(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct io), gfp_mask);
-}
-
-static void free_io(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages; /* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios)
 
 	} else {
 		/* create new pool */
-		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
+		_io_pool = mempool_create_kmalloc_pool(new_ios,
+						       sizeof(struct io));
 		if (!_io_pool)
 			return -ENOMEM;
 
drivers/md/dm-raid1.c

@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
-static void *region_alloc(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct region), gfp_mask);
-}
-
-static void region_free(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 
-	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
-					 region_free, NULL);
+	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+						      sizeof(struct region));
 	if (!rh->region_pool) {
 		vfree(rh->buckets);
 		rh->buckets = NULL;
drivers/s390/scsi/zfcp_aux.c

@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
 	device_unregister(&unit->sysfs_device);
 }
 
-static void *
-zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
-{
-	return kmalloc((size_t) size, gfp_mask);
-}
-
-static void
-zfcp_mempool_free(void *element, void *size)
-{
-	kfree(element);
-}
-
 /*
  * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
  * commands.
@@ -853,51 +841,39 @@ static int
 zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
 	adapter->pool.fsf_req_erp =
-		mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_erp)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_erp)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_scsi =
-		mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_scsi)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_scsi)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_abort =
-		mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_abort)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_abort)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_status_read =
-		mempool_create(ZFCP_POOL_STATUS_READ_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free,
-			       (void *) sizeof(struct zfcp_fsf_req));
-
-	if (NULL == adapter->pool.fsf_req_status_read)
+		mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+				sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.fsf_req_status_read)
 		return -ENOMEM;
 
 	adapter->pool.data_status_read =
-		mempool_create(ZFCP_POOL_STATUS_READ_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free,
-			       (void *) sizeof(struct fsf_status_read_buffer));
-
-	if (NULL == adapter->pool.data_status_read)
+		mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+				sizeof(struct fsf_status_read_buffer));
+	if (!adapter->pool.data_status_read)
 		return -ENOMEM;
 
 	adapter->pool.data_gid_pn =
-		mempool_create(ZFCP_POOL_DATA_GID_PN_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_gid_pn_data));
-
-	if (NULL == adapter->pool.data_gid_pn)
+		mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR,
+				sizeof(struct zfcp_gid_pn_data));
+	if (!adapter->pool.data_gid_pn)
 		return -ENOMEM;
 
 	return 0;
drivers/scsi/lpfc/lpfc_mem.c

@@ -38,18 +38,6 @@
 #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
 
-static void *
-lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc((unsigned long)data, gfp_flags);
-}
-
-static void
-lpfc_pool_kfree(void *obj, void *data)
-{
-	kfree(obj);
-}
-
 int
 lpfc_mem_alloc(struct lpfc_hba * phba)
 {
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 		pool->current_count++;
 	}
 
-	phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-				lpfc_pool_kmalloc, lpfc_pool_kfree,
-				(void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							sizeof(LPFC_MBOXQ_t));
 	if (!phba->mbox_mem_pool)
 		goto fail_free_mbuf_pool;
 
-	phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-				lpfc_pool_kmalloc, lpfc_pool_kfree,
-				(void *)(unsigned long)sizeof(struct lpfc_nodelist));
+	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
 
fs/bio.c
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 
 /*
  * create memory pools for biovec's in a bio_set.
@@ -1254,8 +1244,8 @@ static int __init init_bio(void)
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+					     sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
 