nvmet/fc: Use sgl_alloc() and sgl_free()

Use the sgl_alloc() and sgl_free() functions instead of open-coding
that functionality.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: James Smart <james.smart@broadcom.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 4442b56fb5 (parent 8cd579d279)
Author: Bart Van Assche, 2018-01-05 08:26:48 -08:00; committed by Jens Axboe
2 files changed, 3 insertions(+), 34 deletions(-)
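
For readers who don't know the helpers being adopted: sgl_alloc() allocates a
scatterlist together with the pages that back it, sized for a byte count, and
returns the number of entries through a pointer; sgl_free() releases the pages
and the table again. Both are built only when CONFIG_SGL_ALLOC is set, which is
why the Kconfig hunk below selects that symbol. The snippet below is only an
illustrative sketch of that API, not part of the patch; the example_* wrappers
are made up for this note.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>	/* sgl_alloc()/sgl_free(), gated by CONFIG_SGL_ALLOC */

/* Hypothetical wrapper, for illustration only. */
static struct scatterlist *example_alloc_sgl(u32 length, unsigned int *nent)
{
	/*
	 * One call replaces the removed kmalloc_array() + sg_init_table() +
	 * alloc_page() loop: sgl_alloc() sizes the table from 'length',
	 * allocates the backing pages and stores the entry count in *nent.
	 */
	return sgl_alloc(length, GFP_KERNEL, nent);
}

/* Hypothetical wrapper, for illustration only. */
static void example_free_sgl(struct scatterlist *sg)
{
	/* Frees every page in the list, then the scatterlist itself. */
	sgl_free(sg);
}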

--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -39,6 +39,7 @@ config NVME_TARGET_FC
 	tristate "NVMe over Fabrics FC target driver"
 	depends on NVME_TARGET
 	depends on HAS_DMA
+	select SGL_ALLOC
 	help
 	  This enables the NVMe FC target support, which allows exporting NVMe
 	  devices over FC.

--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
 	struct scatterlist *sg;
-	struct page *page;
 	unsigned int nent;
-	u32 page_len, length;
-	int i = 0;
 
-	length = fod->req.transfer_len;
-	nent = DIV_ROUND_UP(length, PAGE_SIZE);
-	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
 	if (!sg)
 		goto out;
 
-	sg_init_table(sg, nent);
-
-	while (length) {
-		page_len = min_t(u32, length, PAGE_SIZE);
-
-		page = alloc_page(GFP_KERNEL);
-		if (!page)
-			goto out_free_pages;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
-	}
-
 	fod->data_sg = sg;
 	fod->data_sg_cnt = nent;
 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 
 	return 0;
 
-out_free_pages:
-	while (i > 0) {
-		i--;
-		__free_page(sg_page(&sg[i]));
-	}
-	kfree(sg);
-	fod->data_sg = NULL;
-	fod->data_sg_cnt = 0;
 out:
 	return NVME_SC_INTERNAL;
 }
@@ -1746,18 +1719,13 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 static void
 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
-	struct scatterlist *sg;
-	int count;
-
 	if (!fod->data_sg || !fod->data_sg_cnt)
 		return;
 
 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
-	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
-		__free_page(sg_page(sg));
-	kfree(fod->data_sg);
+	sgl_free(fod->data_sg);
 	fod->data_sg = NULL;
 	fod->data_sg_cnt = 0;
 }
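
Taken together, the two fc.c hunks leave the driver with a simple lifecycle:
sgl_alloc(), DMA-map, do the I/O, DMA-unmap, sgl_free(). Below is a minimal
sketch of that pattern using the generic DMA API; it is an illustration under
assumptions, not the driver code: nvmet_fc goes through its fc_dma_* wrappers
and keeps the list in its fcp_iod, and everything named example_* here is
invented for this note.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: allocate and DMA-map a receive buffer of 'len' bytes. */
static int example_setup_buffer(struct device *dev, u32 len,
				struct scatterlist **sgp, unsigned int *nentp)
{
	struct scatterlist *sg;
	unsigned int nent;

	sg = sgl_alloc(len, GFP_KERNEL, &nent);	/* pages + table in one call */
	if (!sg)
		return -ENOMEM;

	if (!dma_map_sg(dev, sg, nent, DMA_FROM_DEVICE)) {
		sgl_free(sg);	/* undo the allocation on mapping failure */
		return -EIO;
	}

	*sgp = sg;
	*nentp = nent;
	return 0;
}

/* Hypothetical helper: the matching teardown, loosely mirroring nvmet_fc_free_tgt_pgs(). */
static void example_teardown_buffer(struct device *dev, struct scatterlist *sg,
				    unsigned int nent)
{
	dma_unmap_sg(dev, sg, nent, DMA_FROM_DEVICE);
	sgl_free(sg);		/* frees the pages and the scatterlist */
}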