block: separate out padding from alignment
Block layer alignment was used for two different purposes - memory
alignment and padding. This causes problems in lower layers because
drivers which only require memory alignment end up with an adjusted
rq->data_len. Separate out padding such that padding occurs iff the
driver explicitly requests it.
Tomo: restore the code that updates the bio in blk_rq_map_user(),
introduced by commit 40b01b9bbd, according to the padding alignment.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit e3790c7d42
parent 7a85f8896f
4 changed files with 34 additions and 8 deletions
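To illustrate the distinction the patch draws, here is a small user-space sketch of the direct-map vs. bounce-copy decision made in __blk_rq_map_user(); the mask and buffer values are made up for the example (in the kernel they come from queue_dma_alignment(q), q->dma_pad_mask and the caller's buffer):

#include <stdio.h>

/* Returns 1 if the user buffer could be mapped directly for DMA, 0 if it
 * would have to be bounced to an aligned kernel copy.  After this patch,
 * the check uses the memory-alignment mask OR'd with the padding mask. */
static int can_map_directly(unsigned long uaddr, unsigned int len,
			    unsigned int dma_align, unsigned int dma_pad)
{
	unsigned int alignment = dma_align | dma_pad;

	return !(uaddr & alignment) && !(len & alignment);
}

int main(void)
{
	/* illustrative values: 4-byte memory alignment and 4-byte padding */
	printf("%d\n", can_map_directly(0x1000, 512, 3, 3));	/* 1: direct */
	printf("%d\n", can_map_directly(0x1001, 512, 3, 3));	/* 0: bounce */
	printf("%d\n", can_map_directly(0x1000, 510, 3, 3));	/* 0: bounce */
	return 0;
}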
block/blk-map.c
@@ -43,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -53,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -141,15 +142,20 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask.  As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding.  Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
 
 		rq->extra_len += pad_len;
 	}
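The pad_len expression above rounds len up to the next (dma_pad_mask + 1) boundary. A stand-alone sketch of that arithmetic, using an illustrative 4-byte pad mask (the same value ATA ends up using via ATA_DMA_PAD_SZ - 1):

#include <stdio.h>

int main(void)
{
	unsigned int pad_mask = 3;		/* e.g. ATA_DMA_PAD_SZ - 1 */
	unsigned int lens[] = { 510, 511, 512 };

	for (int i = 0; i < 3; i++) {
		unsigned int len = lens[i];

		if (len & pad_mask) {
			/* same formula as the patch: distance to the next boundary */
			unsigned int pad_len = (pad_mask & ~len) + 1;
			printf("%u -> %u (pad %u)\n", len, len + pad_len, pad_len);
		} else {
			printf("%u already aligned, no pad\n", len);
		}
	}
	return 0;
}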
block/blk-settings.c
@@ -292,6 +292,23 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+/**
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
+ *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
 /**
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
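A driver that wants its transfers padded opts in during queue setup by calling blk_queue_dma_pad() alongside the existing alignment setup; the libata hunk below is the in-tree caller, and this fragment is only a hypothetical sketch with a made-up 4-byte pad size:

#include <linux/blkdev.h>

/* Hypothetical queue setup: keep the memory-alignment requirement and
 * additionally ask the block layer to pad transfer lengths to 4 bytes. */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 4 - 1);	/* memory alignment */
	blk_queue_dma_pad(q, 4 - 1);			/* length padding */
}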
drivers/ata/libata-scsi.c
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		struct request_queue *q = sdev->request_queue;
 		void *buf;
 
-		/* set the min alignment */
+		/* set the min alignment and padding */
 		blk_queue_update_dma_alignment(sdev->request_queue,
 					       ATA_DMA_PAD_SZ - 1);
+		blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
 
 		/* configure draining */
 		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
include/linux/blkdev.h
@@ -362,6 +362,7 @@ struct request_queue
 	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
+	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -701,6 +702,7 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);