cfq-iosched: split seeky coop queues after one slice
Currently we split seeky coop queues after 1s, which is too long. The patch below instead marks a seeky coop queue's split_coop flag after one slice; once new requests come in, the queue is split. Patch suggested by Corrado.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Corrado Zoccolo <czoccolo@gmail.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent fc76be434d
commit ae54abed63

1 changed file with 16 additions and 33 deletions
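For readers skimming the diff, here is a minimal user-space sketch of the control flow the patch introduces: the slice-expiry path only marks a still-seeky shared queue with a split flag, and the split itself happens when the next request is set up. The struct and function names below are illustrative stand-ins, not the actual cfq-iosched symbols (those appear in the hunks that follow).

/*
 * Toy model of the deferred split (not kernel code): mark at slice
 * expiry, split on the next incoming request.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool coop;		/* queue is shared by several processes */
	bool split_coop;	/* marked for splitting at next request */
	bool seeky;		/* mean seek distance exceeds the threshold */
};

/* Called when the queue's time slice expires. */
static void slice_expired(struct queue *q)
{
	if (q->coop && q->seeky)
		q->split_coop = true;	/* defer the split, just mark it */
}

/* Called when a new request arrives for the queue. */
static void new_request(struct queue *q)
{
	if (q->coop && q->split_coop) {
		q->coop = false;	/* break the shared queue apart */
		q->split_coop = false;
		printf("queue split after one slice\n");
	}
}

int main(void)
{
	struct queue q = { .coop = true, .split_coop = false, .seeky = true };

	slice_expired(&q);	/* first slice ends while still seeky */
	new_request(&q);	/* next request triggers the split */
	return 0;
}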
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT	12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1565,6 +1563,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_wait_busy(cfqq);
 
+	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance. If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 		return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance. If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
 
@@ -3510,7 +3493,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	/*
 	 * If the queue was seeky for too long, break it apart.
 	 */
-	if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+	if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 		cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 		cfqq = split_cfqq(cic, cfqq);
 		if (!cfqq)