blk-mq: turn hctx->run_work into a regular work struct
We don't need the larger delayed work struct, since we always run it immediately.

Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
ee63cfa7fc
commit
27489a3c82
3 changed files with 6 additions and 7 deletions
|
@@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i) {
|
||||||
cancel_delayed_work_sync(&hctx->run_work);
|
cancel_work_sync(&hctx->run_work);
|
||||||
cancel_delayed_work_sync(&hctx->delay_work);
|
cancel_delayed_work_sync(&hctx->delay_work);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@@ -936,8 +936,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
|
||||||
put_cpu();
|
put_cpu();
|
||||||
}
|
}
|
||||||
|
|
||||||
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
|
kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
|
||||||
&hctx->run_work, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
|
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
|
||||||
|
@@ -958,7 +957,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
|
||||||
|
|
||||||
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
|
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
cancel_delayed_work(&hctx->run_work);
|
cancel_work(&hctx->run_work);
|
||||||
cancel_delayed_work(&hctx->delay_work);
|
cancel_delayed_work(&hctx->delay_work);
|
||||||
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
|
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
|
||||||
}
|
}
|
||||||
|
@@ -1011,7 +1010,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
|
||||||
{
|
{
|
||||||
struct blk_mq_hw_ctx *hctx;
|
struct blk_mq_hw_ctx *hctx;
|
||||||
|
|
||||||
hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
|
hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
|
||||||
|
|
||||||
__blk_mq_run_hw_queue(hctx);
|
__blk_mq_run_hw_queue(hctx);
|
||||||
}
|
}
|
||||||
|
@@ -1722,7 +1721,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||||
if (node == NUMA_NO_NODE)
|
if (node == NUMA_NO_NODE)
|
||||||
node = hctx->numa_node = set->numa_node;
|
node = hctx->numa_node = set->numa_node;
|
||||||
|
|
||||||
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
|
INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
|
||||||
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
|
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
|
||||||
spin_lock_init(&hctx->lock);
|
spin_lock_init(&hctx->lock);
|
||||||
INIT_LIST_HEAD(&hctx->dispatch);
|
INIT_LIST_HEAD(&hctx->dispatch);
|
||||||
|
|
|
@@ -25,7 +25,7 @@ struct blk_mq_hw_ctx {
|
||||||
} ____cacheline_aligned_in_smp;
|
} ____cacheline_aligned_in_smp;
|
||||||
|
|
||||||
unsigned long state; /* BLK_MQ_S_* flags */
|
unsigned long state; /* BLK_MQ_S_* flags */
|
||||||
struct delayed_work run_work;
|
struct work_struct run_work;
|
||||||
struct delayed_work delay_work;
|
struct delayed_work delay_work;
|
||||||
cpumask_var_t cpumask;
|
cpumask_var_t cpumask;
|
||||||
int next_cpu;
|
int next_cpu;
|
||||||
|
|
Loading…
Reference in a new issue