irq_poll: fold irq_poll_sched_prep into irq_poll_sched
There is no good reason to keep them apart, and this makes using
the API a bit simpler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
parent 78d0264eb7
commit ea51190c03

4 changed files with 10 additions and 22 deletions
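For a concrete view of the caller-side effect, here is a minimal sketch of a
driver interrupt handler before and after this change. The names my_dev and
my_isr, and the iopoll member, are hypothetical stand-ins for illustration,
not code from this commit.

	#include <linux/interrupt.h>
	#include <linux/irq_poll.h>

	/* Hypothetical per-device state; only the irq_poll member matters here. */
	struct my_dev {
		struct irq_poll iopoll;
	};

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		/*
		 * Before this commit, callers paired two calls, scheduling
		 * only when irq_poll_sched_prep() returned 0:
		 *
		 *	if (!irq_poll_sched_prep(&dev->iopoll))
		 *		irq_poll_sched(&dev->iopoll);
		 */

		/* After this commit, a single call does both steps. */
		irq_poll_sched(&dev->iopoll);

		return IRQ_HANDLED;
	}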
drivers/scsi/be2iscsi/be_main.c

@@ -910,8 +910,7 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 	num_eq_processed = 0;
 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 			& EQE_VALID_MASK) {
-		if (!irq_poll_sched_prep(&pbe_eq->iopoll))
-			irq_poll_sched(&pbe_eq->iopoll);
+		irq_poll_sched(&pbe_eq->iopoll);
 
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 		queue_tail_inc(eq);
@@ -972,8 +971,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 			spin_unlock_irqrestore(&phba->isr_lock, flags);
 			num_mcceq_processed++;
 		} else {
-			if (!irq_poll_sched_prep(&pbe_eq->iopoll))
-				irq_poll_sched(&pbe_eq->iopoll);
+			irq_poll_sched(&pbe_eq->iopoll);
 			num_ioeq_processed++;
 		}
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
drivers/scsi/ipr.c

@@ -5692,8 +5692,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 		       hrrq->toggle_bit) {
-			if (!irq_poll_sched_prep(&hrrq->iopoll))
-				irq_poll_sched(&hrrq->iopoll);
+			irq_poll_sched(&hrrq->iopoll);
 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 			return IRQ_HANDLED;
 		}
include/linux/irq_poll.h

@@ -18,19 +18,6 @@ enum {
 	IRQ_POLL_F_DISABLE	= 1,
 };
 
-/*
- * Returns 0 if we successfully set the IRQ_POLL_F_SCHED bit, indicating
- * that we were the first to acquire this iop for scheduling. If this iop
- * is currently disabled, return "failure".
- */
-static inline int irq_poll_sched_prep(struct irq_poll *iop)
-{
-	if (!test_bit(IRQ_POLL_F_DISABLE, &iop->state))
-		return test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state);
-
-	return 1;
-}
-
 static inline int irq_poll_disable_pending(struct irq_poll *iop)
 {
 	return test_bit(IRQ_POLL_F_DISABLE, &iop->state);
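Note the convention this removal retires: irq_poll_sched_prep() returned 0
only when the instance was enabled and the caller was the first to set
IRQ_POLL_F_SCHED, hence the "if (!irq_poll_sched_prep(...))" double negative
at every call site. After this commit the disable check and the
test_and_set_bit() handshake happen inside irq_poll_sched() itself, as the
lib/irq_poll.c hunk below shows.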
lib/irq_poll.c

@@ -21,13 +21,17 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
  *
  * Description:
  *     Add this irq_poll structure to the pending poll list and trigger the
- *     raise of the blk iopoll softirq. The driver must already have gotten a
- *     successful return from irq_poll_sched_prep() before calling this.
+ *     raise of the blk iopoll softirq.
  **/
 void irq_poll_sched(struct irq_poll *iop)
 {
 	unsigned long flags;
 
+	if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+		return;
+	if (!test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
+		return;
+
 	local_irq_save(flags);
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
@@ -58,7 +62,7 @@ EXPORT_SYMBOL(__irq_poll_complete);
  * Description:
  *     If a driver consumes less than the assigned budget in its run of the
  *     iopoll handler, it'll end the polled mode by calling this function. The
- *     iopoll handler will not be invoked again before irq_poll_sched_prep()
+ *     iopoll handler will not be invoked again before irq_poll_sched()
  *     is called.
  **/
 void irq_poll_complete(struct irq_poll *iop)
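The irq_poll_complete() kernel-doc above covers the other half of the cycle:
a handler that finishes under budget takes itself out of polled mode, and
nothing runs it again until the next irq_poll_sched(). A rough sketch of such
a handler, reusing the hypothetical my_dev from the earlier example (my_poll
and my_process_completions are likewise invented names):

	/* Registered at setup, e.g. irq_poll_init(&dev->iopoll, 64, my_poll); */
	static int my_poll(struct irq_poll *iop, int budget)
	{
		struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
		int done = my_process_completions(dev, budget); /* hypothetical helper */

		if (done < budget)
			/* Under budget: leave polled mode; this handler is
			 * not invoked again until irq_poll_sched() runs. */
			irq_poll_complete(iop);

		return done;
	}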