kernel-fxtec-pro1x/drivers/scsi/qla2xxx/qla_inline.h
Chad Dupuis 50280c014d [SCSI] Revert "qla2xxx: Ramp down queue depth for attached SCSI devices when driver resources are low."
This reverts commit 3c290d0b5f

Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2013-12-19 07:38:57 -08:00

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: SCSI host pointer
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = RD_REG_WORD(addr);
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}

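/*
 * qla2x00_poll() - Service any pending interrupts for @rsp by invoking the
 * ISP interrupt handler directly, with local interrupts disabled.
 */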
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	unsigned long flags;
	struct qla_hw_data *ha = rsp->hw;

	local_irq_save(flags);
	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
	local_irq_restore(flags);
}

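/*
 * host_to_fcp_swap() - Byte-swap @bsize bytes of @fcp in place, one 32-bit
 * word at a time, converting between host and FCP (big-endian) byte order.
 */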
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

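/*
 * host_to_adap() - Copy @bsize bytes from @src to @dst, converting each
 * 32-bit word from host to little-endian adapter byte order.
 */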
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*odest++ = cpu_to_le32(*isrc++);
}

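/*
 * qla2x00_set_reserved_loop_ids() - Mark the reserved loop IDs as in use in
 * the loop ID map so they are never handed out to fcports.  No-op on
 * FWI2-capable adapters.
 */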
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
	int i;

	if (IS_FWI2_CAPABLE(ha))
		return;

	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
		set_bit(i, ha->loop_id_map);
	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
	set_bit(BROADCAST, ha->loop_id_map);
}

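/*
 * qla2x00_is_reserved_id() - Return nonzero if @loop_id is one of the
 * reserved handles that must not be assigned to a regular fcport.
 */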
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return (loop_id > NPH_LAST_HANDLE);

	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

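/*
 * qla2x00_clear_loop_id() - Release the fcport's loop ID back to the loop
 * ID map and reset it to FC_NO_LOOP_ID.
 */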
static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

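/*
 * qla2x00_clean_dsd_pool() - Free every DSD buffer attached to the
 * command's CRC context and reinitialize the DSD list.
 */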
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

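/*
 * qla2x00_set_fcport_state() - Atomically update the fcport state and log
 * the transition when the state actually changes.
 */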
static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort state transitioned from %s to %s - "
		    "portid=%02x%02x%02x.\n",
		    port_state_str[old_state], port_state_str[state],
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}
}

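/*
 * qla2x00_hba_err_chk_enabled() - Decide whether the HBA should perform
 * protection information checking for this command, based on the SCSI
 * protection operation and the ql2xenablehba_err_chk module parameter.
 */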
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

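/*
 * qla2x00_reset_active() - Return nonzero if an ISP abort or reset is
 * pending or in progress on @vha or its base vha.
 */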
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

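/*
 * qla2x00_get_sp() - Allocate and zero an srb from the host's mempool,
 * taking a busy reference on the vha for the lifetime of the srb.  Returns
 * NULL if the vha is being deleted or the allocation fails.
 */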
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(ha->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

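/*
 * qla2x00_rel_sp() - Return an srb to the mempool and drop the busy
 * reference taken in qla2x00_get_sp().
 */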
static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
	mempool_free(sp, vha->hw->srb_mempool);
	QLA_VHA_MARK_NOT_BUSY(vha);
}

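/*
 * qla2x00_init_timer() - Arm the srb's timeout timer for @tmo seconds and
 * install the default free handler.
 */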
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
	    (sp->type == SRB_FXIOCB_DCMD))
		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}

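/*
 * qla2x00_gid_list_size() - Size in bytes of the GID list buffer for this
 * adapter type.
 */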
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

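/*
 * qla2x00_handle_mbx_completion() - Wake up a waiter sleeping on a mailbox
 * command once the mailbox interrupt has been seen.
 */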
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}