Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] lpfc 8.3.24: Extend BSG infrastructure and add link diagnostics
  [SCSI] lpfc 8.3.24: Add resource extent support
  [SCSI] lpfc 8.3.24: Add request-firmware support
  [SCSI] lpfc 8.3.24: Add SR-IOV control
  [SCSI] lpfc 8.3.24: Extended hardware support and support dump images
  [SCSI] lpfc 8.3.24: Miscellaneous Fixes and Corrections
  [SCSI] libsas: Add option for SATA soft reset
  [SCSI] libsas: check dev->gone before submitting sata i/o
  [SCSI] libsas: fix/amend device gone notification in sas_deform_port()
  [SCSI] MAINTAINERS update for SCSI (new email address)
  [SCSI] Fix Ultrastor asm snippet
  [SCSI] osst: fix warning
  [SCSI] osst: wrong index used in inner loop
  [SCSI] aic94xx: world-writable sysfs update_bios file
  [SCSI] MAINTAINERS: Add drivers/target/ entry
  [SCSI] target: Convert TASK_ATTR to scsi_tcq.h definitions
  [SCSI] target: Convert REPORT_LUNs to use int_to_scsilun
  [SCSI] target: Fix task->task_execute_queue=1 clear bug + LUN_RESET OOPs
  [SCSI] target: Fix bug with task_sg chained transport_free_dev_tasks release
  [SCSI] target: Fix interrupt context bug with stats_lock and core_tmr_alloc_req
  ...
commit 426048313d
79 changed files with 5890 additions and 1227 deletions
Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
Release Date    : Wed. May 11, 2011 17:00:00 PST 2010 -
                  (emaild-id:megaraidlinux@lsi.com)
                  Adam Radford
Current Version : 00.00.05.38-rc1
Old Version     : 00.00.05.34-rc1
    1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
    2. Remove un-used function megasas_return_cmd_for_smid().
    3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
    4. Disable interrupts/free_irq() in megasas_shutdown().
    5. Fix bug where AENs could be lost in probe() and resume().
    6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
       IO.
    7. Add 1078 OCR support.
-------------------------------------------------------------------------------
Release Date    : Thu. Feb 24, 2011 17:00:00 PST 2010 -
                  (emaild-id:megaraidlinux@lsi.com)
                  Adam Radford
MAINTAINERS
@@ -5515,7 +5515,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h

SCSI SUBSYSTEM
M: "James E.J. Bottomley" <James.Bottomley@suse.de>
M: "James E.J. Bottomley" <JBottomley@parallels.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6084,6 +6084,17 @@ F: Documentation/filesystems/sysv-fs.txt
F: fs/sysv/
F: include/linux/sysv_fs.h

TARGET SUBSYSTEM
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
S: Supported
F: drivers/target/
F: include/target/
F: Documentation/target/

TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <balbir@linux.vnet.ibm.com>
S: Maintained
drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
                     flash_error_table[i].reason);
}

static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
        asd_show_update_bios, asd_store_update_bios);

static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
                ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
                ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
                ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_complete(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
                                        bfa_boolean_t msix);
        void            (*ioc_notify_fail)      (struct bfa_ioc_s *ioc);
        void            (*ioc_ownership_reset)  (struct bfa_ioc_s *ioc);
        bfa_boolean_t   (*ioc_sync_start)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_join)        (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_leave)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
        hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
        hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
        hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
        hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
        hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
        hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
        hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -194,6 +196,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
}

/*
 * Synchronized IOC failure processing routines
 */
static bfa_boolean_t
bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
{
        return bfa_ioc_cb_sync_complete(ioc);
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
        hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
        hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
        hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
        hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
        hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
        uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time. If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */

        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return BFA_TRUE;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
#define BD_SPLIT_SIZE                   32768

/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
#define BNX2I_SQ_WQES_MIN               16
#define BNX2I_570X_SQ_WQES_MAX          128
#define BNX2I_5770X_SQ_WQES_MAX         512
#define BNX2I_570X_SQ_WQES_DEFAULT      128
#define BNX2I_5770X_SQ_WQES_DEFAULT     256
#define BNX2I_SQ_WQES_MIN               16
#define BNX2I_570X_SQ_WQES_MAX          128
#define BNX2I_5770X_SQ_WQES_MAX         512
#define BNX2I_570X_SQ_WQES_DEFAULT      128
#define BNX2I_5770X_SQ_WQES_DEFAULT     128

#define BNX2I_570X_CQ_WQES_MAX          128
#define BNX2I_5770X_CQ_WQES_MAX         512
@@ -115,6 +115,7 @@
#define BNX2X_MAX_CQS                   8

#define CNIC_ARM_CQE                    1
#define CNIC_ARM_CQE_FP                 2
#define CNIC_DISARM_CQE                 0

#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
 *                    after HBA reset is completed by bnx2i/cnic/bnx2
 *                    modules
 * @state:            tracks offload connection state machine
 * @teardown_mode:    indicates if conn teardown is abortive or orderly
 * @timestamp:        tracks the start time when the ep begins to connect
 * @num_active_cmds:  tracks the number of outstanding commands for this ep
 * @ec_shift:         the amount of shift as part of the event coal calc
 * @qp:               QP information
 * @ids:              contains chip allocated *context id* & driver assigned
 *                    *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
        u32 state;
        unsigned long timestamp;
        int num_active_cmds;
        u32 ec_shift;

        struct qp_info qp;
        struct ep_handles ids;
|
@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
|
|||
u16 next_index;
|
||||
u32 num_active_cmds;
|
||||
|
||||
|
||||
/* Coalesce CQ entries only on 10G devices */
|
||||
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
|
||||
return;
|
||||
|
@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
|
|||
* interrupts and other unwanted results
|
||||
*/
|
||||
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
|
||||
if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
|
||||
return;
|
||||
|
||||
if (action == CNIC_ARM_CQE) {
|
||||
if (action != CNIC_ARM_CQE_FP)
|
||||
if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
|
||||
return;
|
||||
|
||||
if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
|
||||
num_active_cmds = ep->num_active_cmds;
|
||||
if (num_active_cmds <= event_coal_min)
|
||||
next_index = 1;
|
||||
else
|
||||
next_index = event_coal_min +
|
||||
(num_active_cmds - event_coal_min) / event_coal_div;
|
||||
((num_active_cmds - event_coal_min) >>
|
||||
ep->ec_shift);
|
||||
if (!next_index)
|
||||
next_index = 1;
|
||||
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
|
||||
|
@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
|
|||
iscsi_init.dummy_buffer_addr_hi =
|
||||
(u32) ((u64) hba->dummy_buf_dma >> 32);
|
||||
|
||||
hba->num_ccell = hba->max_sqes >> 1;
|
||||
hba->ctx_ccell_tasks =
|
||||
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
|
||||
iscsi_init.num_ccells_per_conn = hba->num_ccell;
|
||||
|
@ -1934,7 +1937,6 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
|
|||
qp->cq_cons_idx++;
|
||||
}
|
||||
}
|
||||
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1948,22 +1950,23 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
|
|||
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
|
||||
struct iscsi_kcqe *new_cqe_kcqe)
|
||||
{
|
||||
struct bnx2i_conn *conn;
|
||||
struct bnx2i_conn *bnx2i_conn;
|
||||
u32 iscsi_cid;
|
||||
|
||||
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
|
||||
conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
|
||||
bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
|
||||
|
||||
if (!conn) {
|
||||
if (!bnx2i_conn) {
|
||||
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
|
||||
return;
|
||||
}
|
||||
if (!conn->ep) {
|
||||
if (!bnx2i_conn->ep) {
|
||||
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
|
||||
return;
|
||||
}
|
||||
|
||||
bnx2i_process_new_cqes(conn);
|
||||
bnx2i_process_new_cqes(bnx2i_conn);
|
||||
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
|
||||
bnx2i_process_new_cqes(bnx2i_conn);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
|
|||
wait_event_interruptible_timeout(hba->eh_wait,
|
||||
(list_empty(&hba->ep_ofld_list) &&
|
||||
list_empty(&hba->ep_destroy_list)),
|
||||
10 * HZ);
|
||||
2 * HZ);
|
||||
/* Wait for all endpoints to be torn down, Chip will be reset once
|
||||
* control returns to network driver. So it is required to cleanup and
|
||||
* release all connection resources before returning from this routine.
|
||||
|
|
|
@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
|
|||
{
|
||||
struct iscsi_endpoint *ep;
|
||||
struct bnx2i_endpoint *bnx2i_ep;
|
||||
u32 ec_div;
|
||||
|
||||
ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
|
||||
if (!ep) {
|
||||
|
@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
|
|||
bnx2i_ep->ep_iscsi_cid = (u16) -1;
|
||||
bnx2i_ep->hba = hba;
|
||||
bnx2i_ep->hba_age = hba->age;
|
||||
|
||||
ec_div = event_coal_div;
|
||||
while (ec_div >>= 1)
|
||||
bnx2i_ep->ec_shift += 1;
|
||||
|
||||
hba->ofld_conns_active++;
|
||||
init_waitqueue_head(&bnx2i_ep->ofld_wait);
|
||||
return ep;
|
||||
|
@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
|
|||
mutex_init(&hba->net_dev_lock);
|
||||
init_waitqueue_head(&hba->eh_wait);
|
||||
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
|
||||
hba->hba_shutdown_tmo = 20 * HZ;
|
||||
hba->hba_shutdown_tmo = 30 * HZ;
|
||||
hba->conn_teardown_tmo = 20 * HZ;
|
||||
hba->conn_ctx_destroy_tmo = 6 * HZ;
|
||||
} else { /* 5706/5708/5709 */
|
||||
|
@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
|
|||
struct bnx2i_cmd *cmd = task->dd_data;
|
||||
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
|
||||
|
||||
if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* If there is no scsi_cmnd this must be a mgmt task
|
||||
*/
|
||||
|
@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
|
|||
.change_queue_depth = iscsi_change_queue_depth,
|
||||
.can_queue = 1024,
|
||||
.max_sectors = 127,
|
||||
.cmd_per_lun = 32,
|
||||
.cmd_per_lun = 24,
|
||||
.this_id = -1,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
|
||||
|
|
|
@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
|
|||
static int fcoe_vport_disable(struct fc_vport *, bool disable);
|
||||
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
|
||||
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
|
||||
static int fcoe_validate_vport_create(struct fc_vport *);
|
||||
|
||||
static struct libfc_function_template fcoe_libfc_fcn_templ = {
|
||||
.frame_send = fcoe_xmit,
|
||||
|
@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
|
|||
struct fcoe_interface *fcoe = port->priv;
|
||||
struct net_device *netdev = fcoe->netdev;
|
||||
struct fc_lport *vn_port;
|
||||
int rc;
|
||||
char buf[32];
|
||||
|
||||
rc = fcoe_validate_vport_create(vport);
|
||||
if (rc) {
|
||||
wwn_to_str(vport->port_name, buf, sizeof(buf));
|
||||
printk(KERN_ERR "fcoe: Failed to create vport, "
|
||||
"WWPN (0x%s) already exists\n",
|
||||
buf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
mutex_lock(&fcoe_config_mutex);
|
||||
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
|
||||
|
@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
|
|||
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
|
||||
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_validate_vport_create() - Validate a vport before creating it
|
||||
* @vport: NPIV port to be created
|
||||
*
|
||||
* This routine is meant to add validation for a vport before creating it
|
||||
* via fcoe_vport_create().
|
||||
* Current validations are:
|
||||
* - WWPN supplied is unique for given lport
|
||||
*
|
||||
*
|
||||
*/
|
||||
static int fcoe_validate_vport_create(struct fc_vport *vport)
|
||||
{
|
||||
struct Scsi_Host *shost = vport_to_shost(vport);
|
||||
struct fc_lport *n_port = shost_priv(shost);
|
||||
struct fc_lport *vn_port;
|
||||
int rc = 0;
|
||||
char buf[32];
|
||||
|
||||
mutex_lock(&n_port->lp_mutex);
|
||||
|
||||
wwn_to_str(vport->port_name, buf, sizeof(buf));
|
||||
/* Check if the wwpn is not same as that of the lport */
|
||||
if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
|
||||
FCOE_DBG("vport WWPN 0x%s is same as that of the "
|
||||
"base port WWPN\n", buf);
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Check if there is any existing vport with same wwpn */
|
||||
list_for_each_entry(vn_port, &n_port->vports, list) {
|
||||
if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
|
||||
FCOE_DBG("vport with given WWPN 0x%s already "
|
||||
"exists\n", buf);
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&n_port->lp_mutex);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
|
|||
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
|
||||
}
|
||||
|
||||
static inline void wwn_to_str(u64 wwn, char *buf, int len)
|
||||
{
|
||||
u8 wwpn[8];
|
||||
|
||||
u64_to_wwn(wwn, wwpn);
|
||||
snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
|
||||
wwpn[0], wwpn[1], wwpn[2], wwpn[3],
|
||||
wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
|
||||
}
|
||||
|
||||
#endif /* _FCOE_H_ */
|
||||
|
|
|
@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
|
|||
struct fc_lport *lport = fip->lp;
|
||||
struct fc_lport *vn_port = NULL;
|
||||
u32 desc_mask;
|
||||
int is_vn_port = 0;
|
||||
int num_vlink_desc;
|
||||
int reset_phys_port = 0;
|
||||
struct fip_vn_desc **vlink_desc_arr = NULL;
|
||||
|
||||
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
|
||||
|
||||
|
@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
|
|||
/*
|
||||
* mask of required descriptors. Validating each one clears its bit.
|
||||
*/
|
||||
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
|
||||
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
|
||||
|
||||
rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
|
||||
desc = (struct fip_desc *)(fh + 1);
|
||||
|
||||
/*
|
||||
* Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
|
||||
* before determining max Vx_Port descriptor but a buggy FCF could have
|
||||
* omited either or both MAC Address and Name Identifier descriptors
|
||||
*/
|
||||
num_vlink_desc = rlen / sizeof(*vp);
|
||||
if (num_vlink_desc)
|
||||
vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
|
||||
GFP_ATOMIC);
|
||||
if (!vlink_desc_arr)
|
||||
return;
|
||||
num_vlink_desc = 0;
|
||||
|
||||
while (rlen >= sizeof(*desc)) {
|
||||
dlen = desc->fip_dlen * FIP_BPW;
|
||||
if (dlen > rlen)
|
||||
return;
|
||||
goto err;
|
||||
/* Drop CVL if there are duplicate critical descriptors */
|
||||
if ((desc->fip_dtype < 32) &&
|
||||
(desc->fip_dtype != FIP_DT_VN_ID) &&
|
||||
!(desc_mask & 1U << desc->fip_dtype)) {
|
||||
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
|
||||
"Descriptors in FIP CVL\n");
|
||||
return;
|
||||
goto err;
|
||||
}
|
||||
switch (desc->fip_dtype) {
|
||||
case FIP_DT_MAC:
|
||||
mp = (struct fip_mac_desc *)desc;
|
||||
if (dlen < sizeof(*mp))
|
||||
return;
|
||||
goto err;
|
||||
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
|
||||
return;
|
||||
goto err;
|
||||
desc_mask &= ~BIT(FIP_DT_MAC);
|
||||
break;
|
||||
case FIP_DT_NAME:
|
||||
wp = (struct fip_wwn_desc *)desc;
|
||||
if (dlen < sizeof(*wp))
|
||||
return;
|
||||
goto err;
|
||||
if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
|
||||
return;
|
||||
goto err;
|
||||
desc_mask &= ~BIT(FIP_DT_NAME);
|
||||
break;
|
||||
case FIP_DT_VN_ID:
|
||||
vp = (struct fip_vn_desc *)desc;
|
||||
if (dlen < sizeof(*vp))
|
||||
return;
|
||||
if (compare_ether_addr(vp->fd_mac,
|
||||
fip->get_src_addr(lport)) == 0 &&
|
||||
get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
|
||||
ntoh24(vp->fd_fc_id) == lport->port_id) {
|
||||
desc_mask &= ~BIT(FIP_DT_VN_ID);
|
||||
break;
|
||||
goto err;
|
||||
vlink_desc_arr[num_vlink_desc++] = vp;
|
||||
vn_port = fc_vport_id_lookup(lport,
|
||||
ntoh24(vp->fd_fc_id));
|
||||
if (vn_port && (vn_port == lport)) {
|
||||
mutex_lock(&fip->ctlr_mutex);
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
get_cpu())->VLinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_ctlr_reset(fip);
|
||||
mutex_unlock(&fip->ctlr_mutex);
|
||||
}
|
||||
/* check if clr_vlink is for NPIV port */
|
||||
mutex_lock(&lport->lp_mutex);
|
||||
list_for_each_entry(vn_port, &lport->vports, list) {
|
||||
if (compare_ether_addr(vp->fd_mac,
|
||||
fip->get_src_addr(vn_port)) == 0 &&
|
||||
(get_unaligned_be64(&vp->fd_wwpn)
|
||||
== vn_port->wwpn) &&
|
||||
(ntoh24(vp->fd_fc_id) ==
|
||||
fc_host_port_id(vn_port->host))) {
|
||||
desc_mask &= ~BIT(FIP_DT_VN_ID);
|
||||
is_vn_port = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&lport->lp_mutex);
|
||||
|
||||
break;
|
||||
default:
|
||||
/* standard says ignore unknown descriptors >= 128 */
|
||||
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
|
||||
return;
|
||||
goto err;
|
||||
break;
|
||||
}
|
||||
desc = (struct fip_desc *)((char *)desc + dlen);
|
||||
|
@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
|
|||
/*
|
||||
* reset only if all required descriptors were present and valid.
|
||||
*/
|
||||
if (desc_mask) {
|
||||
if (desc_mask)
|
||||
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
|
||||
desc_mask);
|
||||
} else {
|
||||
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
|
||||
|
||||
if (is_vn_port)
|
||||
else if (!num_vlink_desc) {
|
||||
LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
|
||||
/*
|
||||
* No Vx_Port description. Clear all NPIV ports,
|
||||
* followed by physical port
|
||||
*/
|
||||
mutex_lock(&lport->lp_mutex);
|
||||
list_for_each_entry(vn_port, &lport->vports, list)
|
||||
fc_lport_reset(vn_port);
|
||||
else {
|
||||
mutex_lock(&fip->ctlr_mutex);
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
get_cpu())->VLinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_ctlr_reset(fip);
|
||||
mutex_unlock(&fip->ctlr_mutex);
|
||||
mutex_unlock(&lport->lp_mutex);
|
||||
|
||||
mutex_lock(&fip->ctlr_mutex);
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
get_cpu())->VLinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_ctlr_reset(fip);
|
||||
mutex_unlock(&fip->ctlr_mutex);
|
||||
|
||||
fc_lport_reset(fip->lp);
|
||||
fcoe_ctlr_solicit(fip, NULL);
|
||||
} else {
|
||||
int i;
|
||||
|
||||
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
|
||||
for (i = 0; i < num_vlink_desc; i++) {
|
||||
vp = vlink_desc_arr[i];
|
||||
vn_port = fc_vport_id_lookup(lport,
|
||||
ntoh24(vp->fd_fc_id));
|
||||
if (!vn_port)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* 'port_id' is already validated, check MAC address and
|
||||
* wwpn
|
||||
*/
|
||||
if (compare_ether_addr(fip->get_src_addr(vn_port),
|
||||
vp->fd_mac) != 0 ||
|
||||
get_unaligned_be64(&vp->fd_wwpn) !=
|
||||
vn_port->wwpn)
|
||||
continue;
|
||||
|
||||
if (vn_port == lport)
|
||||
/*
|
||||
* Physical port, defer processing till all
|
||||
* listed NPIV ports are cleared
|
||||
*/
|
||||
reset_phys_port = 1;
|
||||
else /* NPIV port */
|
||||
fc_lport_reset(vn_port);
|
||||
}
|
||||
|
||||
if (reset_phys_port) {
|
||||
fc_lport_reset(fip->lp);
|
||||
fcoe_ctlr_solicit(fip, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
err:
|
||||
kfree(vlink_desc_arr);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
|
|||
struct fcoe_transport *ft = NULL;
|
||||
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
|
@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
|
|||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
|
@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
|
|||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
|
@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
|
|||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
|
|
|
@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
|
|||
|
||||
if (ipr_cmd != NULL) {
|
||||
/* Clear the PCI interrupt */
|
||||
num_hrrq = 0;
|
||||
do {
|
||||
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
|
||||
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
|
||||
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
|
||||
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
|
||||
|
||||
if (int_reg & IPR_PCII_HRRQ_UPDATED) {
|
||||
ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
|
||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
} else if (rc == IRQ_NONE && irq_none == 0) {
|
||||
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
|
||||
irq_none++;
|
||||
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
|
||||
int_reg & IPR_PCII_HRRQ_UPDATED) {
|
||||
ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
|
||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
||||
return IRQ_HANDLED;
|
||||
} else
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
|
|||
default:
|
||||
FC_DISC_DBG(disc, "Received an unsupported request, "
|
||||
"the opcode is (%x)\n", op);
|
||||
fc_frame_free(fp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
|
|||
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
|
||||
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
|
||||
spin_lock_bh(&ep->ex_lock);
|
||||
resp = ep->resp;
|
||||
rc = fc_exch_done_locked(ep);
|
||||
WARN_ON(fc_seq_exch(sp) != ep);
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
|
@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
|
|||
spin_unlock_bh(&ep->ex_lock);
|
||||
return sp;
|
||||
err:
|
||||
fc_fcp_ddp_done(fr_fsp(fp));
|
||||
rc = fc_exch_done_locked(ep);
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
if (!rc)
|
||||
|
|
|
@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
|
|||
* DDP related resources for a fcp_pkt
|
||||
* @fsp: The FCP packet that DDP had been used on
|
||||
*/
|
||||
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
|
||||
void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
|
||||
|
@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
|
|||
error = lport->tt.seq_send(lport, seq, fp);
|
||||
if (error) {
|
||||
WARN_ON(1); /* send error should be rare */
|
||||
fc_fcp_retry_cmd(fsp);
|
||||
return 0;
|
||||
return error;
|
||||
}
|
||||
fp = NULL;
|
||||
}
|
||||
|
@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
|
|||
FC_FCTL_REQ, 0);
|
||||
|
||||
rec_tov = get_fsp_rec_tov(fsp);
|
||||
seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
|
||||
seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
|
||||
fc_fcp_pkt_destroy,
|
||||
fsp, jiffies_to_msecs(rec_tov));
|
||||
if (!seq)
|
||||
goto retry;
|
||||
|
@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
|
|||
return;
|
||||
}
|
||||
|
||||
fsp->recov_seq = NULL;
|
||||
switch (fc_frame_payload_op(fp)) {
|
||||
case ELS_LS_ACC:
|
||||
fsp->recov_retry = 0;
|
||||
|
@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
|
|||
break;
|
||||
}
|
||||
fc_fcp_unlock_pkt(fsp);
|
||||
fsp->lp->tt.exch_done(seq);
|
||||
out:
|
||||
fsp->lp->tt.exch_done(seq);
|
||||
fc_frame_free(fp);
|
||||
fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
|
|||
{
|
||||
if (fc_fcp_lock_pkt(fsp))
|
||||
goto out;
|
||||
fsp->lp->tt.exch_done(fsp->recov_seq);
|
||||
fsp->recov_seq = NULL;
|
||||
switch (PTR_ERR(fp)) {
|
||||
case -FC_EX_TIMEOUT:
|
||||
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
|
||||
|
@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
|
|||
}
|
||||
fc_fcp_unlock_pkt(fsp);
|
||||
out:
|
||||
fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
|
||||
fsp->lp->tt.exch_done(fsp->recov_seq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
|
|||
* Set up direct-data placement for this I/O request
|
||||
*/
|
||||
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
|
||||
void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
|
||||
|
||||
/*
|
||||
* Module setup functions
|
||||
|
|
|
@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
|
|||
struct sas_ha_struct *sas_ha;
|
||||
enum ata_completion_errors ac;
|
||||
unsigned long flags;
|
||||
struct ata_link *link;
|
||||
|
||||
if (!qc)
|
||||
goto qc_already_gone;
|
||||
|
||||
dev = qc->ap->private_data;
|
||||
sas_ha = dev->port->ha;
|
||||
link = &dev->sata_dev.ap->link;
|
||||
|
||||
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
|
||||
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
|
||||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
|
||||
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
|
||||
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
|
||||
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
|
||||
|
||||
if (!link->sactive) {
|
||||
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
|
||||
} else {
|
||||
link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
|
||||
if (unlikely(link->eh_info.err_mask))
|
||||
qc->flags |= ATA_QCFLAG_FAILED;
|
||||
}
|
||||
|
||||
dev->sata_dev.sstatus = resp->sstatus;
|
||||
dev->sata_dev.serror = resp->serror;
|
||||
dev->sata_dev.scontrol = resp->scontrol;
|
||||
|
@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
|
|||
SAS_DPRINTK("%s: SAS error %x\n", __func__,
|
||||
stat->stat);
|
||||
/* We saw a SAS error. Send a vague error. */
|
||||
qc->err_mask = ac;
|
||||
if (!link->sactive) {
|
||||
qc->err_mask = ac;
|
||||
} else {
|
||||
link->eh_info.err_mask |= AC_ERR_DEV;
|
||||
qc->flags |= ATA_QCFLAG_FAILED;
|
||||
}
|
||||
|
||||
dev->sata_dev.tf.feature = 0x04; /* status err */
|
||||
dev->sata_dev.tf.command = ATA_ERR;
|
||||
}
|
||||
|
@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
|
||||
unsigned long deadline)
|
||||
{
|
||||
struct ata_port *ap = link->ap;
|
||||
struct domain_device *dev = ap->private_data;
|
||||
struct sas_internal *i =
|
||||
to_sas_internal(dev->port->ha->core.shost->transportt);
|
||||
int res = TMF_RESP_FUNC_FAILED;
|
||||
int ret = 0;
|
||||
|
||||
if (i->dft->lldd_ata_soft_reset)
|
||||
res = i->dft->lldd_ata_soft_reset(dev);
|
||||
|
||||
if (res != TMF_RESP_FUNC_COMPLETE) {
|
||||
SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
|
||||
ret = -EAGAIN;
|
||||
}
|
||||
|
||||
switch (dev->sata_dev.command_set) {
|
||||
case ATA_COMMAND_SET:
|
||||
SAS_DPRINTK("%s: Found ATA device.\n", __func__);
|
||||
*class = ATA_DEV_ATA;
|
||||
break;
|
||||
case ATAPI_COMMAND_SET:
|
||||
SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
|
||||
*class = ATA_DEV_ATAPI;
|
||||
break;
|
||||
default:
|
||||
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
|
||||
__func__, dev->sata_dev.command_set);
|
||||
*class = ATA_DEV_UNKNOWN;
|
||||
break;
|
||||
}
|
||||
|
||||
ap->cbl = ATA_CBL_SATA;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
|
||||
{
|
||||
if (qc->flags & ATA_QCFLAG_FAILED)
|
||||
|
@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
|
|||
|
||||
static struct ata_port_operations sas_sata_ops = {
|
||||
.prereset = ata_std_prereset,
|
||||
.softreset = NULL,
|
||||
.softreset = sas_ata_soft_reset,
|
||||
.hardreset = sas_ata_hard_reset,
|
||||
.postreset = ata_std_postreset,
|
||||
.error_handler = ata_std_error_handler,
|
||||
|
|
|
@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
|
|||
int sas_init_events(struct sas_ha_struct *sas_ha);
|
||||
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
|
||||
|
||||
void sas_deform_port(struct asd_sas_phy *phy);
|
||||
void sas_deform_port(struct asd_sas_phy *phy, int gone);
|
||||
|
||||
void sas_porte_bytes_dmaed(struct work_struct *work);
|
||||
void sas_porte_broadcast_rcvd(struct work_struct *work);
|
||||
|
|
|
@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
|
|||
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
|
||||
&phy->phy_events_pending);
|
||||
phy->error = 0;
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 1);
|
||||
}
|
||||
|
||||
static void sas_phye_oob_done(struct work_struct *work)
|
||||
|
@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
|
|||
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
|
||||
&phy->phy_events_pending);
|
||||
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 1);
|
||||
|
||||
if (!port && phy->enabled && i->dft->lldd_control_phy) {
|
||||
phy->error++;
|
||||
|
|
|
@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
|
|||
|
||||
if (port) {
|
||||
if (!phy_is_wideport_member(port, phy))
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 0);
|
||||
else {
|
||||
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
|
||||
__func__, phy->id, phy->port->id,
|
||||
|
@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
|
|||
* This is called when the physical link to the other phy has been
|
||||
* lost (on this phy), in Event thread context. We cannot delay here.
|
||||
*/
|
||||
void sas_deform_port(struct asd_sas_phy *phy)
|
||||
void sas_deform_port(struct asd_sas_phy *phy, int gone)
|
||||
{
|
||||
struct sas_ha_struct *sas_ha = phy->ha;
|
||||
struct asd_sas_port *port = phy->port;
|
||||
struct sas_internal *si =
|
||||
to_sas_internal(sas_ha->core.shost->transportt);
|
||||
struct domain_device *dev;
|
||||
unsigned long flags;
|
||||
|
||||
if (!port)
|
||||
return; /* done by a phy event */
|
||||
|
||||
if (port->port_dev)
|
||||
port->port_dev->pathways--;
|
||||
dev = port->port_dev;
|
||||
if (dev)
|
||||
dev->pathways--;
|
||||
|
||||
if (port->num_phys == 1) {
|
||||
if (dev && gone)
|
||||
dev->gone = 1;
|
||||
sas_unregister_domain_devices(port);
|
||||
sas_port_delete(port->port);
|
||||
port->port = NULL;
|
||||
} else
|
||||
sas_port_delete_phy(port->port, phy->phy);
|
||||
|
||||
|
||||
if (si->dft->lldd_port_deformed)
|
||||
si->dft->lldd_port_deformed(phy);
|
||||
|
||||
|
@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
|
|||
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
|
||||
&phy->port_events_pending);
|
||||
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 1);
|
||||
}
|
||||
|
||||
void sas_porte_timer_event(struct work_struct *work)
|
||||
|
@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
|
|||
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
|
||||
&phy->port_events_pending);
|
||||
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 1);
|
||||
}
|
||||
|
||||
void sas_porte_hard_reset(struct work_struct *work)
|
||||
|
@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
|
|||
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
|
||||
&phy->port_events_pending);
|
||||
|
||||
sas_deform_port(phy);
|
||||
sas_deform_port(phy, 1);
|
||||
}
|
||||
|
||||
/* ---------- SAS port registration ---------- */
|
||||
|
@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
|
|||
|
||||
for (i = 0; i < sas_ha->num_phys; i++)
|
||||
if (sas_ha->sas_phy[i]->port)
|
||||
sas_deform_port(sas_ha->sas_phy[i]);
|
||||
sas_deform_port(sas_ha->sas_phy[i], 0);
|
||||
|
||||
}
|
||||
|
|
|
@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
|
|||
struct sas_ha_struct *sas_ha = dev->port->ha;
|
||||
struct sas_task *task;
|
||||
|
||||
/* If the device fell off, no sense in issuing commands */
|
||||
if (dev->gone) {
|
||||
cmd->result = DID_BAD_TARGET << 16;
|
||||
scsi_done(cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dev_is_sata(dev)) {
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* If the device fell off, no sense in issuing commands */
|
||||
if (dev->gone) {
|
||||
cmd->result = DID_BAD_TARGET << 16;
|
||||
scsi_done(cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
res = -ENOMEM;
|
||||
task = sas_create_task(cmd, dev, GFP_ATOMIC);
|
||||
if (!task)
|
||||
|
|
|
@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
|
|||
downloads using bsg */
|
||||
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
|
||||
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
|
||||
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
|
||||
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
|
||||
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
|
||||
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
|
||||
|
@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
|
|||
(1 << LPFC_USER_LINK_SPEED_AUTO))
|
||||
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
|
||||
|
||||
enum nemb_type {
|
||||
nemb_mse = 1,
|
||||
nemb_hbd
|
||||
};
|
||||
|
||||
enum mbox_type {
|
||||
mbox_rd = 1,
|
||||
mbox_wr
|
||||
};
|
||||
|
||||
enum dma_type {
|
||||
dma_mbox = 1,
|
||||
dma_ebuf
|
||||
};
|
||||
|
||||
enum sta_type {
|
||||
sta_pre_addr = 1,
|
||||
sta_pos_addr
|
||||
};
|
||||
|
||||
struct lpfc_mbox_ext_buf_ctx {
|
||||
uint32_t state;
|
||||
#define LPFC_BSG_MBOX_IDLE 0
|
||||
#define LPFC_BSG_MBOX_HOST 1
|
||||
#define LPFC_BSG_MBOX_PORT 2
|
||||
#define LPFC_BSG_MBOX_DONE 3
|
||||
#define LPFC_BSG_MBOX_ABTS 4
|
||||
enum nemb_type nembType;
|
||||
enum mbox_type mboxType;
|
||||
uint32_t numBuf;
|
||||
uint32_t mbxTag;
|
||||
uint32_t seqNum;
|
||||
struct lpfc_dmabuf *mbx_dmabuf;
|
||||
struct list_head ext_dmabuf_list;
|
||||
};
|
||||
|
||||
struct lpfc_hba {
|
||||
/* SCSI interface function jump table entries */
|
||||
int (*lpfc_new_scsi_buf)
|
||||
|
@ -589,6 +626,7 @@ struct lpfc_hba {
|
|||
|
||||
MAILBOX_t *mbox;
|
||||
uint32_t *mbox_ext;
|
||||
struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
|
||||
uint32_t ha_copy;
|
||||
struct _PCB *pcb;
|
||||
struct _IOCB *IOCBs;
|
||||
|
@ -659,6 +697,7 @@ struct lpfc_hba {
|
|||
uint32_t cfg_hostmem_hgp;
|
||||
uint32_t cfg_log_verbose;
|
||||
uint32_t cfg_aer_support;
|
||||
uint32_t cfg_sriov_nr_virtfn;
|
||||
uint32_t cfg_iocb_cnt;
|
||||
uint32_t cfg_suppress_link_up;
|
||||
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
|
||||
|
@ -706,7 +745,6 @@ struct lpfc_hba {
|
|||
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
|
||||
|
||||
int brd_no; /* FC board number */
|
||||
|
||||
char SerialNumber[32]; /* adapter Serial Number */
|
||||
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
|
||||
char ModelDesc[256]; /* Model Description */
|
||||
|
@ -778,6 +816,9 @@ struct lpfc_hba {
|
|||
uint16_t vpi_base;
|
||||
uint16_t vfi_base;
|
||||
unsigned long *vpi_bmask; /* vpi allocation table */
|
||||
uint16_t *vpi_ids;
|
||||
uint16_t vpi_count;
|
||||
struct list_head lpfc_vpi_blk_list;
|
||||
|
||||
/* Data structure used by fabric iocb scheduler */
|
||||
struct list_head fabric_iocb_list;
|
||||
|
|
|
@ -754,6 +754,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
|
|||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
|
||||
* @phba: lpfc_hba pointer.
|
||||
*
|
||||
* Description:
|
||||
* Request SLI4 interface type-2 device to perform a physical register set
|
||||
* access.
|
||||
*
|
||||
* Returns:
|
||||
* zero for success
|
||||
**/
|
||||
static ssize_t
|
||||
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
|
||||
{
|
||||
struct completion online_compl;
|
||||
uint32_t reg_val;
|
||||
int status = 0;
|
||||
int rc;
|
||||
|
||||
if (!phba->cfg_enable_hba_reset)
|
||||
return -EIO;
|
||||
|
||||
if ((phba->sli_rev < LPFC_SLI_REV4) ||
|
||||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
||||
LPFC_SLI_INTF_IF_TYPE_2))
|
||||
return -EPERM;
|
||||
|
||||
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
|
||||
|
||||
if (status != 0)
|
||||
return status;
|
||||
|
||||
/* wait for the device to be quiesced before firmware reset */
|
||||
msleep(100);
|
||||
|
||||
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PDEV_CTL_OFFSET);
|
||||
|
||||
if (opcode == LPFC_FW_DUMP)
|
||||
reg_val |= LPFC_FW_DUMP_REQUEST;
|
||||
else if (opcode == LPFC_FW_RESET)
|
||||
reg_val |= LPFC_CTL_PDEV_CTL_FRST;
|
||||
else if (opcode == LPFC_DV_RESET)
|
||||
reg_val |= LPFC_CTL_PDEV_CTL_DRST;
|
||||
|
||||
writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PDEV_CTL_OFFSET);
|
||||
/* flush */
|
||||
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
|
||||
|
||||
/* delay driver action following IF_TYPE_2 reset */
|
||||
msleep(100);
|
||||
|
||||
init_completion(&online_compl);
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
LPFC_EVT_ONLINE);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
wait_for_completion(&online_compl);
|
||||
|
||||
if (status != 0)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_nport_evt_cnt_show - Return the number of nport events
|
||||
* @dev: class device that is converted into a Scsi_host.
|
||||
|
@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
|
|||
return -EINVAL;
|
||||
else
|
||||
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
|
||||
else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
|
||||
status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
|
||||
else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
|
||||
status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
|
||||
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
|
||||
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1321,6 +1394,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
|
|||
"" : "Not ");
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
|
||||
* @dev: class converted to a Scsi_host structure.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: on return contains the formatted support level.
|
||||
*
|
||||
* Description:
|
||||
* Returns the maximum number of virtual functions a physical function can
|
||||
* support, 0 will be returned if called on virtual function.
|
||||
*
|
||||
* Returns: size of formatted string.
|
||||
**/
|
||||
static ssize_t
|
||||
lpfc_sriov_hw_max_virtfn_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
|
||||
struct lpfc_rsrc_desc_pcie *desc;
|
||||
uint32_t max_nr_virtfn;
|
||||
uint32_t desc_count;
|
||||
int length, rc, i;
|
||||
|
||||
if ((phba->sli_rev < LPFC_SLI_REV4) ||
|
||||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
||||
LPFC_SLI_INTF_IF_TYPE_2))
|
||||
return -EPERM;
|
||||
|
||||
if (!pdev->is_physfn)
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", 0);
|
||||
|
||||
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mboxq)
|
||||
return -ENOMEM;
|
||||
|
||||
/* get the maximum number of virtfn support by physfn */
|
||||
length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
shdr = (union lpfc_sli4_cfg_shdr *)
|
||||
&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
|
||||
bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
|
||||
phba->sli4_hba.iov.pf_number + 1);
|
||||
|
||||
get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
|
||||
bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
|
||||
LPFC_CFG_TYPE_CURRENT_ACTIVE);
|
||||
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
|
||||
lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
|
||||
|
||||
if (rc != MBX_TIMEOUT) {
|
||||
/* check return status */
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
|
||||
&shdr->response);
|
||||
if (shdr_status || shdr_add_status || rc)
|
||||
goto error_out;
|
||||
|
||||
} else
|
||||
goto error_out;
|
||||
|
||||
desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
|
||||
|
||||
for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
|
||||
desc = (struct lpfc_rsrc_desc_pcie *)
|
||||
&get_prof_cfg->u.response.prof_cfg.desc[i];
|
||||
if (LPFC_RSRC_DESC_TYPE_PCIE ==
|
||||
bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
|
||||
max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
|
||||
desc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i < LPFC_RSRC_DESC_MAX_NUM) {
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
|
||||
}
|
||||
|
||||
error_out:
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_param_show - Return a cfg attribute value in decimal
|
||||
*
|
||||
|
@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
|
|||
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
|
||||
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
|
||||
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
|
||||
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
|
||||
lpfc_sriov_hw_max_virtfn_show, NULL);
|
||||
|
||||
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
|
||||
|
||||
|
@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
|
|||
*
|
||||
* @dev: class device that is converted into a Scsi_host.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: containing the string "selective".
|
||||
* @buf: containing enable or disable aer flag.
|
||||
* @count: unused variable.
|
||||
*
|
||||
* Description:
|
||||
|
@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
|
|||
/**
|
||||
* lpfc_aer_support_init - Set the initial adapters aer support flag
|
||||
* @phba: lpfc_hba pointer.
|
||||
* @val: link speed value.
|
||||
* @val: enable aer or disable aer flag.
|
||||
*
|
||||
* Description:
|
||||
* If val is in a valid range [0,1], then set the adapter's initial
|
||||
|
@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
|
|||
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
|
||||
* @dev: class device that is converted into a Scsi_host.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: containing the string "selective".
|
||||
* @buf: containing flag 1 for aer cleanup state.
|
||||
* @count: unused variable.
|
||||
*
|
||||
* Description:
|
||||
|
@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
|
|||
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
|
||||
lpfc_aer_cleanup_state);
|
||||
|
||||
/**
|
||||
* lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
|
||||
*
|
||||
* @dev: class device that is converted into a Scsi_host.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: containing the string the number of vfs to be enabled.
|
||||
* @count: unused variable.
|
||||
*
|
||||
* Description:
|
||||
* When this api is called either through user sysfs, the driver shall
|
||||
* try to enable or disable SR-IOV virtual functions according to the
|
||||
* following:
|
||||
*
|
||||
* If zero virtual function has been enabled to the physical function,
|
||||
* the driver shall invoke the pci enable virtual function api trying
|
||||
* to enable the virtual functions. If the nr_vfn provided is greater
|
||||
* than the maximum supported, the maximum virtual function number will
|
||||
* be used for invoking the api; otherwise, the nr_vfn provided shall
|
||||
* be used for invoking the api. If the api call returned success, the
|
||||
* actual number of virtual functions enabled will be set to the driver
|
||||
* cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
|
||||
* cfg_sriov_nr_virtfn remains zero.
|
||||
*
|
||||
* If none-zero virtual functions have already been enabled to the
|
||||
* physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
|
||||
* -EINVAL will be returned and the driver does nothing;
|
||||
*
|
||||
* If the nr_vfn provided is zero and none-zero virtual functions have
|
||||
* been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
|
||||
* disabling virtual function api shall be invoded to disable all the
|
||||
* virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
|
||||
* zero. Otherwise, if zero virtual function has been enabled, do
|
||||
* nothing.
|
||||
*
|
||||
* Returns:
|
||||
* length of the buf on success if val is in range the intended mode
|
||||
* is supported.
|
||||
* -EINVAL if val out of range or intended mode is not supported.
|
||||
**/
|
||||
static ssize_t
|
||||
lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
int val = 0, rc = -EINVAL;
|
||||
|
||||
/* Sanity check on user data */
|
||||
if (!isdigit(buf[0]))
|
||||
return -EINVAL;
|
||||
if (sscanf(buf, "%i", &val) != 1)
|
||||
return -EINVAL;
|
||||
if (val < 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Request disabling virtual functions */
|
||||
if (val == 0) {
|
||||
if (phba->cfg_sriov_nr_virtfn > 0) {
|
||||
pci_disable_sriov(pdev);
|
||||
phba->cfg_sriov_nr_virtfn = 0;
|
||||
}
|
||||
return strlen(buf);
|
||||
}
|
||||
|
||||
/* Request enabling virtual functions */
|
||||
if (phba->cfg_sriov_nr_virtfn > 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3018 There are %d virtual functions "
|
||||
"enabled on physical function.\n",
|
||||
phba->cfg_sriov_nr_virtfn);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
if (val <= LPFC_MAX_VFN_PER_PFN)
|
||||
phba->cfg_sriov_nr_virtfn = val;
|
||||
else {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3019 Enabling %d virtual functions is not "
|
||||
"allowed.\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
|
||||
if (rc) {
|
||||
phba->cfg_sriov_nr_virtfn = 0;
|
||||
rc = -EPERM;
|
||||
} else
|
||||
rc = strlen(buf);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
|
||||
module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
|
||||
lpfc_param_show(sriov_nr_virtfn)
|
||||
|
||||
/**
|
||||
* lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
|
||||
* @phba: lpfc_hba pointer.
|
||||
* @val: link speed value.
|
||||
*
|
||||
* Description:
|
||||
* If val is in a valid range [0,255], then set the adapter's initial
|
||||
* cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
|
||||
* number shall be used instead. It will be up to the driver's probe_one
|
||||
* routine to determine whether the device's SR-IOV is supported or not.
|
||||
*
|
||||
* Returns:
|
||||
* zero if val saved.
|
||||
* -EINVAL val out of range
|
||||
**/
|
||||
static int
|
||||
lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
|
||||
{
|
||||
if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
|
||||
phba->cfg_sriov_nr_virtfn = val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3017 Enabling %d virtual functions is not "
|
||||
"allowed.\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
|
||||
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
|
||||
|
||||
/*
|
||||
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
|
||||
# Value range is [2,3]. Default value is 3.
|
||||
|
@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
|
|||
&dev_attr_lpfc_prot_sg_seg_cnt,
|
||||
&dev_attr_lpfc_aer_support,
|
||||
&dev_attr_lpfc_aer_state_cleanup,
|
||||
&dev_attr_lpfc_sriov_nr_virtfn,
|
||||
&dev_attr_lpfc_suppress_link_up,
|
||||
&dev_attr_lpfc_iocb_cnt,
|
||||
&dev_attr_iocb_hw,
|
||||
|
@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
|
|||
&dev_attr_lpfc_fips_level,
|
||||
&dev_attr_lpfc_fips_rev,
|
||||
&dev_attr_lpfc_dss,
|
||||
&dev_attr_lpfc_sriov_hw_max_virtfn,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
|
|||
.name = "mbox",
|
||||
.mode = S_IRUSR | S_IWUSR,
|
||||
},
|
||||
.size = MAILBOX_CMD_SIZE,
|
||||
.size = MAILBOX_SYSFS_MAX,
|
||||
.read = sysfs_mbox_read,
|
||||
.write = sysfs_mbox_write,
|
||||
};
|
||||
|
@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
|
|||
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
|
||||
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
|
||||
lpfc_aer_support_init(phba, lpfc_aer_support);
|
||||
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
|
||||
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
|
||||
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
|
||||
phba->cfg_enable_dss = 1;
|
||||
|
|
File diff suppressed because it is too large
@ -24,15 +24,17 @@
|
|||
* These are the vendor unique structures passed in using the bsg
|
||||
* FC_BSG_HST_VENDOR message code type.
|
||||
*/
|
||||
#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
|
||||
#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
|
||||
#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
|
||||
#define LPFC_BSG_VENDOR_DIAG_MODE 4
|
||||
#define LPFC_BSG_VENDOR_DIAG_TEST 5
|
||||
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
|
||||
#define LPFC_BSG_VENDOR_MBOX 7
|
||||
#define LPFC_BSG_VENDOR_MENLO_CMD 8
|
||||
#define LPFC_BSG_VENDOR_MENLO_DATA 9
|
||||
#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
|
||||
#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
|
||||
#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
|
||||
#define LPFC_BSG_VENDOR_DIAG_MODE 4
|
||||
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
|
||||
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
|
||||
#define LPFC_BSG_VENDOR_MBOX 7
|
||||
#define LPFC_BSG_VENDOR_MENLO_CMD 8
|
||||
#define LPFC_BSG_VENDOR_MENLO_DATA 9
|
||||
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
|
||||
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
|
||||
|
||||
struct set_ct_event {
|
||||
uint32_t command;
|
||||
|
@ -67,10 +69,25 @@ struct diag_mode_set {
|
|||
uint32_t timeout;
|
||||
};
|
||||
|
||||
struct sli4_link_diag {
|
||||
uint32_t command;
|
||||
uint32_t timeout;
|
||||
uint32_t test_id;
|
||||
uint32_t loops;
|
||||
uint32_t test_version;
|
||||
uint32_t error_action;
|
||||
};
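To show how a BSG caller might populate this request, here is a hedged sketch that mirrors the structure locally; the vendor command value (11) comes from LPFC_BSG_VENDOR_LINK_DIAG_TEST above, while the timeout and test_id values are illustrative placeholders, and the fc_bsg job plumbing that actually delivers the request to the driver is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of struct sli4_link_diag above (sketch only). */
struct sli4_link_diag_req {
	uint32_t command;
	uint32_t timeout;
	uint32_t test_id;
	uint32_t loops;
	uint32_t test_version;
	uint32_t error_action;
};

static void fill_link_diag_req(struct sli4_link_diag_req *req)
{
	memset(req, 0, sizeof(*req));
	req->command = 11;	/* LPFC_BSG_VENDOR_LINK_DIAG_TEST */
	req->timeout = 60;	/* seconds; arbitrary for this sketch */
	req->test_id = 0;	/* placeholder, not taken from the source */
	req->loops = 1;
	req->test_version = 0;
	req->error_action = 0;
}

int main(void)
{
	struct sli4_link_diag_req req;

	fill_link_diag_req(&req);
	printf("command=%u loops=%u\n", req.command, req.loops);
	return 0;
}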
|
||||
|
||||
struct diag_mode_test {
|
||||
uint32_t command;
|
||||
};
|
||||
|
||||
struct diag_status {
|
||||
uint32_t mbox_status;
|
||||
uint32_t shdr_status;
|
||||
uint32_t shdr_add_status;
|
||||
};
|
||||
|
||||
#define LPFC_WWNN_TYPE 0
|
||||
#define LPFC_WWPN_TYPE 1
|
||||
|
||||
|
@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
|
|||
};
|
||||
|
||||
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
|
||||
|
||||
/* BSG mailbox request header */
|
||||
struct dfc_mbox_req {
|
||||
uint32_t command;
|
||||
uint32_t mbOffset;
|
||||
uint32_t inExtWLen;
|
||||
uint32_t outExtWLen;
|
||||
uint32_t extMboxTag;
|
||||
uint32_t extSeqNum;
|
||||
};
|
||||
|
||||
/* Used for menlo command or menlo data. The xri is only used for menlo data */
|
||||
|
@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
|
|||
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
|
||||
};
|
||||
|
||||
struct lpfc_sli_config_subcmd_hbd {
|
||||
struct lpfc_sli_config_hbd {
|
||||
uint32_t buf_len;
|
||||
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
|
||||
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
|
||||
|
@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
|
|||
uint32_t reserved5;
|
||||
};
|
||||
|
||||
struct lpfc_sli_config_generic {
|
||||
struct lpfc_sli_config_emb0_subsys {
|
||||
struct lpfc_sli_config_hdr sli_config_hdr;
|
||||
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
|
||||
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
|
||||
uint32_t padding;
|
||||
uint32_t word64;
|
||||
#define lpfc_emb0_subcmnd_opcode_SHIFT 0
|
||||
#define lpfc_emb0_subcmnd_opcode_MASK 0xff
|
||||
#define lpfc_emb0_subcmnd_opcode_WORD word64
|
||||
#define lpfc_emb0_subcmnd_subsys_SHIFT 8
|
||||
#define lpfc_emb0_subcmnd_subsys_MASK 0xff
|
||||
#define lpfc_emb0_subcmnd_subsys_WORD word64
|
||||
/* Subsystem FCOE (0x0C) OpCodes */
|
||||
#define SLI_CONFIG_SUBSYS_FCOE 0x0C
|
||||
#define FCOE_OPCODE_READ_FCF 0x08
|
||||
#define FCOE_OPCODE_ADD_FCF 0x09
|
||||
};
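The _SHIFT/_MASK/_WORD triplets above are meant to be consumed by the driver's generic bit-field accessors (bf_set()/bf_get() in lpfc_hw4.h). The sketch below only approximates that pattern on a stand-in structure; it is not the driver's exact macro text.

#include <stdint.h>
#include <stdio.h>

/* Approximation of the driver's bf_set()/bf_get() accessor pattern. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))

struct demo {
	uint32_t word64;
#define demo_opcode_SHIFT	0
#define demo_opcode_MASK	0xff
#define demo_opcode_WORD	word64
#define demo_subsys_SHIFT	8
#define demo_subsys_MASK	0xff
#define demo_subsys_WORD	word64
};

int main(void)
{
	struct demo d = { 0 };

	bf_set(demo_subsys, &d, 0x0C);	/* SLI_CONFIG_SUBSYS_FCOE */
	bf_set(demo_opcode, &d, 0x08);	/* FCOE_OPCODE_READ_FCF */
	printf("word64=0x%08x opcode=0x%02x\n", d.word64,
	       (unsigned)bf_get(demo_opcode, &d));
	return 0;
}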
|
||||
|
||||
struct lpfc_sli_config_subcmnd {
|
||||
struct lpfc_sli_config_emb1_subsys {
|
||||
struct lpfc_sli_config_hdr sli_config_hdr;
|
||||
uint32_t word6;
|
||||
#define lpfc_subcmnd_opcode_SHIFT 0
|
||||
#define lpfc_subcmnd_opcode_MASK 0xff
|
||||
#define lpfc_subcmnd_opcode_WORD word6
|
||||
#define lpfc_subcmnd_subsys_SHIFT 8
|
||||
#define lpfc_subcmnd_subsys_MASK 0xff
|
||||
#define lpfc_subcmnd_subsys_WORD word6
|
||||
#define lpfc_emb1_subcmnd_opcode_SHIFT 0
|
||||
#define lpfc_emb1_subcmnd_opcode_MASK 0xff
|
||||
#define lpfc_emb1_subcmnd_opcode_WORD word6
|
||||
#define lpfc_emb1_subcmnd_subsys_SHIFT 8
|
||||
#define lpfc_emb1_subcmnd_subsys_MASK 0xff
|
||||
#define lpfc_emb1_subcmnd_subsys_WORD word6
|
||||
/* Subsystem COMN (0x01) OpCodes */
|
||||
#define SLI_CONFIG_SUBSYS_COMN 0x01
|
||||
#define COMN_OPCODE_READ_OBJECT 0xAB
|
||||
#define COMN_OPCODE_WRITE_OBJECT 0xAC
|
||||
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
|
||||
#define COMN_OPCODE_DELETE_OBJECT 0xAE
|
||||
uint32_t timeout;
|
||||
uint32_t request_length;
|
||||
uint32_t word9;
|
||||
|
@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
|
|||
uint32_t rd_offset;
|
||||
uint32_t obj_name[26];
|
||||
uint32_t hbd_count;
|
||||
#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
|
||||
struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
|
||||
#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
|
||||
struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
|
||||
};
|
||||
|
||||
struct lpfc_sli_config_mbox {
|
||||
|
@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
|
|||
#define lpfc_mqe_command_MASK 0x000000FF
|
||||
#define lpfc_mqe_command_WORD word0
|
||||
union {
|
||||
struct lpfc_sli_config_generic sli_config_generic;
|
||||
struct lpfc_sli_config_subcmnd sli_config_subcmnd;
|
||||
struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
|
||||
struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
|
||||
} un;
|
||||
};
|
||||
|
||||
/* driver only */
|
||||
#define SLI_CONFIG_NOT_HANDLED 0
|
||||
#define SLI_CONFIG_HANDLED 1
|
||||
|
|
|
@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
|
|||
void lpfc_supported_pages(struct lpfcMboxq *);
|
||||
void lpfc_pc_sli4_params(struct lpfcMboxq *);
|
||||
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
|
||||
uint16_t, uint16_t, bool);
|
||||
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
|
||||
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
|
||||
|
@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
|
|||
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
|
||||
|
||||
int lpfc_config_port_prep(struct lpfc_hba *);
|
||||
void lpfc_update_vport_wwn(struct lpfc_vport *vport);
|
||||
int lpfc_config_port_post(struct lpfc_hba *);
|
||||
int lpfc_hba_down_prep(struct lpfc_hba *);
|
||||
int lpfc_hba_down_post(struct lpfc_hba *);
|
||||
|
@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
|
|||
uint32_t, uint32_t);
|
||||
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
|
||||
|
||||
/* SLI4 if_type 2 externs. */
|
||||
int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
|
||||
int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
|
||||
|
||||
/* externs BlockGuard */
|
||||
extern char *_dump_buf_data;
|
||||
extern unsigned long _dump_buf_data_order;
|
||||
|
@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
|
|||
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
|
||||
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
|
||||
uint32_t);
|
||||
int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
|
||||
/* functions to support SR-IOV */
|
||||
int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
|
||||
|
|
|
@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
|
|||
icmd->ulpLe = 1;
|
||||
icmd->ulpClass = CLASS3;
|
||||
icmd->ulpContext = ndlp->nlp_rpi;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
/* For GEN_REQUEST64_CR, use the RPI */
|
||||
|
|
|
@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
|
|||
/* Get fast-path complete queue information */
|
||||
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
|
||||
"Fast-path FCP CQ information:\n");
|
||||
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
|
||||
fcp_qidx = 0;
|
||||
do {
|
||||
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
|
||||
"Associated EQID[%02d]:\n",
|
||||
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
|
||||
|
@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
|
|||
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
|
||||
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
|
||||
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
|
||||
}
|
||||
} while (++fcp_qidx < phba->cfg_fcp_eq_count);
|
||||
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
|
||||
|
||||
/* Get mailbox queue information */
|
||||
|
@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
|||
goto pass_check;
|
||||
}
|
||||
/* FCP complete queue */
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
|
||||
qidx = 0;
|
||||
do {
|
||||
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
|
@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
|||
phba->sli4_hba.fcp_cq[qidx];
|
||||
goto pass_check;
|
||||
}
|
||||
}
|
||||
} while (++qidx < phba->cfg_fcp_eq_count);
|
||||
goto error_out;
|
||||
break;
|
||||
case LPFC_IDIAG_MQ:
|
||||
|
|
|
@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
|
|||
icmd->un.elsreq64.myID = vport->fc_myDID;
|
||||
|
||||
/* For ELS_REQUEST64_CR, use the VPI by default */
|
||||
icmd->ulpContext = vport->vpi + phba->vpi_base;
|
||||
icmd->ulpContext = phba->vpi_ids[vport->vpi];
|
||||
icmd->ulpCt_h = 0;
|
||||
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
|
||||
if (elscmd == ELS_CMD_ECHO)
|
||||
|
@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
|
|||
rc = -ENOMEM;
|
||||
goto fail_free_dmabuf;
|
||||
}
|
||||
|
||||
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mboxq) {
|
||||
rc = -ENOMEM;
|
||||
|
@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
|
|||
{
|
||||
struct lpfc_vport *vport;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
/* The physical ports are always vpi 0 - translate is unnecessary. */
|
||||
if (vpi > 0) {
|
||||
/*
|
||||
* Translate the physical vpi to the logical vpi. The
|
||||
* vport stores the logical vpi.
|
||||
*/
|
||||
for (i = 0; i < phba->max_vpi; i++) {
|
||||
if (vpi == phba->vpi_ids[i])
|
||||
break;
|
||||
}
|
||||
|
||||
if (i >= phba->max_vpi) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
|
||||
"2936 Could not find Vport mapped "
|
||||
"to vpi %d\n", vpi);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, flags);
|
||||
list_for_each_entry(vport, &phba->port_list, listentry) {
|
||||
|
@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
vport = phba->pport;
|
||||
else
|
||||
vport = lpfc_find_vport_by_vpid(phba,
|
||||
icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
|
||||
icmd->unsli3.rcvsli3.vpi);
|
||||
}
|
||||
|
||||
/* If there are no BDEs associated
|
||||
* with this IOCB, there is nothing to do.
|
||||
*/
|
||||
|
@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
|
||||
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
|
||||
/* Set the ulpContext to the vpi */
|
||||
elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
|
||||
elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
|
||||
} else {
|
||||
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
|
||||
icmd->ulpCt_h = 1;
|
||||
|
|
|
@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
|
|||
/* Clean up any firmware default rpi's */
|
||||
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mb) {
|
||||
lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
|
||||
lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
|
||||
mb->vport = vport;
|
||||
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
|
||||
|
@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
|
||||
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
|
||||
sizeof (struct serv_parm));
|
||||
if (phba->cfg_soft_wwnn)
|
||||
u64_to_wwn(phba->cfg_soft_wwnn,
|
||||
vport->fc_sparam.nodeName.u.wwn);
|
||||
if (phba->cfg_soft_wwpn)
|
||||
u64_to_wwn(phba->cfg_soft_wwpn,
|
||||
vport->fc_sparam.portName.u.wwn);
|
||||
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
|
||||
sizeof(vport->fc_nodename));
|
||||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||||
sizeof(vport->fc_portname));
|
||||
lpfc_update_vport_wwn(vport);
|
||||
if (vport->port_type == LPFC_PHYSICAL_PORT) {
|
||||
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
|
||||
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
|
||||
|
@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
return;
|
||||
}
|
||||
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
||||
ndlp->nlp_type |= NLP_FABRIC;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
|
||||
|
@ -3504,7 +3496,8 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
return;
|
||||
}
|
||||
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
||||
ndlp->nlp_type |= NLP_FABRIC;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
|
||||
|
@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|||
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
|
||||
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
|
||||
|
||||
|
||||
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
|
||||
fc_remote_port_rolechg(rport, rport_ids.roles);
|
||||
|
||||
|
@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|||
struct lpfc_hba *phba = vport->phba;
|
||||
LPFC_MBOXQ_t *mbox;
|
||||
int rc;
|
||||
uint16_t rpi;
|
||||
|
||||
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
|
||||
/* SLI4 ports require the physical rpi value. */
|
||||
rpi = ndlp->nlp_rpi;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
|
||||
|
@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
|
|||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
|
||||
lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
|
||||
mbox);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->context1 = NULL;
|
||||
|
@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
|
|||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
|
||||
lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
|
||||
mbox);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->context1 = NULL;
|
||||
|
@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
|
|||
if (num_sent)
|
||||
return;
|
||||
|
||||
/*
|
||||
* For SLI3, cmpl_reg_vpi will set port_state to READY, and
|
||||
* continue discovery.
|
||||
*/
|
||||
/* Register the VPI for SLI3, NON-NPIV only. */
|
||||
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
|
||||
!(vport->fc_flag & FC_PT2PT) &&
|
||||
!(vport->fc_flag & FC_RSCN_MODE) &&
|
||||
|
@ -4943,7 +4939,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
|
|||
if (phba->sli_rev < LPFC_SLI_REV4) {
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
|
||||
lpfc_issue_reg_vpi(phba, vport);
|
||||
else { /* NPIV Not enabled */
|
||||
else {
|
||||
lpfc_issue_clear_la(phba, vport);
|
||||
vport->port_state = LPFC_VPORT_READY;
|
||||
}
|
||||
|
@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
pmb->context1 = NULL;
|
||||
pmb->context2 = NULL;
|
||||
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
||||
ndlp->nlp_type |= NLP_FABRIC;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
|
||||
|
@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
|
|||
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
|
||||
shost = lpfc_shost_from_vport(vports[i]);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
/*
|
||||
* If the CVL_RCVD bit is not set then we have sent the
|
||||
* flogi.
|
||||
* If dev_loss fires while we are waiting we do not want to
|
||||
* unreg the fcf.
|
||||
*/
|
||||
if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
|
||||
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
|
||||
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
|
||||
|
|
|
@ -64,6 +64,8 @@
|
|||
#define SLI3_IOCB_CMD_SIZE 128
|
||||
#define SLI3_IOCB_RSP_SIZE 64
|
||||
|
||||
#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
|
||||
#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
|
||||
|
||||
/* vendor ID used in SCSI netlink calls */
|
||||
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
|
||||
|
@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
|
|||
#define rrq_rxid_WORD rrq_exchg
|
||||
};
|
||||
|
||||
#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
|
||||
#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/
|
||||
|
||||
struct RTV_RSP { /* Structure is in Big Endian format */
|
||||
uint32_t ratov;
|
||||
|
@ -1199,7 +1203,9 @@ typedef struct {
|
|||
#define PCI_DEVICE_ID_BALIUS 0xe131
|
||||
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
|
||||
#define PCI_DEVICE_ID_LANCER_FC 0xe200
|
||||
#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
|
||||
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
|
||||
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
|
||||
#define PCI_DEVICE_ID_SAT_SMB 0xf011
|
||||
#define PCI_DEVICE_ID_SAT_MID 0xf015
|
||||
#define PCI_DEVICE_ID_RFLY 0xf095
|
||||
|
@ -3021,7 +3027,7 @@ typedef struct {
|
|||
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
|
||||
#define MAILBOX_HBA_EXT_OFFSET 0x100
|
||||
/* max mbox xmit size is a page size for sysfs IO operations */
|
||||
#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
|
||||
#define MAILBOX_SYSFS_MAX 4096
|
||||
|
||||
typedef union {
|
||||
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
|
||||
|
|
|
@ -170,6 +170,25 @@ struct lpfc_sli_intf {
|
|||
#define LPFC_PCI_FUNC3 3
|
||||
#define LPFC_PCI_FUNC4 4
|
||||
|
||||
/* SLI4 interface type-2 control register offsets */
|
||||
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
|
||||
#define LPFC_CTL_PORT_STA_OFFSET 0x404
|
||||
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
|
||||
#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
|
||||
#define LPFC_CTL_PORT_ER2_OFFSET 0x410
|
||||
#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
|
||||
|
||||
/* Some SLI4 interface type-2 PDEV_CTL register bits */
|
||||
#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
|
||||
#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
|
||||
#define LPFC_CTL_PDEV_CTL_DD 0x00000004
|
||||
#define LPFC_CTL_PDEV_CTL_LC 0x00000008
|
||||
#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
|
||||
#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
|
||||
#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
|
||||
|
||||
#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
|
||||
|
||||
/* Active interrupt test count */
|
||||
#define LPFC_ACT_INTR_CNT 4
|
||||
|
||||
|
@ -210,9 +229,26 @@ struct ulp_bde64 {
|
|||
|
||||
struct lpfc_sli4_flags {
|
||||
uint32_t word0;
|
||||
#define lpfc_fip_flag_SHIFT 0
|
||||
#define lpfc_fip_flag_MASK 0x00000001
|
||||
#define lpfc_fip_flag_WORD word0
|
||||
#define lpfc_idx_rsrc_rdy_SHIFT 0
|
||||
#define lpfc_idx_rsrc_rdy_MASK 0x00000001
|
||||
#define lpfc_idx_rsrc_rdy_WORD word0
|
||||
#define LPFC_IDX_RSRC_RDY 1
|
||||
#define lpfc_xri_rsrc_rdy_SHIFT 1
|
||||
#define lpfc_xri_rsrc_rdy_MASK 0x00000001
|
||||
#define lpfc_xri_rsrc_rdy_WORD word0
|
||||
#define LPFC_XRI_RSRC_RDY 1
|
||||
#define lpfc_rpi_rsrc_rdy_SHIFT 2
|
||||
#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
|
||||
#define lpfc_rpi_rsrc_rdy_WORD word0
|
||||
#define LPFC_RPI_RSRC_RDY 1
|
||||
#define lpfc_vpi_rsrc_rdy_SHIFT 3
|
||||
#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
|
||||
#define lpfc_vpi_rsrc_rdy_WORD word0
|
||||
#define LPFC_VPI_RSRC_RDY 1
|
||||
#define lpfc_vfi_rsrc_rdy_SHIFT 4
|
||||
#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
|
||||
#define lpfc_vfi_rsrc_rdy_WORD word0
|
||||
#define LPFC_VFI_RSRC_RDY 1
|
||||
};
|
||||
|
||||
struct sli4_bls_rsp {
|
||||
|
@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
|
|||
#define lpfc_mbox_hdr_version_SHIFT 0
|
||||
#define lpfc_mbox_hdr_version_MASK 0x000000FF
|
||||
#define lpfc_mbox_hdr_version_WORD word9
|
||||
#define lpfc_mbox_hdr_pf_num_SHIFT 16
|
||||
#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
|
||||
#define lpfc_mbox_hdr_pf_num_WORD word9
|
||||
#define lpfc_mbox_hdr_vh_num_SHIFT 24
|
||||
#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
|
||||
#define lpfc_mbox_hdr_vh_num_WORD word9
|
||||
#define LPFC_Q_CREATE_VERSION_2 2
|
||||
#define LPFC_Q_CREATE_VERSION_1 1
|
||||
#define LPFC_Q_CREATE_VERSION_0 0
|
||||
|
@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
|
|||
} response;
|
||||
};
|
||||
|
||||
/* Mailbox structures */
|
||||
/* Mailbox Header structures.
|
||||
* struct mbox_header is defined for first generation SLI4_CFG mailbox
|
||||
* calls deployed for BE-based ports.
|
||||
*
|
||||
* struct sli4_mbox_header is defined for second generation SLI4
|
||||
* ports that don't deploy the SLI4_CFG mechanism.
|
||||
*/
|
||||
struct mbox_header {
|
||||
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
|
||||
union lpfc_sli4_cfg_shdr cfg_shdr;
|
||||
};
|
||||
|
||||
#define LPFC_EXTENT_LOCAL 0
|
||||
#define LPFC_TIMEOUT_DEFAULT 0
|
||||
#define LPFC_EXTENT_VERSION_DEFAULT 0
|
||||
|
||||
/* Subsystem Definitions */
|
||||
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
|
||||
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
|
||||
|
@ -794,6 +846,13 @@ struct mbox_header {
|
|||
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
|
||||
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
|
||||
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
|
||||
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
|
||||
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
|
||||
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
|
||||
#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
|
||||
#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
|
||||
#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
|
||||
#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
|
||||
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
|
||||
|
||||
/* FCoE Opcodes */
|
||||
|
@ -808,6 +867,8 @@ struct mbox_header {
|
|||
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
|
||||
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
|
||||
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
|
||||
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
|
||||
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
|
||||
|
||||
/* Mailbox command structures */
|
||||
struct eq_context {
|
||||
|
@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
|
|||
} u;
|
||||
};
|
||||
|
||||
/* Start Gen 2 SLI4 Mailbox definitions: */
|
||||
|
||||
/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
|
||||
#define LPFC_RSC_TYPE_FCOE_VFI 0x20
|
||||
#define LPFC_RSC_TYPE_FCOE_VPI 0x21
|
||||
#define LPFC_RSC_TYPE_FCOE_RPI 0x22
|
||||
#define LPFC_RSC_TYPE_FCOE_XRI 0x23
|
||||
|
||||
struct lpfc_mbx_get_rsrc_extent_info {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
|
||||
#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
|
||||
} req;
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
|
||||
#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
|
||||
#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
|
||||
#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
|
||||
} rsp;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_id_range {
|
||||
uint32_t word5;
|
||||
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
|
||||
#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rsrc_id_word4_0_WORD word5
|
||||
#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
|
||||
#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rsrc_id_word4_1_WORD word5
|
||||
};
|
||||
|
||||
struct lpfc_mbx_set_link_diag_state {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word0;
|
||||
#define lpfc_mbx_set_diag_state_diag_SHIFT 0
|
||||
#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
|
||||
#define lpfc_mbx_set_diag_state_diag_WORD word0
|
||||
#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
|
||||
#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
|
||||
#define lpfc_mbx_set_diag_state_link_num_WORD word0
|
||||
#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
|
||||
#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
|
||||
#define lpfc_mbx_set_diag_state_link_type_WORD word0
|
||||
} req;
|
||||
struct {
|
||||
uint32_t word0;
|
||||
} rsp;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_set_link_diag_loopback {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word0;
|
||||
#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
|
||||
#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
|
||||
#define lpfc_mbx_set_diag_lpbk_type_WORD word0
|
||||
#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
|
||||
#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
|
||||
#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
|
||||
#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
|
||||
#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
|
||||
#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
|
||||
#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
|
||||
#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
|
||||
#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
|
||||
} req;
|
||||
struct {
|
||||
uint32_t word0;
|
||||
} rsp;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_run_link_diag_test {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word0;
|
||||
#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
|
||||
#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
|
||||
#define lpfc_mbx_run_diag_test_link_num_WORD word0
|
||||
#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
|
||||
#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
|
||||
#define lpfc_mbx_run_diag_test_link_type_WORD word0
|
||||
uint32_t word1;
|
||||
#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
|
||||
#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_run_diag_test_test_id_WORD word1
|
||||
#define lpfc_mbx_run_diag_test_loops_SHIFT 16
|
||||
#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_run_diag_test_loops_WORD word1
|
||||
uint32_t word2;
|
||||
#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
|
||||
#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_run_diag_test_test_ver_WORD word2
|
||||
#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
|
||||
#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
|
||||
#define lpfc_mbx_run_diag_test_err_act_WORD word2
|
||||
} req;
|
||||
struct {
|
||||
uint32_t word0;
|
||||
} rsp;
|
||||
} u;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct lpfc_mbx_alloc_rsrc_extents:
|
||||
* A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
|
||||
* 6 words of header + 4 words of shared subcommand header +
|
||||
* 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
|
||||
*
|
||||
* An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
|
||||
* for extents payload.
|
||||
*
|
||||
* 212/2 (bytes per extent) = 106 extents.
|
||||
* 106/2 (extents per word) = 53 words.
|
||||
* lpfc_id_range id is statically sized to 53.
|
||||
*
|
||||
* This mailbox definition is used for ALLOC or GET_ALLOCATED
|
||||
* extent ranges. For ALLOC, the type and cnt are required.
|
||||
* For GET_ALLOCATED, only the type is required.
|
||||
*/
|
||||
struct lpfc_mbx_alloc_rsrc_extents {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
|
||||
#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
|
||||
#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
|
||||
#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
|
||||
} req;
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_rsrc_cnt_SHIFT 0
|
||||
#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rsrc_cnt_WORD word4
|
||||
struct lpfc_id_range id[53];
|
||||
} rsp;
|
||||
} u;
|
||||
};
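As a quick sanity check of the sizing arithmetic in the comment above, here is a small standalone sketch; it mirrors lpfc_id_range locally rather than including driver headers, and the 256-byte mailbox size and 11-word header are taken directly from that comment.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of lpfc_id_range: one word holding two 16-bit ids. */
struct id_range_mirror {
	uint32_t word5;
};

int main(void)
{
	const int mbox_bytes = 256;
	const int hdr_words = 6 + 4 + 1;		/* per the comment above */
	const int payload = mbox_bytes - hdr_words * 4;	/* 256 - 44 = 212 */
	const int extents = payload / 2;		/* 2 bytes per extent id */
	const int words = extents / 2;			/* 2 ids per word */

	assert(sizeof(struct id_range_mirror) == 4);
	printf("payload=%d extents=%d words=%d\n", payload, extents, words);
	/* Expect 212, 106 and 53, matching the id[53] array above. */
	return 0;
}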
|
||||
|
||||
/*
|
||||
* This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
|
||||
* structure shares the same SHIFT/MASK/WORD defines for word4 that are
* provided in the mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents
* structures defined above.
|
||||
* maximum number of extents supported by the port.
|
||||
*/
|
||||
struct lpfc_mbx_nembed_rsrc_extent {
|
||||
union lpfc_sli4_cfg_shdr cfg_shdr;
|
||||
uint32_t word4;
|
||||
struct lpfc_id_range id;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_dealloc_rsrc_extents {
|
||||
struct mbox_header header;
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
|
||||
#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
|
||||
} req;
|
||||
|
||||
};
|
||||
|
||||
/* Start SLI4 FCoE specific mbox structures. */
|
||||
|
||||
struct lpfc_mbx_post_hdr_tmpl {
|
||||
struct mbox_header header;
|
||||
uint32_t word10;
|
||||
|
@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
|
|||
|
||||
uint32_t word2;
|
||||
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
|
||||
#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
|
||||
#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
|
||||
#define lpfc_sli4_sge_offset_WORD word2
|
||||
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
|
||||
this flag !! */
|
||||
|
@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
|
|||
|
||||
struct lpfc_mbx_read_config {
|
||||
uint32_t word1;
|
||||
#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
|
||||
#define lpfc_mbx_rd_conf_max_bbc_WORD word1
|
||||
#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
|
||||
#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
|
||||
#define lpfc_mbx_rd_conf_init_bbc_WORD word1
|
||||
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
|
||||
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
|
||||
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
|
||||
uint32_t word2;
|
||||
#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
|
||||
#define lpfc_mbx_rd_conf_nport_did_WORD word2
|
||||
#define lpfc_mbx_rd_conf_topology_SHIFT 24
|
||||
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
|
||||
#define lpfc_mbx_rd_conf_topology_WORD word2
|
||||
uint32_t word3;
|
||||
#define lpfc_mbx_rd_conf_ao_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
|
||||
#define lpfc_mbx_rd_conf_ao_WORD word3
|
||||
#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
|
||||
#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
|
||||
#define lpfc_mbx_rd_conf_bb_scn_WORD word3
|
||||
#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
|
||||
#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
|
||||
#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
|
||||
#define lpfc_mbx_rd_conf_mc_SHIFT 29
|
||||
#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
|
||||
#define lpfc_mbx_rd_conf_mc_WORD word3
|
||||
uint32_t rsvd_3;
|
||||
uint32_t word4;
|
||||
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
|
||||
uint32_t word5;
|
||||
#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_lp_tov_WORD word5
|
||||
uint32_t rsvd_5;
|
||||
uint32_t word6;
|
||||
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
|
||||
uint32_t word7;
|
||||
#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
|
||||
#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
|
||||
uint32_t word8;
|
||||
#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
|
||||
#define lpfc_mbx_rd_conf_al_tov_WORD word8
|
||||
uint32_t rsvd_7;
|
||||
uint32_t rsvd_8;
|
||||
uint32_t word9;
|
||||
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_lmt_WORD word9
|
||||
uint32_t word10;
|
||||
#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
|
||||
#define lpfc_mbx_rd_conf_max_alpa_WORD word10
|
||||
uint32_t word11_rsvd;
|
||||
uint32_t rsvd_10;
|
||||
uint32_t rsvd_11;
|
||||
uint32_t word12;
|
||||
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
|
||||
|
@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
|
|||
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
|
||||
uint32_t word16;
|
||||
#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
|
||||
#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
|
||||
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
|
||||
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
|
||||
|
@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
|
|||
#define cfg_fcoe_SHIFT 0
|
||||
#define cfg_fcoe_MASK 0x00000001
|
||||
#define cfg_fcoe_WORD word12
|
||||
#define cfg_ext_SHIFT 1
|
||||
#define cfg_ext_MASK 0x00000001
|
||||
#define cfg_ext_WORD word12
|
||||
#define cfg_hdrr_SHIFT 2
|
||||
#define cfg_hdrr_MASK 0x00000001
|
||||
#define cfg_hdrr_WORD word12
|
||||
#define cfg_phwq_SHIFT 15
|
||||
#define cfg_phwq_MASK 0x00000001
|
||||
#define cfg_phwq_WORD word12
|
||||
|
@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
|
|||
struct lpfc_sli4_parameters sli4_parameters;
|
||||
};
|
||||
|
||||
struct lpfc_rscr_desc_generic {
|
||||
#define LPFC_RSRC_DESC_WSIZE 18
|
||||
uint32_t desc[LPFC_RSRC_DESC_WSIZE];
|
||||
};
|
||||
|
||||
struct lpfc_rsrc_desc_pcie {
|
||||
uint32_t word0;
|
||||
#define lpfc_rsrc_desc_pcie_type_SHIFT 0
|
||||
#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_pcie_type_WORD word0
|
||||
#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
|
||||
uint32_t word1;
|
||||
#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
|
||||
#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
|
||||
uint32_t reserved;
|
||||
uint32_t word3;
|
||||
#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
|
||||
#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
|
||||
#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
|
||||
#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
|
||||
#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
|
||||
#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
|
||||
uint32_t word4;
|
||||
#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
|
||||
#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
|
||||
};
|
||||
|
||||
struct lpfc_rsrc_desc_fcfcoe {
|
||||
uint32_t word0;
|
||||
#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
|
||||
#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
|
||||
uint32_t word1;
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
|
||||
#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
|
||||
#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
|
||||
uint32_t word2;
|
||||
#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
|
||||
#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
|
||||
uint32_t word3;
|
||||
#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
|
||||
#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
|
||||
uint32_t word4;
|
||||
#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
|
||||
#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
|
||||
uint32_t word5;
|
||||
#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
|
||||
uint32_t word6;
|
||||
uint32_t word7;
|
||||
uint32_t word8;
|
||||
uint32_t word9;
|
||||
uint32_t word10;
|
||||
uint32_t word11;
|
||||
uint32_t word12;
|
||||
uint32_t word13;
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
|
||||
#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
|
||||
#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
|
||||
#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
|
||||
#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
|
||||
#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
|
||||
#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
|
||||
#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
|
||||
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
|
||||
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
|
||||
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
|
||||
};
|
||||
|
||||
struct lpfc_func_cfg {
|
||||
#define LPFC_RSRC_DESC_MAX_NUM 2
|
||||
uint32_t rsrc_desc_count;
|
||||
struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
|
||||
};
|
||||
|
||||
struct lpfc_mbx_get_func_cfg {
|
||||
struct mbox_header header;
|
||||
#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
|
||||
#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
|
||||
#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
|
||||
struct lpfc_func_cfg func_cfg;
|
||||
};
|
||||
|
||||
struct lpfc_prof_cfg {
|
||||
#define LPFC_RSRC_DESC_MAX_NUM 2
|
||||
uint32_t rsrc_desc_count;
|
||||
struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
|
||||
};
|
||||
|
||||
struct lpfc_mbx_get_prof_cfg {
|
||||
struct mbox_header header;
|
||||
#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
|
||||
#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
|
||||
#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
|
||||
union {
|
||||
struct {
|
||||
uint32_t word10;
|
||||
#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
|
||||
#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
|
||||
#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
|
||||
#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
|
||||
#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
|
||||
#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
|
||||
} request;
|
||||
struct {
|
||||
struct lpfc_prof_cfg prof_cfg;
|
||||
} response;
|
||||
} u;
|
||||
};
|
||||
|
||||
/* Mailbox Completion Queue Error Messages */
|
||||
#define MB_CQE_STATUS_SUCCESS 0x0
|
||||
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
|
||||
|
@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
|
|||
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
|
||||
#define MB_CQE_STATUS_DMA_FAILED 0x5
|
||||
|
||||
#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
|
||||
struct lpfc_mbx_wr_object {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t word4;
|
||||
#define lpfc_wr_object_eof_SHIFT 31
|
||||
#define lpfc_wr_object_eof_MASK 0x00000001
|
||||
#define lpfc_wr_object_eof_WORD word4
|
||||
#define lpfc_wr_object_write_length_SHIFT 0
|
||||
#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
|
||||
#define lpfc_wr_object_write_length_WORD word4
|
||||
uint32_t write_offset;
|
||||
uint32_t object_name[26];
|
||||
uint32_t bde_count;
|
||||
struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
|
||||
} request;
|
||||
struct {
|
||||
uint32_t actual_write_length;
|
||||
} response;
|
||||
} u;
|
||||
};
|
||||
|
||||
/* mailbox queue entry structure */
|
||||
struct lpfc_mqe {
|
||||
uint32_t word0;
|
||||
|
@ -2241,6 +2618,9 @@ struct lpfc_mqe {
|
|||
struct lpfc_mbx_cq_destroy cq_destroy;
|
||||
struct lpfc_mbx_wq_destroy wq_destroy;
|
||||
struct lpfc_mbx_rq_destroy rq_destroy;
|
||||
struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
|
||||
struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
|
||||
struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
|
||||
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
|
||||
struct lpfc_mbx_nembed_cmd nembed_cmd;
|
||||
struct lpfc_mbx_read_rev read_rev;
|
||||
|
@ -2252,7 +2632,13 @@ struct lpfc_mqe {
|
|||
struct lpfc_mbx_supp_pages supp_pages;
|
||||
struct lpfc_mbx_pc_sli4_params sli4_params;
|
||||
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
|
||||
struct lpfc_mbx_set_link_diag_state link_diag_state;
|
||||
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
|
||||
struct lpfc_mbx_run_link_diag_test link_diag_test;
|
||||
struct lpfc_mbx_get_func_cfg get_func_cfg;
|
||||
struct lpfc_mbx_get_prof_cfg get_prof_cfg;
|
||||
struct lpfc_mbx_nop nop;
|
||||
struct lpfc_mbx_wr_object wr_object;
|
||||
} un;
|
||||
};
|
||||
|
||||
|
@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
|
|||
#define SGL_ALIGN_SZ 64
|
||||
#define SGL_PAGE_SIZE 4096
|
||||
/* align SGL addr on a size boundary - adjust address up */
|
||||
#define NO_XRI ((uint16_t)-1)
|
||||
#define NO_XRI 0xffff
|
||||
|
||||
struct wqe_common {
|
||||
uint32_t word6;
|
||||
|
@ -2798,9 +3184,28 @@ union lpfc_wqe {
|
|||
struct gen_req64_wqe gen_req;
|
||||
};
|
||||
|
||||
#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
|
||||
#define LPFC_FILE_TYPE_GROUP 0xf7
|
||||
#define LPFC_FILE_ID_GROUP 0xa2
|
||||
struct lpfc_grp_hdr {
|
||||
uint32_t size;
|
||||
uint32_t magic_number;
|
||||
uint32_t word2;
|
||||
#define lpfc_grp_hdr_file_type_SHIFT 24
|
||||
#define lpfc_grp_hdr_file_type_MASK 0x000000FF
|
||||
#define lpfc_grp_hdr_file_type_WORD word2
|
||||
#define lpfc_grp_hdr_id_SHIFT 16
|
||||
#define lpfc_grp_hdr_id_MASK 0x000000FF
|
||||
#define lpfc_grp_hdr_id_WORD word2
|
||||
uint8_t rev_name[128];
|
||||
};
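A hedged sketch of validating this group header follows; it mirrors the layout locally and checks the magic, file type and id using the constants defined above. The real request-firmware path presumably byte-swaps these fields from the on-disk image before comparing, which this sketch skips.

#include <stdint.h>
#include <stdio.h>

/* Local mirror of the group header layout above (sketch only). */
struct grp_hdr_mirror {
	uint32_t size;
	uint32_t magic_number;
	uint32_t word2;
	uint8_t rev_name[128];
};

/* Returns 1 if the header looks like a valid lpfc firmware group. */
static int grp_hdr_looks_valid(const struct grp_hdr_mirror *h)
{
	uint32_t file_type = (h->word2 >> 24) & 0xff;	/* lpfc_grp_hdr_file_type */
	uint32_t id = (h->word2 >> 16) & 0xff;		/* lpfc_grp_hdr_id */

	return h->magic_number == 0xfeaa0001 &&		/* LPFC_GROUP_OJECT_MAGIC_NUM */
	       file_type == 0xf7 &&			/* LPFC_FILE_TYPE_GROUP */
	       id == 0xa2;				/* LPFC_FILE_ID_GROUP */
}

int main(void)
{
	struct grp_hdr_mirror h = {
		.size = 0,
		.magic_number = 0xfeaa0001,
		.word2 = (0xf7u << 24) | (0xa2u << 16),
		.rev_name = "example-rev",
	};

	printf("valid=%d rev=%s\n", grp_hdr_looks_valid(&h),
	       (const char *)h.rev_name);
	return 0;
}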
|
||||
|
||||
#define FCP_COMMAND 0x0
|
||||
#define FCP_COMMAND_DATA_OUT 0x1
|
||||
#define ELS_COMMAND_NON_FIP 0xC
|
||||
#define ELS_COMMAND_FIP 0xD
|
||||
#define OTHER_COMMAND 0x8
|
||||
|
||||
#define LPFC_FW_DUMP 1
|
||||
#define LPFC_FW_RESET 2
|
||||
#define LPFC_DV_RESET 3
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/ctype.h>
|
||||
#include <linux/aer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
|
@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
|
|||
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
|
||||
if (!lpfc_vpd_data)
|
||||
goto out_free_mbox;
|
||||
|
||||
do {
|
||||
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
|
||||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||||
|
@ -308,6 +308,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
|
|||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
|
||||
* cfg_soft_wwnn, cfg_soft_wwpn
|
||||
* @vport: pointer to lpfc vport data structure.
|
||||
*
|
||||
*
|
||||
* Return codes
|
||||
* None.
|
||||
**/
|
||||
void
|
||||
lpfc_update_vport_wwn(struct lpfc_vport *vport)
|
||||
{
|
||||
/* If the soft name exists then update it using the service params */
|
||||
if (vport->phba->cfg_soft_wwnn)
|
||||
u64_to_wwn(vport->phba->cfg_soft_wwnn,
|
||||
vport->fc_sparam.nodeName.u.wwn);
|
||||
if (vport->phba->cfg_soft_wwpn)
|
||||
u64_to_wwn(vport->phba->cfg_soft_wwpn,
|
||||
vport->fc_sparam.portName.u.wwn);
|
||||
|
||||
/*
|
||||
* If the name is empty or there exists a soft name
|
||||
* then copy the service params name, otherwise use the fc name
|
||||
*/
|
||||
if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
|
||||
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
|
||||
sizeof(struct lpfc_name));
|
||||
else
|
||||
memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
|
||||
sizeof(struct lpfc_name));
|
||||
|
||||
if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
|
||||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||||
sizeof(struct lpfc_name));
|
||||
else
|
||||
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
|
||||
sizeof(struct lpfc_name));
|
||||
}
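To spell out the precedence implemented above, here is a small standalone illustration (not driver code) of the same selection rule: a configured soft name, or a still-empty fc name, means the service-parameter name is taken; otherwise the existing fc name wins and is copied back into the service parameters.

#include <stdio.h>
#include <string.h>

/* Sketch of the name-selection rule in lpfc_update_vport_wwn(). */
static void pick_name(char fc_name[8], char sparam_name[8], int soft_name_set)
{
	if (fc_name[0] == 0 || soft_name_set)
		memcpy(fc_name, sparam_name, 8);	/* take service params */
	else
		memcpy(sparam_name, fc_name, 8);	/* keep existing fc name */
}

int main(void)
{
	char fc[8] = { 0 };	/* empty fc name */
	char sp[8] = { 0x20, 0x00, 0x00, 0x00, 0x11, 0x22, 0x33, 0x44 };

	pick_name(fc, sp, 0);
	printf("fc[0]=0x%02x\n", (unsigned)(unsigned char)fc[0]);	/* now 0x20 */
	return 0;
}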
|
||||
|
||||
/**
|
||||
* lpfc_config_port_post - Perform lpfc initialization after config port
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|
|||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
pmb->context1 = NULL;
|
||||
|
||||
if (phba->cfg_soft_wwnn)
|
||||
u64_to_wwn(phba->cfg_soft_wwnn,
|
||||
vport->fc_sparam.nodeName.u.wwn);
|
||||
if (phba->cfg_soft_wwpn)
|
||||
u64_to_wwn(phba->cfg_soft_wwpn,
|
||||
vport->fc_sparam.portName.u.wwn);
|
||||
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
|
||||
sizeof (struct lpfc_name));
|
||||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||||
sizeof (struct lpfc_name));
|
||||
lpfc_update_vport_wwn(vport);
|
||||
|
||||
/* Update the fc_host data structures with new wwn. */
|
||||
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
|
||||
|
@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|
|||
/* Clear all pending interrupts */
|
||||
writel(0xffffffff, phba->HAregaddr);
|
||||
readl(phba->HAregaddr); /* flush */
|
||||
|
||||
phba->link_state = LPFC_HBA_ERROR;
|
||||
if (rc != MBX_BUSY)
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
|
@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
|||
&& descp && descp[0] != '\0')
|
||||
return;
|
||||
|
||||
if (phba->lmt & LMT_10Gb)
|
||||
if (phba->lmt & LMT_16Gb)
|
||||
max_speed = 16;
|
||||
else if (phba->lmt & LMT_10Gb)
|
||||
max_speed = 10;
|
||||
else if (phba->lmt & LMT_8Gb)
|
||||
max_speed = 8;
|
||||
|
@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
|||
"Fibre Channel Adapter"};
|
||||
break;
|
||||
case PCI_DEVICE_ID_LANCER_FC:
|
||||
oneConnect = 1;
|
||||
m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
|
||||
case PCI_DEVICE_ID_LANCER_FC_VF:
|
||||
m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
|
||||
break;
|
||||
case PCI_DEVICE_ID_LANCER_FCOE:
|
||||
case PCI_DEVICE_ID_LANCER_FCOE_VF:
|
||||
oneConnect = 1;
|
||||
m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
|
||||
m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
|
||||
break;
|
||||
default:
|
||||
m = (typeof(m)){"Unknown", "", ""};
|
||||
|
@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
|
|||
|
||||
if (mdp && mdp[0] == '\0')
|
||||
snprintf(mdp, 79,"%s", m.name);
|
||||
/* oneConnect hba requires special processing, they are all initiators
|
||||
/*
|
||||
* oneConnect hba requires special processing, they are all initiators
|
||||
* and we put the port number on the end
|
||||
*/
|
||||
if (descp && descp[0] == '\0') {
|
||||
|
@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
|
|||
kfree(io);
|
||||
phba->total_iocbq_bufs--;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return 0;
|
||||
}
|
||||
|
@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
|
||||
"2718 Clear Virtual Link Received for VPI 0x%x"
|
||||
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
|
||||
|
||||
vport = lpfc_find_vport_by_vpid(phba,
|
||||
acqe_fip->index - phba->vpi_base);
|
||||
ndlp = lpfc_sli4_perform_vport_cvl(vport);
|
||||
|
@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
|
|||
pci_try_set_mwi(pdev);
|
||||
pci_save_state(pdev);
|
||||
|
||||
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
|
||||
if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
|
||||
pdev->needs_freset = 1;
|
||||
|
||||
return 0;
|
||||
|
||||
out_disable_device:
|
||||
|
@ -3996,6 +4034,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
|
|||
lpfc_unblock_mgmt_io(phba);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @nr_vfn: number of virtual functions to be enabled.
|
||||
*
|
||||
* This function enables PCI SR-IOV virtual functions for a physical
|
||||
* function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
|
||||
* enable the number of virtual functions to the physical function. As
|
||||
* not all devices support SR-IOV, the return code from the pci_enable_sriov()
|
||||
* API call is not considered an error condition for most devices.
|
||||
**/
|
||||
int
|
||||
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
|
||||
{
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
int rc;
|
||||
|
||||
rc = pci_enable_sriov(pdev, nr_vfn);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"2806 Failed to enable sriov on this device "
|
||||
"with vfn number nr_vf:%d, rc:%d\n",
|
||||
nr_vfn, rc);
|
||||
} else
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"2807 Successful enable sriov on this device "
|
||||
"with vfn number nr_vf:%d\n", nr_vfn);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -4011,6 +4079,7 @@ static int
|
|||
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_sli *psli;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* Initialize timers used by driver
|
||||
|
@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
|
|||
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Enable sr-iov virtual functions if supported and configured
|
||||
* through the module parameter.
|
||||
*/
|
||||
if (phba->cfg_sriov_nr_virtfn > 0) {
|
||||
rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
|
||||
phba->cfg_sriov_nr_virtfn);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"2808 Requested number of SR-IOV "
|
||||
"virtual functions (%d) is not "
|
||||
"supported\n",
|
||||
phba->cfg_sriov_nr_virtfn);
|
||||
phba->cfg_sriov_nr_virtfn = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -4160,6 +4246,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
|
||||
phba->fcf.redisc_wait.data = (unsigned long)phba;
|
||||
|
||||
/*
|
||||
* Control structure for handling external multi-buffer mailbox
|
||||
* command pass-through.
|
||||
*/
|
||||
memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
|
||||
sizeof(struct lpfc_mbox_ext_buf_ctx));
|
||||
INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
|
||||
|
||||
/*
|
||||
* We need to do a READ_CONFIG mailbox command here before
|
||||
* calling lpfc_get_cfgparam. For VFs this will report the
|
||||
|
@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
|
||||
|
||||
/*
|
||||
* Initialize dirver internal slow-path work queues
|
||||
* Initialize driver internal slow-path work queues
|
||||
*/
|
||||
|
||||
/* Driver internel slow-path CQ Event pool */
|
||||
|
@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
/* Receive queue CQ Event work queue list */
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
|
||||
|
||||
/* Initialize extent block lists. */
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
|
||||
INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
|
||||
|
||||
/* Initialize the driver internal SLI layer lists. */
|
||||
lpfc_sli_setup(phba);
|
||||
lpfc_sli_queue_setup(phba);
|
||||
|
@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
}
|
||||
/*
|
||||
* Get sli4 parameters that override parameters from Port capabilities.
|
||||
* If this call fails it is not a critical error so continue loading.
|
||||
* If this call fails, it isn't critical unless the SLI4 parameters come
|
||||
* back in conflict.
|
||||
*/
|
||||
lpfc_get_sli4_parameters(phba, mboxq);
|
||||
rc = lpfc_get_sli4_parameters(phba, mboxq);
|
||||
if (rc) {
|
||||
if (phba->sli4_hba.extents_in_use &&
|
||||
phba->sli4_hba.rpi_hdrs_in_use) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2999 Unsupported SLI4 Parameters "
|
||||
"Extents and RPI headers enabled.\n");
|
||||
goto out_free_bsmbx;
|
||||
}
|
||||
}
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
/* Create all the SLI4 queues */
|
||||
rc = lpfc_sli4_queue_create(phba);
|
||||
|
@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
"1430 Failed to initialize sgl list.\n");
|
||||
goto out_free_sgl_list;
|
||||
}
|
||||
|
||||
rc = lpfc_sli4_init_rpi_hdrs(phba);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
|
@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2759 Failed allocate memory for FCF round "
|
||||
"robin failover bmask\n");
|
||||
rc = -ENOMEM;
|
||||
goto out_remove_rpi_hdrs;
|
||||
}
|
||||
|
||||
|
@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2572 Failed allocate memory for fast-path "
|
||||
"per-EQ handle array\n");
|
||||
rc = -ENOMEM;
|
||||
goto out_free_fcf_rr_bmask;
|
||||
}
|
||||
|
||||
|
@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2573 Failed allocate memory for msi-x "
|
||||
"interrupt vector entries\n");
|
||||
rc = -ENOMEM;
|
||||
goto out_free_fcp_eq_hdl;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable sr-iov virtual functions if supported and configured
|
||||
* through the module parameter.
|
||||
*/
|
||||
if (phba->cfg_sriov_nr_virtfn > 0) {
|
||||
rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
|
||||
phba->cfg_sriov_nr_virtfn);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"3020 Requested number of SR-IOV "
|
||||
"virtual functions (%d) is not "
|
||||
"supported\n",
|
||||
phba->cfg_sriov_nr_virtfn);
|
||||
phba->cfg_sriov_nr_virtfn = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
||||
out_free_fcp_eq_hdl:
|
||||
|
@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
|
|||
lpfc_sli4_cq_event_release_all(phba);
|
||||
lpfc_sli4_cq_event_pool_destroy(phba);
|
||||
|
||||
/* Release resource identifiers. */
|
||||
lpfc_sli4_dealloc_resource_identifiers(phba);
|
||||
|
||||
/* Free the bsmbx region. */
|
||||
lpfc_destroy_bootstrap_mbox(phba);
|
||||
|
||||
|
@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
|
|||
"Unloading driver.\n", __func__);
|
||||
goto out_free_iocbq;
|
||||
}
|
||||
iocbq_entry->sli4_lxritag = NO_XRI;
|
||||
iocbq_entry->sli4_xritag = NO_XRI;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 lpfc_init_sgl_list els %d.\n",
				"2400 ELS XRI count %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);

@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
|
|||
phba->sli4_hba.scsi_xri_max =
|
||||
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
|
||||
phba->sli4_hba.scsi_xri_cnt = 0;
|
||||
|
||||
phba->sli4_hba.lpfc_scsi_psb_array =
|
||||
kzalloc((sizeof(struct lpfc_scsi_buf *) *
|
||||
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
|
||||
|
@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
|
|||
goto out_free_mem;
|
||||
}
|
||||
|
||||
sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
|
||||
if (sglq_entry->sli4_xritag == NO_XRI) {
|
||||
kfree(sglq_entry);
|
||||
printk(KERN_ERR "%s: failed to allocate XRI.\n"
|
||||
"Unloading driver.\n", __func__);
|
||||
goto out_free_mem;
|
||||
}
|
||||
sglq_entry->buff_type = GEN_BUFF_TYPE;
|
||||
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
|
||||
if (sglq_entry->virt == NULL) {
|
||||
|
@ -4857,24 +4982,20 @@ int
|
|||
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
|
||||
{
|
||||
int rc = 0;
|
||||
int longs;
|
||||
uint16_t rpi_count;
|
||||
struct lpfc_rpi_hdr *rpi_hdr;
|
||||
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
|
||||
|
||||
/*
|
||||
* Provision an rpi bitmask range for discovery. The total count
|
||||
* is the difference between max and base + 1.
|
||||
* If the SLI4 port supports extents, posting the rpi header isn't
|
||||
* required. Set the expected maximum count and let the actual value
|
||||
* get set when extents are fully allocated.
|
||||
*/
|
||||
rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
|
||||
phba->sli4_hba.max_cfg_param.max_rpi - 1;
|
||||
|
||||
longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.rpi_bmask)
|
||||
return -ENOMEM;
|
||||
if (!phba->sli4_hba.rpi_hdrs_in_use) {
|
||||
phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
|
||||
return rc;
|
||||
}
|
||||
if (phba->sli4_hba.extents_in_use)
|
||||
return -EIO;
|
||||
|
||||
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
|
||||
if (!rpi_hdr) {
|
||||
|
@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
|
|||
struct lpfc_rpi_hdr *rpi_hdr;
|
||||
uint32_t rpi_count;
|
||||
|
||||
	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
		phba->sli4_hba.max_cfg_param.max_rpi - 1;
		phba->sli4_hba.max_cfg_param.max_rpi - 1;

spin_lock_irq(&phba->hbalock);
|
||||
curr_rpi_range = phba->sli4_hba.next_rpi;
|
||||
/*
|
||||
* Establish the starting RPI in this header block. The starting
|
||||
* rpi is normalized to a zero base because the physical rpi is
|
||||
* port based.
|
||||
*/
|
||||
curr_rpi_range = phba->sli4_hba.next_rpi -
|
||||
phba->sli4_hba.max_cfg_param.rpi_base;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/*
|
||||
|
@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
|
|||
else
|
||||
rpi_count = LPFC_RPI_HDR_COUNT;
|
||||
|
||||
if (!rpi_count)
|
||||
return NULL;
|
||||
/*
|
||||
* First allocate the protocol header region for the port. The
|
||||
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
|
||||
|
@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
|
|||
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
|
||||
rpi_hdr->page_count = 1;
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
|
||||
|
||||
/* The rpi_hdr stores the logical index only. */
|
||||
rpi_hdr->start_rpi = curr_rpi_range;
|
||||
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
|
||||
|
||||
/*
|
||||
* The next_rpi stores the next module-64 rpi value to post
|
||||
* in any subsequent rpi memory region postings.
|
||||
* The next_rpi stores the next logical module-64 rpi value used
|
||||
* to post physical rpis in subsequent rpi postings.
|
||||
*/
|
||||
phba->sli4_hba.next_rpi += rpi_count;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
|
|||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine is invoked to remove all memory resources allocated
|
||||
* to support rpis. This routine presumes the caller has released all
|
||||
* rpis consumed by fabric or port logins and is prepared to have
|
||||
* the header pages removed.
|
||||
* to support rpis for SLI4 ports not supporting extents. This routine
|
||||
* presumes the caller has released all rpis consumed by fabric or port
|
||||
* logins and is prepared to have the header pages removed.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
|
||||
|
||||
if (!phba->sli4_hba.rpi_hdrs_in_use)
|
||||
goto exit;
|
||||
|
||||
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
|
||||
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
|
||||
list_del(&rpi_hdr->list);
|
||||
|
@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
|
|||
kfree(rpi_hdr->dmabuf);
|
||||
kfree(rpi_hdr);
|
||||
}
|
||||
|
||||
phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
|
||||
memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
|
||||
exit:
|
||||
/* There are no rpis available to the port now. */
|
||||
phba->sli4_hba.next_rpi = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
		/* Final checks. The port status should be clean. */
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&reg_data.word0) ||
			bf_get(lpfc_sliport_status_err, &reg_data)) {
			(bf_get(lpfc_sliport_status_err, &reg_data) &&
			 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
|
||||
|
@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
{
|
||||
LPFC_MBOXQ_t *pmb;
|
||||
struct lpfc_mbx_read_config *rd_config;
|
||||
uint32_t rc = 0;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
struct lpfc_mbx_get_func_cfg *get_func_cfg;
|
||||
struct lpfc_rsrc_desc_fcfcoe *desc;
|
||||
uint32_t desc_count;
|
||||
int length, i, rc = 0;
|
||||
|
||||
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!pmb) {
|
||||
|
@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
rc = -EIO;
|
||||
} else {
|
||||
rd_config = &pmb->u.mqe.un.rd_config;
|
||||
phba->sli4_hba.extents_in_use =
|
||||
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.max_xri =
|
||||
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.xri_base =
|
||||
|
@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.max_fcfi =
|
||||
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.fcfi_base =
|
||||
bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.max_eq =
|
||||
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
|
||||
phba->sli4_hba.max_cfg_param.max_rq =
|
||||
|
@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
|
||||
phba->max_vports = phba->max_vpi;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2003 cfg params XRI(B:%d M:%d), "
|
||||
"2003 cfg params Extents? %d "
|
||||
"XRI(B:%d M:%d), "
|
||||
"VPI(B:%d M:%d) "
|
||||
"VFI(B:%d M:%d) "
|
||||
"RPI(B:%d M:%d) "
|
||||
"FCFI(B:%d M:%d)\n",
|
||||
"FCFI(Count:%d)\n",
|
||||
phba->sli4_hba.extents_in_use,
|
||||
phba->sli4_hba.max_cfg_param.xri_base,
|
||||
phba->sli4_hba.max_cfg_param.max_xri,
|
||||
phba->sli4_hba.max_cfg_param.vpi_base,
|
||||
|
@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
phba->sli4_hba.max_cfg_param.max_vfi,
|
||||
phba->sli4_hba.max_cfg_param.rpi_base,
|
||||
phba->sli4_hba.max_cfg_param.max_rpi,
|
||||
phba->sli4_hba.max_cfg_param.fcfi_base,
|
||||
phba->sli4_hba.max_cfg_param.max_fcfi);
|
||||
}
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
|
||||
if (rc)
|
||||
goto read_cfg_out;
|
||||
|
||||
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
|
||||
if (phba->cfg_hba_queue_depth >
|
||||
|
@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|||
phba->cfg_hba_queue_depth =
|
||||
phba->sli4_hba.max_cfg_param.max_xri -
|
||||
lpfc_sli4_get_els_iocb_cnt(phba);
|
||||
|
||||
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
||||
LPFC_SLI_INTF_IF_TYPE_2)
|
||||
goto read_cfg_out;
|
||||
|
||||
/* get the pf# and vf# for SLI4 if_type 2 port */
|
||||
length = (sizeof(struct lpfc_mbx_get_func_cfg) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
|
||||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
|
||||
shdr = (union lpfc_sli4_cfg_shdr *)
|
||||
&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
||||
if (rc || shdr_status || shdr_add_status) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3026 Mailbox failed , mbxCmd x%x "
|
||||
"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
|
||||
bf_get(lpfc_mqe_command, &pmb->u.mqe),
|
||||
bf_get(lpfc_mqe_status, &pmb->u.mqe));
|
||||
rc = -EIO;
|
||||
goto read_cfg_out;
|
||||
}
|
||||
|
||||
/* search for fc_fcoe resrouce descriptor */
|
||||
get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
|
||||
desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
|
||||
|
||||
for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
|
||||
desc = (struct lpfc_rsrc_desc_fcfcoe *)
|
||||
&get_func_cfg->func_cfg.desc[i];
|
||||
if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
|
||||
bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
|
||||
phba->sli4_hba.iov.pf_number =
|
||||
bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
|
||||
phba->sli4_hba.iov.vf_number =
|
||||
bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i < LPFC_RSRC_DESC_MAX_NUM)
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
|
||||
"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
|
||||
phba->sli4_hba.iov.vf_number);
|
||||
else {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3028 GET_FUNCTION_CONFIG: failed to find "
|
||||
"Resrouce Descriptor:x%x\n",
|
||||
LPFC_RSRC_DESC_TYPE_FCFCOE);
|
||||
rc = -EIO;
|
||||
}
|
||||
|
||||
read_cfg_out:
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
	fcp_qidx = 0;
	do
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	while (++fcp_qidx < phba->cfg_fcp_eq_count);
	kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
|
||||
|
||||
|
@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|||
phba->sli4_hba.sp_eq->queue_id);
|
||||
|
||||
/* Set up fast-path FCP Response Complete Queue */
|
||||
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
|
||||
fcp_cqidx = 0;
|
||||
do {
|
||||
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0526 Fast-path FCP CQ (%d) not "
|
||||
"allocated\n", fcp_cqidx);
|
||||
goto out_destroy_fcp_cq;
|
||||
}
|
||||
rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
|
||||
phba->sli4_hba.fp_eq[fcp_cqidx],
|
||||
LPFC_WCQ, LPFC_FCP);
|
||||
if (phba->cfg_fcp_eq_count)
|
||||
rc = lpfc_cq_create(phba,
|
||||
phba->sli4_hba.fcp_cq[fcp_cqidx],
|
||||
phba->sli4_hba.fp_eq[fcp_cqidx],
|
||||
LPFC_WCQ, LPFC_FCP);
|
||||
else
|
||||
rc = lpfc_cq_create(phba,
|
||||
phba->sli4_hba.fcp_cq[fcp_cqidx],
|
||||
phba->sli4_hba.sp_eq,
|
||||
LPFC_WCQ, LPFC_FCP);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0527 Failed setup of fast-path FCP "
|
||||
|
@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|||
}
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"2588 FCP CQ setup: cq[%d]-id=%d, "
|
||||
"parent eq[%d]-id=%d\n",
|
||||
"parent %seq[%d]-id=%d\n",
|
||||
fcp_cqidx,
|
||||
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
|
||||
(phba->cfg_fcp_eq_count) ? "" : "sp_",
|
||||
fcp_cqidx,
|
||||
phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
|
||||
}
|
||||
(phba->cfg_fcp_eq_count) ?
|
||||
phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
|
||||
phba->sli4_hba.sp_eq->queue_id);
|
||||
} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
|
||||
|
||||
/*
|
||||
* Set up all the Work Queues (WQs)
|
||||
|
@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|||
fcp_cq_index,
|
||||
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
|
||||
/* Round robin FCP Work Queue's Completion Queue assignment */
|
||||
fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
|
||||
if (phba->cfg_fcp_eq_count)
|
||||
fcp_cq_index = ((fcp_cq_index + 1) %
|
||||
phba->cfg_fcp_eq_count);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
|
|||
if (rdy_chk < 1000)
|
||||
break;
|
||||
}
|
||||
/* delay driver action following IF_TYPE_2 function reset */
|
||||
msleep(100);
|
||||
break;
|
||||
case LPFC_SLI_INTF_IF_TYPE_1:
|
||||
default:
|
||||
|
@ -7419,11 +7649,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
|||
/*
|
||||
* Assign MSI-X vectors to interrupt handlers
|
||||
*/
|
||||
|
||||
/* The first vector must associated to slow-path handler for MQ */
|
||||
rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
|
||||
&lpfc_sli4_sp_intr_handler, IRQF_SHARED,
|
||||
LPFC_SP_DRIVER_HANDLER_NAME, phba);
|
||||
if (vectors > 1)
|
||||
rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
|
||||
&lpfc_sli4_sp_intr_handler, IRQF_SHARED,
|
||||
LPFC_SP_DRIVER_HANDLER_NAME, phba);
|
||||
else
|
||||
/* All Interrupts need to be handled by one EQ */
|
||||
rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
|
||||
&lpfc_sli4_intr_handler, IRQF_SHARED,
|
||||
LPFC_DRIVER_NAME, phba);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"0485 MSI-X slow-path request_irq failed "
|
||||
|
@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
|||
{
|
||||
int wait_cnt = 0;
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
|
||||
lpfc_stop_hba_timers(phba);
|
||||
phba->sli4_hba.intr_enable = 0;
|
||||
|
@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
|||
/* Disable PCI subsystem interrupt */
|
||||
lpfc_sli4_disable_intr(phba);
|
||||
|
||||
/* Disable SR-IOV if enabled */
|
||||
if (phba->cfg_sriov_nr_virtfn)
|
||||
pci_disable_sriov(pdev);
|
||||
|
||||
/* Stop kthread signal shall trigger work_done one more time */
|
||||
kthread_stop(phba->worker_thread);
|
||||
|
||||
|
@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|||
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
|
||||
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
|
||||
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
|
||||
|
||||
/* Make sure that sge_supp_len can be handled by the driver */
|
||||
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
|
||||
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|||
int length;
|
||||
struct lpfc_sli4_parameters *mbx_sli4_parameters;
|
||||
|
||||
/*
|
||||
* By default, the driver assumes the SLI4 port requires RPI
|
||||
* header postings. The SLI4_PARAM response will correct this
|
||||
* assumption.
|
||||
*/
|
||||
phba->sli4_hba.rpi_hdrs_in_use = 1;
|
||||
|
||||
/* Read the port's SLI4 Config Parameters */
|
||||
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
|
@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|||
mbx_sli4_parameters);
|
||||
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
|
||||
mbx_sli4_parameters);
|
||||
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
|
||||
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
|
||||
|
||||
/* Make sure that sge_supp_len can be handled by the driver */
|
||||
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
|
||||
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
|
|||
|
||||
lpfc_debugfs_terminate(vport);
|
||||
|
||||
/* Disable SR-IOV if enabled */
|
||||
if (phba->cfg_sriov_nr_virtfn)
|
||||
pci_disable_sriov(pdev);
|
||||
|
||||
/* Disable interrupt */
|
||||
lpfc_sli_disable_intr(phba);
|
||||
|
||||
|
@ -8564,6 +8826,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_write_firmware - attempt to write a firmware image to the port
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @fw: pointer to firmware image returned from request_firmware.
|
||||
*
|
||||
* returns the number of bytes written if write is successful.
|
||||
* returns a negative error value if there were errors.
|
||||
* returns 0 if firmware matches currently active firmware on port.
|
||||
**/
|
||||
int
|
||||
lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
|
||||
{
|
||||
char fwrev[32];
|
||||
struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
|
||||
struct list_head dma_buffer_list;
|
||||
int i, rc = 0;
|
||||
struct lpfc_dmabuf *dmabuf, *next;
|
||||
uint32_t offset = 0, temp_offset = 0;
|
||||
|
||||
INIT_LIST_HEAD(&dma_buffer_list);
|
||||
if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
|
||||
(bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
|
||||
(bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
|
||||
(image->size != fw->size)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3022 Invalid FW image found. "
|
||||
"Magic:%d Type:%x ID:%x\n",
|
||||
image->magic_number,
|
||||
bf_get(lpfc_grp_hdr_file_type, image),
|
||||
bf_get(lpfc_grp_hdr_id, image));
|
||||
return -EINVAL;
|
||||
}
|
||||
lpfc_decode_firmware_rev(phba, fwrev, 1);
|
||||
if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3023 Updating Firmware. Current Version:%s "
|
||||
"New Version:%s\n",
|
||||
fwrev, image->rev_name);
|
||||
for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
|
||||
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
|
||||
GFP_KERNEL);
|
||||
if (!dmabuf) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
|
||||
SLI4_PAGE_SIZE,
|
||||
&dmabuf->phys,
|
||||
GFP_KERNEL);
|
||||
if (!dmabuf->virt) {
|
||||
kfree(dmabuf);
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
list_add_tail(&dmabuf->list, &dma_buffer_list);
|
||||
}
|
||||
while (offset < fw->size) {
|
||||
temp_offset = offset;
|
||||
list_for_each_entry(dmabuf, &dma_buffer_list, list) {
|
||||
if (offset + SLI4_PAGE_SIZE > fw->size) {
|
||||
temp_offset += fw->size - offset;
|
||||
memcpy(dmabuf->virt,
|
||||
fw->data + temp_offset,
|
||||
fw->size - offset);
|
||||
break;
|
||||
}
|
||||
temp_offset += SLI4_PAGE_SIZE;
|
||||
memcpy(dmabuf->virt, fw->data + temp_offset,
|
||||
SLI4_PAGE_SIZE);
|
||||
}
|
||||
rc = lpfc_wr_object(phba, &dma_buffer_list,
|
||||
(fw->size - offset), &offset);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3024 Firmware update failed. "
|
||||
"%d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
rc = offset;
|
||||
}
|
||||
out:
|
||||
list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
|
||||
list_del(&dmabuf->list);
|
||||
dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
|
||||
dmabuf->virt, dmabuf->phys);
|
||||
kfree(dmabuf);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
|
||||
* @pdev: pointer to PCI device
|
||||
|
@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
int error;
|
||||
uint32_t cfg_mode, intr_mode;
|
||||
int mcnt;
|
||||
int adjusted_fcp_eq_count;
|
||||
int fcp_qidx;
|
||||
const struct firmware *fw;
|
||||
uint8_t file_name[16];
|
||||
|
||||
/* Allocate memory for HBA structure */
|
||||
phba = lpfc_hba_alloc(pdev);
|
||||
|
@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
error = -ENODEV;
|
||||
goto out_free_sysfs_attr;
|
||||
}
|
||||
/* Default to single FCP EQ for non-MSI-X */
|
||||
/* Default to single EQ for non-MSI-X */
|
||||
if (phba->intr_type != MSIX)
|
||||
phba->cfg_fcp_eq_count = 1;
|
||||
else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
|
||||
phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
|
||||
adjusted_fcp_eq_count = 0;
|
||||
else if (phba->sli4_hba.msix_vec_nr <
|
||||
phba->cfg_fcp_eq_count + 1)
|
||||
adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
|
||||
else
|
||||
adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
|
||||
/* Free unused EQs */
|
||||
for (fcp_qidx = adjusted_fcp_eq_count;
|
||||
fcp_qidx < phba->cfg_fcp_eq_count;
|
||||
fcp_qidx++) {
|
||||
lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
|
||||
/* do not delete the first fcp_cq */
|
||||
if (fcp_qidx)
|
||||
lpfc_sli4_queue_free(
|
||||
phba->sli4_hba.fcp_cq[fcp_qidx]);
|
||||
}
|
||||
phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
|
||||
/* Set up SLI-4 HBA */
|
||||
if (lpfc_sli4_hba_setup(phba)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
|
@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
/* Perform post initialization setup */
|
||||
lpfc_post_init_setup(phba);
|
||||
|
||||
/* check for firmware upgrade or downgrade */
|
||||
snprintf(file_name, 16, "%s.grp", phba->ModelName);
|
||||
error = request_firmware(&fw, file_name, &phba->pcidev->dev);
|
||||
if (!error) {
|
||||
lpfc_write_firmware(phba, fw);
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
/* Check if there are static vports to be created. */
|
||||
lpfc_create_static_vport(phba);
|
||||
|
||||
|
@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
|
|||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ 0 }
|
||||
};
|
||||
|
||||
|
|
|
@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
|
|||
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
|
||||
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
|
||||
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
|
||||
mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
|
||||
if (phba->sli_rev >= LPFC_SLI_REV3)
|
||||
mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
|
||||
|
||||
/* save address for completion */
|
||||
pmb->context1 = mp;
|
||||
|
@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
|
|||
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
|
||||
|
||||
mb->un.varUnregDID.did = did;
|
||||
if (vpi != 0xffff)
|
||||
vpi += phba->vpi_base;
|
||||
mb->un.varUnregDID.vpi = vpi;
|
||||
if ((vpi != 0xffff) &&
|
||||
(phba->sli_rev == LPFC_SLI_REV4))
|
||||
mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
|
||||
|
||||
mb->mbxCommand = MBX_UNREG_D_ID;
|
||||
mb->mbxOwner = OWN_HOST;
|
||||
|
@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
|
|||
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
|
||||
|
||||
mb->un.varRegLogin.rpi = 0;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
mb->un.varRegLogin.rpi = rpi;
|
||||
if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
|
||||
return 1;
|
||||
}
|
||||
mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
|
||||
if (phba->sli_rev >= LPFC_SLI_REV3)
|
||||
mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
|
||||
mb->un.varRegLogin.did = did;
|
||||
mb->mbxOwner = OWN_HOST;
|
||||
/* Get a buffer to hold NPorts Service Parameters */
|
||||
|
@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
|
|||
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
|
||||
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
|
||||
"rpi x%x\n", vpi, did, rpi);
|
||||
return (1);
|
||||
return 1;
|
||||
}
|
||||
INIT_LIST_HEAD(&mp->list);
|
||||
sparam = mp->virt;
|
||||
|
@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
|
|||
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
|
||||
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
|
|||
*
|
||||
* This routine prepares the mailbox command for unregistering remote port
|
||||
* login.
|
||||
*
|
||||
* For SLI4 ports, the rpi passed to this function must be the physical
|
||||
* rpi value, not the logical index.
|
||||
**/
|
||||
void
|
||||
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
|
||||
|
@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
|
|||
mb = &pmb->u.mb;
|
||||
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
|
||||
|
||||
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
|
||||
mb->un.varUnregLogin.rpi = rpi;
|
||||
mb->un.varUnregLogin.rsvd1 = 0;
|
||||
mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
|
||||
if (phba->sli_rev >= LPFC_SLI_REV3)
|
||||
mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
|
||||
|
||||
mb->mbxCommand = MBX_UNREG_LOGIN;
|
||||
mb->mbxOwner = OWN_HOST;
|
||||
|
@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
|
|||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_unreg_login(phba, vport->vpi,
|
||||
vport->vpi + phba->vpi_base, mbox);
|
||||
mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
|
||||
/*
|
||||
* For SLI4 functions, the rpi field is overloaded for
|
||||
* the vport context unreg all. This routine passes
|
||||
* 0 for the rpi field in lpfc_unreg_login for compatibility
|
||||
* with SLI3 and then overrides the rpi field with the
|
||||
* expected value for SLI4.
|
||||
*/
|
||||
lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
|
||||
mbox);
|
||||
mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->context1 = NULL;
|
||||
|
@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
|
|||
if ((phba->sli_rev == LPFC_SLI_REV4) &&
|
||||
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
|
||||
mb->un.varRegVpi.upd = 1;
|
||||
mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
|
||||
|
||||
mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
|
||||
mb->un.varRegVpi.sid = vport->fc_myDID;
|
||||
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
|
||||
else
|
||||
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
|
||||
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
|
||||
sizeof(struct lpfc_name));
|
||||
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
|
||||
|
@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
|
|||
MAILBOX_t *mb = &pmb->u.mb;
|
||||
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
|
||||
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
|
||||
else
|
||||
mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
|
||||
if (phba->sli_rev == LPFC_SLI_REV3)
|
||||
mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
|
||||
else if (phba->sli_rev >= LPFC_SLI_REV4)
|
||||
mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
|
||||
|
||||
mb->mbxCommand = MBX_UNREG_VPI;
|
||||
mb->mbxOwner = OWN_HOST;
|
||||
|
@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
|
|||
return length;
|
||||
}
|
||||
|
||||
/* Setup for the none-embedded mbox command */
|
||||
/* Setup for the non-embedded mbox command */
|
||||
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
|
||||
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
|
||||
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
|
||||
/* Allocate record for keeping SGE virtual addresses */
|
||||
mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
|
||||
mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
|
||||
GFP_KERNEL);
|
||||
if (!mbox->sge_array) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
|
||||
|
@ -1790,11 +1805,86 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
|
|||
/* The sub-header is in DMA memory, which needs endian converstion */
|
||||
if (cfg_shdr)
|
||||
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
|
||||
sizeof(union lpfc_sli4_cfg_shdr));
|
||||
|
||||
sizeof(union lpfc_sli4_cfg_shdr));
|
||||
return alloc_len;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @mbox: pointer to an allocated lpfc mbox resource.
|
||||
* @exts_count: the number of extents, if required, to allocate.
|
||||
* @rsrc_type: the resource extent type.
|
||||
* @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
|
||||
*
|
||||
* This routine completes the subcommand header for SLI4 resource extent
|
||||
* mailbox commands. It is called after lpfc_sli4_config. The caller must
|
||||
* pass an allocated mailbox and the attributes required to initialize the
|
||||
* mailbox correctly.
|
||||
*
|
||||
* Return: the actual length of the mbox command allocated.
|
||||
**/
|
||||
int
|
||||
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
|
||||
uint16_t exts_count, uint16_t rsrc_type, bool emb)
|
||||
{
|
||||
uint8_t opcode = 0;
|
||||
struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
|
||||
void *virtaddr = NULL;
|
||||
|
||||
/* Set up SLI4 ioctl command header fields */
|
||||
if (emb == LPFC_SLI4_MBX_NEMBED) {
|
||||
/* Get the first SGE entry from the non-embedded DMA memory */
|
||||
virtaddr = mbox->sge_array->addr[0];
|
||||
if (virtaddr == NULL)
|
||||
return 1;
|
||||
n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
|
||||
}
|
||||
|
||||
/*
|
||||
* The resource type is common to all extent Opcodes and resides in the
|
||||
* same position.
|
||||
*/
|
||||
if (emb == LPFC_SLI4_MBX_EMBED)
|
||||
bf_set(lpfc_mbx_alloc_rsrc_extents_type,
|
||||
&mbox->u.mqe.un.alloc_rsrc_extents.u.req,
|
||||
rsrc_type);
|
||||
else {
|
||||
/* This is DMA data. Byteswap is required. */
|
||||
bf_set(lpfc_mbx_alloc_rsrc_extents_type,
|
||||
n_rsrc_extnt, rsrc_type);
|
||||
lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
|
||||
&n_rsrc_extnt->word4,
|
||||
sizeof(uint32_t));
|
||||
}
|
||||
|
||||
/* Complete the initialization for the particular Opcode. */
|
||||
opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
|
||||
switch (opcode) {
|
||||
case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
|
||||
if (emb == LPFC_SLI4_MBX_EMBED)
|
||||
bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
|
||||
&mbox->u.mqe.un.alloc_rsrc_extents.u.req,
|
||||
exts_count);
|
||||
else
|
||||
bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
|
||||
n_rsrc_extnt, exts_count);
|
||||
break;
|
||||
case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
|
||||
case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
|
||||
case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
|
||||
/* Initialization is complete.*/
|
||||
break;
|
||||
default:
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
|
||||
"2929 Resource Extent Opcode x%x is "
|
||||
"unsupported\n", opcode);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
|
|||
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
|
||||
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
|
||||
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
|
||||
bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
|
||||
bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
|
||||
bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
|
||||
bf_set(lpfc_init_vfi_vfi, init_vfi,
|
||||
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
|
||||
bf_set(lpfc_init_vpi_vpi, init_vfi,
|
||||
vport->phba->vpi_ids[vport->vpi]);
|
||||
bf_set(lpfc_init_vfi_fcfi, init_vfi,
|
||||
vport->phba->fcf.fcfi);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
|
|||
reg_vfi = &mbox->u.mqe.un.reg_vfi;
|
||||
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
|
||||
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
|
||||
bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
|
||||
bf_set(lpfc_reg_vfi_vfi, reg_vfi,
|
||||
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
|
||||
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
|
||||
bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
|
||||
bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
|
||||
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
|
||||
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
|
||||
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
|
||||
|
@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
|
|||
memset(mbox, 0, sizeof(*mbox));
|
||||
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
|
||||
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
|
||||
vpi + phba->vpi_base);
|
||||
phba->vpi_ids[vpi]);
|
||||
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
|
||||
phba->pport->vfi + phba->vfi_base);
|
||||
phba->sli4_hba.vfi_ids[phba->pport->vfi]);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
|
|||
memset(mbox, 0, sizeof(*mbox));
|
||||
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
|
||||
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
|
||||
vport->vfi + vport->phba->vfi_base);
|
||||
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
|
|||
void
|
||||
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_hba *phba = ndlp->phba;
|
||||
struct lpfc_mbx_resume_rpi *resume_rpi;
|
||||
|
||||
memset(mbox, 0, sizeof(*mbox));
|
||||
resume_rpi = &mbox->u.mqe.un.resume_rpi;
|
||||
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
|
||||
bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
|
||||
bf_set(lpfc_resume_rpi_index, resume_rpi,
|
||||
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
||||
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
|
||||
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
|
||||
}
|
||||
|
|
|
@ -62,7 +62,6 @@ int
|
|||
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
|
||||
{
|
||||
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
|
||||
int longs;
|
||||
int i;
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
|
@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
|
|||
phba->lpfc_hrb_pool = NULL;
|
||||
phba->lpfc_drb_pool = NULL;
|
||||
}
|
||||
/* vpi zero is reserved for the physical port so add 1 to max */
|
||||
longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
|
||||
if (!phba->vpi_bmask)
|
||||
goto fail_free_dbq_pool;
|
||||
|
||||
return 0;
|
||||
|
||||
fail_free_dbq_pool:
|
||||
pci_pool_destroy(phba->lpfc_drb_pool);
|
||||
phba->lpfc_drb_pool = NULL;
|
||||
fail_free_hrb_pool:
|
||||
pci_pool_destroy(phba->lpfc_hrb_pool);
|
||||
phba->lpfc_hrb_pool = NULL;
|
||||
|
@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
|
|||
int i;
|
||||
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
|
||||
|
||||
/* Free VPI bitmask memory */
|
||||
kfree(phba->vpi_bmask);
|
||||
|
||||
/* Free HBQ pools */
|
||||
lpfc_sli_hbqbuf_free_all(phba);
|
||||
if (phba->lpfc_drb_pool)
|
||||
|
|
|
@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
ndlp->nlp_maxframe =
|
||||
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
|
||||
|
||||
/*
|
||||
* Need to unreg_login if we are already in one of these states and
|
||||
* change to NPR state. This will block the port until after the ACC
|
||||
* completes and the reg_login is issued and completed.
|
||||
*/
|
||||
/* no need to reg_login if we are already in one of these states */
|
||||
switch (ndlp->nlp_state) {
|
||||
case NLP_STE_NPR_NODE:
|
||||
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
|
||||
|
@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
case NLP_STE_PRLI_ISSUE:
|
||||
case NLP_STE_UNMAPPED_NODE:
|
||||
case NLP_STE_MAPPED_NODE:
|
||||
lpfc_unreg_rpi(vport, ndlp);
|
||||
ndlp->nlp_prev_state = ndlp->nlp_state;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
|
||||
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((vport->fc_flag & FC_PT2PT) &&
|
||||
|
@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|||
lpfc_unreg_rpi(vport, ndlp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
|
||||
* @phba : Pointer to lpfc_hba structure.
|
||||
|
@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
|
|||
if (mb->mbxStatus) {
|
||||
/* RegLogin failed */
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
|
||||
"0246 RegLogin failed Data: x%x x%x x%x\n",
|
||||
did, mb->mbxStatus, vport->port_state);
|
||||
"0246 RegLogin failed Data: x%x x%x x%x x%x "
|
||||
"x%x\n",
|
||||
did, mb->mbxStatus, vport->port_state,
|
||||
mb->un.varRegLogin.vpi,
|
||||
mb->un.varRegLogin.rpi);
|
||||
/*
|
||||
* If RegLogin failed due to lack of HBA resources do not
|
||||
* retry discovery.
|
||||
|
@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
|
|||
return ndlp->nlp_state;
|
||||
}
|
||||
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
/* SLI4 ports have preallocated logical rpis. */
|
||||
if (vport->phba->sli_rev < LPFC_SLI_REV4)
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
|
||||
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
||||
|
||||
/* Only if we are not a fabric nport do we issue PRLI */
|
||||
|
@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
|
|||
MAILBOX_t *mb = &pmb->u.mb;
|
||||
|
||||
if (!mb->mbxStatus) {
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
/* SLI4 ports have preallocated logical rpis. */
|
||||
if (vport->phba->sli_rev < LPFC_SLI_REV4)
|
||||
ndlp->nlp_rpi = mb->un.varWords[0];
|
||||
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
|
||||
} else {
|
||||
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
|
||||
|
|
|
@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
|
|||
if (bcnt == 0)
|
||||
continue;
|
||||
/* Now, post the SCSI buffer list sgls as a block */
|
||||
status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
|
||||
if (!phba->sli4_hba.extents_in_use)
|
||||
status = lpfc_sli4_post_scsi_sgl_block(phba,
|
||||
&sblist,
|
||||
bcnt);
|
||||
else
|
||||
status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
|
||||
&sblist,
|
||||
bcnt);
|
||||
/* Reset SCSI buffer count for next round of posting */
|
||||
bcnt = 0;
|
||||
while (!list_empty(&sblist)) {
|
||||
|
@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
|
|||
dma_addr_t pdma_phys_fcp_cmd;
|
||||
dma_addr_t pdma_phys_fcp_rsp;
|
||||
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
|
||||
uint16_t iotag, last_xritag = NO_XRI;
|
||||
uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
|
||||
int status = 0, index;
|
||||
int bcnt;
|
||||
int non_sequential_xri = 0;
|
||||
|
@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
|
|||
break;
|
||||
}
|
||||
|
||||
psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
|
||||
if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
|
||||
lxri = lpfc_sli4_next_xritag(phba);
|
||||
if (lxri == NO_XRI) {
|
||||
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
|
||||
psb->data, psb->dma_handle);
|
||||
kfree(psb);
|
||||
break;
|
||||
}
|
||||
psb->cur_iocbq.sli4_lxritag = lxri;
|
||||
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
|
||||
if (last_xritag != NO_XRI
|
||||
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
|
||||
non_sequential_xri = 1;
|
||||
|
@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
|
|||
*/
|
||||
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
|
||||
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
|
||||
sgl->word2 = le32_to_cpu(sgl->word2);
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
|
||||
|
@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
|
|||
/* Setup the physical region for the FCP RSP */
|
||||
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
|
||||
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
|
||||
sgl->word2 = le32_to_cpu(sgl->word2);
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
|
||||
|
@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
|
|||
}
|
||||
}
|
||||
if (bcnt) {
|
||||
status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
|
||||
if (!phba->sli4_hba.extents_in_use)
|
||||
status = lpfc_sli4_post_scsi_sgl_block(phba,
|
||||
&sblist,
|
||||
bcnt);
|
||||
else
|
||||
status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
|
||||
&sblist,
|
||||
bcnt);
|
||||
|
||||
if (status) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
|
||||
"3021 SCSI SGL post error %d\n",
|
||||
status);
|
||||
bcnt = 0;
|
||||
}
|
||||
/* Reset SCSI buffer count for next round of posting */
|
||||
while (!list_empty(&sblist)) {
|
||||
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
|
||||
|
@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
|||
dma_len = sg_dma_len(sgel);
|
||||
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
|
||||
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
|
||||
sgl->word2 = le32_to_cpu(sgl->word2);
|
||||
if ((num_bde + 1) == nseg)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
else
|
||||
|
@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
|
|||
* of the scsi_cmnd request_buffer
|
||||
*/
|
||||
piocbq->iocb.ulpContext = pnode->nlp_rpi;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
piocbq->iocb.ulpContext =
|
||||
phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
|
||||
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
|
||||
piocbq->iocb.ulpFCP2Rcvy = 1;
|
||||
else
|
||||
|
@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
|
|||
}
|
||||
|
||||
/**
|
||||
* lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
|
||||
* lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
|
||||
* @vport: The virtual port for which this call is being executed.
|
||||
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
|
||||
* @lun: Logical unit number.
|
||||
|
@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
|
|||
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
|
||||
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
|
||||
piocb->ulpContext = ndlp->nlp_rpi;
|
||||
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
|
||||
piocb->ulpContext =
|
||||
vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
}
|
||||
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
|
||||
piocb->ulpFCP2Rcvy = 1;
|
||||
}
|
||||
|
@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
|
|||
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
|
||||
"0702 Issue %s to TGT %d LUN %d "
|
||||
"rpi x%x nlp_flag x%x\n",
|
||||
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
|
||||
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
|
||||
pnode->nlp_rpi, pnode->nlp_flag);
|
||||
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
|
||||
iocbq->iocb_flag);
|
||||
|
||||
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
|
||||
iocbq, iocbqrsp, lpfc_cmd->timeout);
|
||||
|
@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
|
|||
ret = FAILED;
|
||||
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
|
||||
"0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
|
||||
"0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
|
||||
"iocb_flag x%x\n",
|
||||
lpfc_taskmgmt_name(task_mgmt_cmd),
|
||||
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
|
||||
iocbqrsp->iocb.un.ulpWord[4]);
|
||||
iocbqrsp->iocb.un.ulpWord[4],
|
||||
iocbq->iocb_flag);
|
||||
} else if (status == IOCB_BUSY)
|
||||
ret = FAILED;
|
||||
else
|
||||
|
|
File diff suppressed because it is too large
|
@ -52,6 +52,7 @@ struct lpfc_iocbq {
|
|||
struct list_head clist;
|
||||
struct list_head dlist;
|
||||
uint16_t iotag; /* pre-assigned IO tag */
|
||||
uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
|
||||
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
|
||||
struct lpfc_cq_event cq_event;
|
||||
|
||||
|
|
|
@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
|
|||
uint16_t vfi_base;
|
||||
uint16_t vfi_used;
|
||||
uint16_t max_fcfi;
|
||||
uint16_t fcfi_base;
|
||||
uint16_t fcfi_used;
|
||||
uint16_t max_eq;
|
||||
uint16_t max_rq;
|
||||
|
@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
|
|||
uint8_t rqv;
|
||||
};
|
||||
|
||||
struct lpfc_iov {
|
||||
uint32_t pf_number;
|
||||
uint32_t vf_number;
|
||||
};
|
||||
|
||||
/* SLI4 HBA data structure entries */
|
||||
struct lpfc_sli4_hba {
|
||||
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
|
||||
|
@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
|
|||
uint32_t intr_enable;
|
||||
struct lpfc_bmbx bmbx;
|
||||
struct lpfc_max_cfg_param max_cfg_param;
|
||||
uint16_t extents_in_use; /* must allocate resource extents. */
|
||||
uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
|
||||
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
|
||||
uint16_t next_rpi;
|
||||
uint16_t scsi_xri_max;
|
||||
uint16_t scsi_xri_cnt;
|
||||
uint16_t scsi_xri_start;
|
||||
struct list_head lpfc_free_sgl_list;
|
||||
struct list_head lpfc_sgl_list;
|
||||
struct lpfc_sglq **lpfc_els_sgl_array;
|
||||
|
@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
|
|||
struct lpfc_sglq **lpfc_sglq_active_list;
|
||||
struct list_head lpfc_rpi_hdr_list;
|
||||
unsigned long *rpi_bmask;
|
||||
uint16_t *rpi_ids;
|
||||
uint16_t rpi_count;
|
||||
struct list_head lpfc_rpi_blk_list;
|
||||
unsigned long *xri_bmask;
|
||||
uint16_t *xri_ids;
|
||||
uint16_t xri_count;
|
||||
struct list_head lpfc_xri_blk_list;
|
||||
unsigned long *vfi_bmask;
|
||||
uint16_t *vfi_ids;
|
||||
uint16_t vfi_count;
|
||||
struct list_head lpfc_vfi_blk_list;
|
||||
struct lpfc_sli4_flags sli4_flags;
|
||||
struct list_head sp_queue_event;
|
||||
struct list_head sp_cqe_event_pool;
|
||||
|
@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
|
|||
struct list_head sp_els_xri_aborted_work_queue;
|
||||
struct list_head sp_unsol_work_queue;
|
||||
struct lpfc_sli4_link link_state;
|
||||
struct lpfc_iov iov;
|
||||
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
|
||||
};
|
||||
|
@ -490,6 +508,7 @@ struct lpfc_sglq {
|
|||
enum lpfc_sgl_state state;
|
||||
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
|
||||
uint16_t iotag; /* pre-assigned IO tag */
|
||||
uint16_t sli4_lxritag; /* logical pre-assigned xri. */
|
||||
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
|
||||
struct sli4_sge *sgl; /* pre-assigned SGL */
|
||||
void *virt; /* virtual address. */
|
||||
|
@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
|
|||
uint32_t start_rpi;
|
||||
};
|
||||
|
||||
struct lpfc_rsrc_blks {
|
||||
struct list_head list;
|
||||
uint16_t rsrc_start;
|
||||
uint16_t rsrc_size;
|
||||
uint16_t rsrc_used;
|
||||
};
|
||||
|
||||
/*
|
||||
* SLI4 specific function prototypes
|
||||
*/
|
||||
|
@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
|
|||
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
|
||||
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
|
||||
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
|
||||
int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
|
||||
int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
|
||||
int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
|
||||
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
|
||||
int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
|
||||
int);
|
||||
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
|
||||
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
|
||||
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
|
||||
|
|
|
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
	int vpi;
	unsigned long vpi;

	spin_lock_irq(&phba->hbalock);
	/* Start at bit 1 because vpi zero is reserved for the physical port */

@@ -33,9 +33,9 @@
/*
 * MegaRAID SAS Driver meta data
 */
#define MEGASAS_VERSION "00.00.05.34-rc1"
#define MEGASAS_RELDATE "Feb. 24, 2011"
#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
#define MEGASAS_VERSION "00.00.05.38-rc1"
#define MEGASAS_RELDATE "May. 11, 2011"
#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"

/*
 * Device IDs
@@ -76,8 +76,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
#define MFI_RESET_REQUIRED 0x00000001

#define MFI_RESET_REQUIRED 0x00000001
#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64

/*
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
* FILE: megaraid_sas_base.c
|
||||
* Version : v00.00.05.34-rc1
|
||||
* Version : v00.00.05.38-rc1
|
||||
*
|
||||
* Authors: LSI Corporation
|
||||
* Sreenivas Bagalkote
|
||||
|
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
	u32 status;
	u32 status, mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
		return 0;
	}
	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

/*
|
||||
* Clear the interrupt by writing back the same value
|
||||
|
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return 1;
	return mfiStatus;
}

/**
|
||||
* megasas_fire_cmd_ppc - Sends command to the FW
|
||||
* @frame_phys_addr : Physical address of cmd
|
||||
|
@ -476,17 +480,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
|
|||
spin_unlock_irqrestore(&instance->hba_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_adp_reset_ppc - For controller reset
|
||||
* @regs: MFI register set
|
||||
*/
|
||||
static int
|
||||
megasas_adp_reset_ppc(struct megasas_instance *instance,
|
||||
struct megasas_register_set __iomem *regs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_check_reset_ppc - For controller reset check
|
||||
* @regs: MFI register set
|
||||
|
@ -495,8 +488,12 @@ static int
|
|||
megasas_check_reset_ppc(struct megasas_instance *instance,
|
||||
struct megasas_register_set __iomem *regs)
|
||||
{
|
||||
if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct megasas_instance_template megasas_instance_template_ppc = {
|
||||
|
||||
.fire_cmd = megasas_fire_cmd_ppc,
|
||||
|
@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
|
|||
.disable_intr = megasas_disable_intr_ppc,
|
||||
.clear_intr = megasas_clear_intr_ppc,
|
||||
.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
|
||||
.adp_reset = megasas_adp_reset_ppc,
|
||||
.adp_reset = megasas_adp_reset_xscale,
|
||||
.check_reset = megasas_check_reset_ppc,
|
||||
.service_isr = megasas_isr,
|
||||
.tasklet = megasas_complete_cmd_dpc,
|
||||
|
@ -620,6 +617,9 @@ static int
|
|||
megasas_check_reset_skinny(struct megasas_instance *instance,
|
||||
struct megasas_register_set __iomem *regs)
|
||||
{
|
||||
if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
|||
{
|
||||
u32 max_sectors_1;
|
||||
u32 max_sectors_2;
|
||||
u32 tmp_sectors;
|
||||
u32 tmp_sectors, msix_enable;
|
||||
struct megasas_register_set __iomem *reg_set;
|
||||
struct megasas_ctrl_info *ctrl_info;
|
||||
unsigned long bar_list;
|
||||
|
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
	if (megasas_transition_to_ready(instance))
		goto fail_ready_state;

	/* Check if MSI-X is supported while in ready state */
	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
		       0x4000000) >> 0x1a;
	if (msix_enable && !msix_disable &&
	    !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
		instance->msi_flag = 1;

	/* Get operational params, sge flags, send init cmd to controller */
	if (instance->instancet->init_adapter(instance))
		goto fail_init_adapter;
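
Not part of the patch itself, but for readers decoding the magic numbers above: the mask-and-shift just reads one capability bit out of the firmware status register. A minimal standalone sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw_status = 0x04000003;	/* hypothetical register value */

	/* Bit 26 (0x4000000) advertises MSI-X support; shifting right by
	 * 0x1a (26) leaves just that bit as a 0/1 flag. */
	uint32_t msix_enable = (fw_status & 0x4000000) >> 0x1a;

	printf("msix_enable = %u\n", msix_enable);	/* prints 1 */
	return 0;
}
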
@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
else
|
||||
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
|
||||
|
||||
/* Try to enable MSI-X */
|
||||
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
|
||||
(instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
|
||||
(instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
|
||||
!msix_disable && !pci_enable_msix(instance->pdev,
|
||||
&instance->msixentry, 1))
|
||||
instance->msi_flag = 1;
|
||||
|
||||
/*
|
||||
* Initialize MFI Firmware
|
||||
*/
|
||||
|
@ -4115,6 +4114,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
|
||||
megasas_mgmt_info.max_index++;
|
||||
|
||||
/*
|
||||
* Register with SCSI mid-layer
|
||||
*/
|
||||
if (megasas_io_attach(instance))
|
||||
goto fail_io_attach;
|
||||
|
||||
instance->unload = 0;
|
||||
|
||||
/*
|
||||
* Initiate AEN (Asynchronous Event Notification)
|
||||
*/
|
||||
|
@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
goto fail_start_aen;
|
||||
}
|
||||
|
||||
/*
|
||||
* Register with SCSI mid-layer
|
||||
*/
|
||||
if (megasas_io_attach(instance))
|
||||
goto fail_io_attach;
|
||||
|
||||
instance->unload = 0;
|
||||
return 0;
|
||||
|
||||
fail_start_aen:
|
||||
|
@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
|
|||
if (megasas_set_dma_mask(pdev))
|
||||
goto fail_set_dma_mask;
|
||||
|
||||
/* Now re-enable MSI-X */
|
||||
if (instance->msi_flag)
|
||||
pci_enable_msix(instance->pdev, &instance->msixentry, 1);
|
||||
|
||||
/*
|
||||
* Initialize MFI Firmware
|
||||
*/
|
||||
|
@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
|
|||
if (megasas_transition_to_ready(instance))
|
||||
goto fail_ready_state;
|
||||
|
||||
/* Now re-enable MSI-X */
|
||||
if (instance->msi_flag)
|
||||
pci_enable_msix(instance->pdev, &instance->msixentry, 1);
|
||||
|
||||
switch (instance->pdev->device) {
|
||||
case PCI_DEVICE_ID_LSI_FUSION:
|
||||
{
|
||||
|
@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
|
|||
|
||||
instance->instancet->enable_intr(instance->reg_set);
|
||||
|
||||
/*
|
||||
* Initiate AEN (Asynchronous Event Notification)
|
||||
*/
|
||||
if (megasas_start_aen(instance))
|
||||
printk(KERN_ERR "megasas: Start AEN failed\n");
|
||||
|
||||
/* Initialize the cmd completion timer */
|
||||
if (poll_mode_io)
|
||||
megasas_start_timer(instance, &instance->io_completion_timer,
|
||||
|
@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
|
|||
MEGASAS_COMPLETION_TIMER_INTERVAL);
|
||||
instance->unload = 0;
|
||||
|
||||
/*
|
||||
* Initiate AEN (Asynchronous Event Notification)
|
||||
*/
|
||||
if (megasas_start_aen(instance))
|
||||
printk(KERN_ERR "megasas: Start AEN failed\n");
|
||||
|
||||
return 0;
|
||||
|
||||
fail_irq:
|
||||
|
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
	instance->unload = 1;
	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
	instance->instancet->disable_intr(instance->reg_set);
	free_irq(instance->msi_flag ? instance->msixentry.vector :
		 instance->pdev->irq, instance);
	if (instance->msi_flag)
		pci_disable_msix(instance->pdev);
}

/**

@ -695,22 +695,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
|
||||
* @instance: Adapter soft state
|
||||
*
|
||||
*/
|
||||
void
|
||||
megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
|
||||
{
|
||||
struct fusion_context *fusion;
|
||||
struct megasas_cmd_fusion *cmd;
|
||||
|
||||
fusion = instance->ctrl_context;
|
||||
cmd = fusion->cmd_list[smid - 1];
|
||||
megasas_return_cmd_fusion(instance, cmd);
|
||||
}
|
||||
|
||||
/*
|
||||
* megasas_get_ld_map_info - Returns FW's ld_map structure
|
||||
* @instance: Adapter soft state
|
||||
|
@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
|
|||
u64 start_blk = io_info->pdBlock;
|
||||
u8 *cdb = io_request->CDB.CDB32;
|
||||
u32 num_blocks = io_info->numBlocks;
|
||||
u8 opcode, flagvals, groupnum, control;
|
||||
u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
|
||||
|
||||
/* Check if T10 PI (DIF) is enabled for this LD */
|
||||
ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
|
||||
|
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
		cdb[8] = (u8)(num_blocks & 0xff);
		cdb[7] = (u8)((num_blocks >> 8) & 0xff);

		io_request->IoFlags = 10; /* Specify 10-byte cdb */
		cdb_len = 10;
	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
		/* Convert to 16 byte CDB for large LBA's */
		switch (cdb_len) {
		case 6:
			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
			control = cdb[5];
			break;
		case 10:
			opcode =
				cdb[0] == READ_10 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[6];
			control = cdb[9];
			break;
		case 12:
			opcode =
				cdb[0] == READ_12 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[10];
			control = cdb[11];
			break;
		}

		memset(cdb, 0, sizeof(io_request->CDB.CDB32));

		cdb[0] = opcode;
		cdb[1] = flagvals;
		cdb[14] = groupnum;
		cdb[15] = control;

		/* Transfer length */
		cdb[13] = (u8)(num_blocks & 0xff);
		cdb[12] = (u8)((num_blocks >> 8) & 0xff);
		cdb[11] = (u8)((num_blocks >> 16) & 0xff);
		cdb[10] = (u8)((num_blocks >> 24) & 0xff);

		io_request->IoFlags = 16; /* Specify 16-byte cdb */
		cdb_len = 16;
	}

	/* Normal case, just load LBA here */
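
A rough userspace illustration of the same idea, separate from the driver code: widening a READ(10)-style request into a 16-byte CDB so a >32-bit LBA and the transfer length fit. The opcode value follows the SCSI spec; the helper name and layout loop are just for illustration:

#include <stdint.h>
#include <string.h>

#define READ_16 0x88

/* Repack a read as READ(16) for LBAs above 0xffffffff. */
static void build_read16(uint8_t cdb[16], uint64_t lba, uint32_t num_blocks,
			 uint8_t flagvals, uint8_t groupnum, uint8_t control)
{
	int i;

	memset(cdb, 0, 16);
	cdb[0] = READ_16;
	cdb[1] = flagvals;
	cdb[14] = groupnum;
	cdb[15] = control;

	for (i = 0; i < 8; i++)		/* bytes 2..9: 64-bit LBA, big-endian */
		cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
	for (i = 0; i < 4; i++)		/* bytes 10..13: transfer length */
		cdb[10 + i] = (uint8_t)(num_blocks >> (8 * (3 - i)));
}
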
@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
struct fusion_context *fusion;
|
||||
struct megasas_cmd *cmd_mfi;
|
||||
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
|
||||
u32 host_diag, abs_state;
|
||||
u32 host_diag, abs_state, status_reg, reset_adapter;
|
||||
|
||||
instance = (struct megasas_instance *)shost->hostdata;
|
||||
fusion = instance->ctrl_context;
|
||||
|
||||
mutex_lock(&instance->reset_mutex);
|
||||
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
|
||||
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
|
||||
instance->instancet->disable_intr(instance->reg_set);
|
||||
msleep(1000);
|
||||
|
||||
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
|
||||
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
|
||||
"returning FAILED.\n");
|
||||
|
@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&instance->reset_mutex);
|
||||
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
|
||||
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
|
||||
instance->instancet->disable_intr(instance->reg_set);
|
||||
msleep(1000);
|
||||
|
||||
/* First try waiting for commands to complete */
|
||||
if (megasas_wait_for_outstanding_fusion(instance)) {
|
||||
printk(KERN_WARNING "megaraid_sas: resetting fusion "
|
||||
|
@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
}
|
||||
}
|
||||
|
||||
if (instance->disableOnlineCtrlReset == 1) {
|
||||
status_reg = instance->instancet->read_fw_status_reg(
|
||||
instance->reg_set);
|
||||
abs_state = status_reg & MFI_STATE_MASK;
|
||||
reset_adapter = status_reg & MFI_RESET_ADAPTER;
|
||||
if (instance->disableOnlineCtrlReset ||
|
||||
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
|
||||
/* Reset not supported, kill adapter */
|
||||
printk(KERN_WARNING "megaraid_sas: Reset not supported"
|
||||
", killing adapter.\n");
|
||||
|
@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
|
||||
/* Check that the diag write enable (DRWE) bit is on */
|
||||
host_diag = readl(&instance->reg_set->fusion_host_diag);
|
||||
retry = 0;
|
||||
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
|
||||
msleep(100);
|
||||
host_diag =
|
||||
|
@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
|
||||
abs_state =
|
||||
instance->instancet->read_fw_status_reg(
|
||||
instance->reg_set);
|
||||
instance->reg_set) & MFI_STATE_MASK;
|
||||
retry = 0;
|
||||
|
||||
while ((abs_state <= MFI_STATE_FW_INIT) &&
|
||||
|
@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
|
|||
msleep(100);
|
||||
abs_state =
|
||||
instance->instancet->read_fw_status_reg(
|
||||
instance->reg_set);
|
||||
instance->reg_set) & MFI_STATE_MASK;
|
||||
}
|
||||
if (abs_state <= MFI_STATE_FW_INIT) {
|
||||
printk(KERN_WARNING "megaraid_sas: firmware "
|
||||
|
|
|
@ -69,11 +69,11 @@
|
|||
#define MPT2SAS_DRIVER_NAME "mpt2sas"
|
||||
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
|
||||
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
|
||||
#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
|
||||
#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
|
||||
#define MPT2SAS_MAJOR_VERSION 08
|
||||
#define MPT2SAS_MINOR_VERSION 100
|
||||
#define MPT2SAS_BUILD_VERSION 00
|
||||
#define MPT2SAS_RELEASE_VERSION 01
|
||||
#define MPT2SAS_RELEASE_VERSION 02
|
||||
|
||||
/*
|
||||
* Set MPT2SAS_SG_DEPTH value based on user input.
|
||||
|
|
|
@ -113,6 +113,7 @@ struct sense_info {
|
|||
};
|
||||
|
||||
|
||||
#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
|
||||
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
|
||||
|
||||
/**
|
||||
|
@ -121,6 +122,7 @@ struct sense_info {
|
|||
* @work: work object (ioc->fault_reset_work_q)
|
||||
* @cancel_pending_work: flag set during reset handling
|
||||
* @ioc: per adapter object
|
||||
* @device_handle: device handle
|
||||
* @VF_ID: virtual function id
|
||||
* @VP_ID: virtual port id
|
||||
* @ignore: flag meaning this event has been marked to ignore
|
||||
|
@ -134,6 +136,7 @@ struct fw_event_work {
|
|||
u8 cancel_pending_work;
|
||||
struct delayed_work delayed_work;
|
||||
struct MPT2SAS_ADAPTER *ioc;
|
||||
u16 device_handle;
|
||||
u8 VF_ID;
|
||||
u8 VP_ID;
|
||||
u8 ignore;
|
||||
|
@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
|
|||
|
||||
switch (prot_type) {
|
||||
case SCSI_PROT_DIF_TYPE1:
|
||||
case SCSI_PROT_DIF_TYPE2:
|
||||
|
||||
/*
|
||||
* enable ref/guard checking
|
||||
|
@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
|
|||
cpu_to_be32(scsi_get_lba(scmd));
|
||||
break;
|
||||
|
||||
case SCSI_PROT_DIF_TYPE2:
|
||||
|
||||
eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
|
||||
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
|
||||
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
|
||||
break;
|
||||
|
||||
case SCSI_PROT_DIF_TYPE3:
|
||||
|
||||
/*
|
||||
|
@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
|
|||
#endif
|
||||
|
||||
/**
|
||||
* _scsih_smart_predicted_fault - illuminate Fault LED
|
||||
* _scsih_turn_on_fault_led - illuminate Fault LED
|
||||
* @ioc: per adapter object
|
||||
* @handle: device handle
|
||||
* Context: process
|
||||
*
|
||||
* Return nothing.
|
||||
*/
|
||||
static void
|
||||
_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
||||
{
|
||||
Mpi2SepReply_t mpi_reply;
|
||||
Mpi2SepRequest_t mpi_request;
|
||||
|
||||
memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
|
||||
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
|
||||
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
|
||||
mpi_request.SlotStatus =
|
||||
cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
|
||||
mpi_request.DevHandle = cpu_to_le16(handle);
|
||||
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
|
||||
if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
|
||||
&mpi_request)) != 0) {
|
||||
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
|
||||
__FILE__, __LINE__, __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
|
||||
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
|
||||
"ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
|
||||
le16_to_cpu(mpi_reply.IOCStatus),
|
||||
le32_to_cpu(mpi_reply.IOCLogInfo)));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* _scsih_send_event_to_turn_on_fault_led - fire delayed event
|
||||
* @ioc: per adapter object
|
||||
* @handle: device handle
|
||||
* Context: interrupt.
|
||||
*
|
||||
* Return nothing.
|
||||
*/
|
||||
static void
|
||||
_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
||||
{
|
||||
struct fw_event_work *fw_event;
|
||||
|
||||
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
|
||||
if (!fw_event)
|
||||
return;
|
||||
fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
|
||||
fw_event->device_handle = handle;
|
||||
fw_event->ioc = ioc;
|
||||
_scsih_fw_event_add(ioc, fw_event);
|
||||
}
|
||||
|
||||
/**
|
||||
* _scsih_smart_predicted_fault - process smart errors
|
||||
* @ioc: per adapter object
|
||||
* @handle: device handle
|
||||
* Context: interrupt.
|
||||
*
|
||||
* Return nothing.
|
||||
*/
|
||||
static void
|
||||
_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
||||
{
|
||||
Mpi2SepReply_t mpi_reply;
|
||||
Mpi2SepRequest_t mpi_request;
|
||||
struct scsi_target *starget;
|
||||
struct MPT2SAS_TARGET *sas_target_priv_data;
|
||||
Mpi2EventNotificationReply_t *event_reply;
|
||||
|
@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
|||
starget_printk(KERN_WARNING, starget, "predicted fault\n");
|
||||
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
|
||||
|
||||
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
|
||||
memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
|
||||
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
|
||||
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
|
||||
mpi_request.SlotStatus =
|
||||
cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
|
||||
mpi_request.DevHandle = cpu_to_le16(handle);
|
||||
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
|
||||
if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
|
||||
&mpi_request)) != 0) {
|
||||
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
|
||||
ioc->name, __FILE__, __LINE__, __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
|
||||
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
|
||||
"enclosure_processor: ioc_status (0x%04x), "
|
||||
"loginfo(0x%08x)\n", ioc->name,
|
||||
le16_to_cpu(mpi_reply.IOCStatus),
|
||||
le32_to_cpu(mpi_reply.IOCLogInfo)));
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
|
||||
_scsih_send_event_to_turn_on_fault_led(ioc, handle);
|
||||
|
||||
/* insert into event log */
|
||||
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
|
||||
|
@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
|
|||
}
|
||||
|
||||
switch (fw_event->event) {
|
||||
case MPT2SAS_TURN_ON_FAULT_LED:
|
||||
_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
|
||||
break;
|
||||
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
|
||||
_scsih_sas_topology_change_event(ioc, fw_event);
|
||||
break;
|
||||
|
|
|
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
			break;

		if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
			int j;

			STp->pos_unknown = 0;
			STp->partition = STp->new_partition = 0;
			if (STp->can_partitions)
				STp->nbr_partitions = 1;  /* This guess will be updated later if necessary */
			for (i=0; i < ST_NBR_PARTITIONS; i++) {
				STps = &(STp->ps[i]);
			for (j = 0; j < ST_NBR_PARTITIONS; j++) {
				STps = &(STp->ps[j]);
				STps->rw = ST_IDLE;
				STps->eof = ST_NOEOF;
				STps->at_sm = 0;

@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
		ql4_nx.o ql4_nvram.o ql4_dbg.o
		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o

obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o

drivers/scsi/qla4xxx/ql4_attr.c (new file, 69 lines)
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* QLogic iSCSI HBA Driver
|
||||
* Copyright (c) 2003-2011 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla4xxx for copyright and licensing details.
|
||||
*/
|
||||
|
||||
#include "ql4_def.h"
|
||||
#include "ql4_glbl.h"
|
||||
#include "ql4_dbg.h"
|
||||
|
||||
/* Scsi_Host attributes. */
|
||||
static ssize_t
|
||||
qla4xxx_fw_version_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
|
||||
|
||||
if (is_qla8022(ha))
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
|
||||
ha->firmware_version[0],
|
||||
ha->firmware_version[1],
|
||||
ha->patch_number, ha->build_number);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
|
||||
ha->firmware_version[0],
|
||||
ha->firmware_version[1],
|
||||
ha->patch_number, ha->build_number);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
|
||||
ha->iscsi_minor);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
|
||||
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
|
||||
ha->bootload_major, ha->bootload_minor,
|
||||
ha->bootload_patch, ha->bootload_build);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
|
||||
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
|
||||
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
|
||||
static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
|
||||
|
||||
struct device_attribute *qla4xxx_host_attrs[] = {
|
||||
&dev_attr_fw_version,
|
||||
&dev_attr_serial_num,
|
||||
&dev_attr_iscsi_version,
|
||||
&dev_attr_optrom_version,
|
||||
NULL,
|
||||
};
|
|
@ -115,7 +115,7 @@
|
|||
#define INVALID_ENTRY 0xFFFF
|
||||
#define MAX_CMDS_TO_RISC 1024
|
||||
#define MAX_SRBS MAX_CMDS_TO_RISC
|
||||
#define MBOX_AEN_REG_COUNT 5
|
||||
#define MBOX_AEN_REG_COUNT 8
|
||||
#define MAX_INIT_RETRIES 5
|
||||
|
||||
/*
|
||||
|
@ -368,7 +368,6 @@ struct scsi_qla_host {
|
|||
#define AF_INIT_DONE 1 /* 0x00000002 */
|
||||
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
|
||||
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
|
||||
#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
|
||||
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
|
||||
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
|
||||
#define AF_LINK_UP 8 /* 0x00000100 */
|
||||
|
@ -584,6 +583,14 @@ struct scsi_qla_host {
|
|||
uint32_t nx_reset_timeout;
|
||||
|
||||
struct completion mbx_intr_comp;
|
||||
|
||||
/* --- From About Firmware --- */
|
||||
uint16_t iscsi_major;
|
||||
uint16_t iscsi_minor;
|
||||
uint16_t bootload_major;
|
||||
uint16_t bootload_minor;
|
||||
uint16_t bootload_patch;
|
||||
uint16_t bootload_build;
|
||||
};
|
||||
|
||||
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
|
||||
|
|
|
@@ -690,6 +690,29 @@ struct mbx_sys_info {
	uint8_t reserved[12];		/* 34-3f */
};

struct about_fw_info {
	uint16_t fw_major;		/* 00 - 01 */
	uint16_t fw_minor;		/* 02 - 03 */
	uint16_t fw_patch;		/* 04 - 05 */
	uint16_t fw_build;		/* 06 - 07 */
	uint8_t fw_build_date[16];	/* 08 - 17 ASCII String */
	uint8_t fw_build_time[16];	/* 18 - 27 ASCII String */
	uint8_t fw_build_user[16];	/* 28 - 37 ASCII String */
	uint16_t fw_load_source;	/* 38 - 39 */
					/* 1 = Flash Primary,
					   2 = Flash Secondary,
					   3 = Host Download
					*/
	uint8_t reserved1[6];		/* 3A - 3F */
	uint16_t iscsi_major;		/* 40 - 41 */
	uint16_t iscsi_minor;		/* 42 - 43 */
	uint16_t bootload_major;	/* 44 - 45 */
	uint16_t bootload_minor;	/* 46 - 47 */
	uint16_t bootload_patch;	/* 48 - 49 */
	uint16_t bootload_build;	/* 4A - 4B */
	uint8_t reserved2[180];		/* 4C - FF */
};

struct crash_record {
	uint16_t fw_major_version;	/* 00 - 01 */
	uint16_t fw_minor_version;	/* 02 - 03 */
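
Since the comments on struct about_fw_info encode the exact byte offsets the firmware hands back, a compile-time check of a few of them catches accidental padding. A standalone userspace sketch, with the struct copied under a different name so it does not clash with the driver's definition:

#include <stddef.h>
#include <stdint.h>

struct about_fw_info_check {
	uint16_t fw_major;		/* 00 - 01 */
	uint16_t fw_minor;		/* 02 - 03 */
	uint16_t fw_patch;		/* 04 - 05 */
	uint16_t fw_build;		/* 06 - 07 */
	uint8_t  fw_build_date[16];	/* 08 - 17 */
	uint8_t  fw_build_time[16];	/* 18 - 27 */
	uint8_t  fw_build_user[16];	/* 28 - 37 */
	uint16_t fw_load_source;	/* 38 - 39 */
	uint8_t  reserved1[6];		/* 3A - 3F */
	uint16_t iscsi_major;		/* 40 - 41 */
	uint16_t iscsi_minor;		/* 42 - 43 */
	uint16_t bootload_major;	/* 44 - 45 */
	uint16_t bootload_minor;	/* 46 - 47 */
	uint16_t bootload_patch;	/* 48 - 49 */
	uint16_t bootload_build;	/* 4A - 4B */
	uint8_t  reserved2[180];	/* 4C - FF */
};

/* The commented offsets must match the compiler's layout, and the whole
 * block must span exactly 256 bytes (0x00 - 0xFF). */
_Static_assert(offsetof(struct about_fw_info_check, iscsi_major) == 0x40,
	       "iscsi_major offset");
_Static_assert(offsetof(struct about_fw_info_check, bootload_build) == 0x4A,
	       "bootload_build offset");
_Static_assert(sizeof(struct about_fw_info_check) == 256, "total size");
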
@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
|
|||
int qla4xxx_add_sess(struct ddb_entry *);
|
||||
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
|
||||
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
|
||||
int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
|
||||
int qla4xxx_about_firmware(struct scsi_qla_host *ha);
|
||||
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
|
||||
uint32_t intr_status);
|
||||
int qla4xxx_init_rings(struct scsi_qla_host *ha);
|
||||
|
@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
|
|||
extern int ql4xdontresethba;
|
||||
extern int ql4xenablemsix;
|
||||
|
||||
extern struct device_attribute *qla4xxx_host_attrs[];
|
||||
#endif /* _QLA4x_GBL_H */
|
||||
|
|
|
@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
|
|||
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
|
||||
if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
|
||||
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
|
||||
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
|
||||
|
|
|
@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
|
|||
|
||||
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
|
||||
if (sense_len == 0)
|
||||
if (sense_len == 0) {
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
|
||||
" sense len 0\n", ha->host_no,
|
||||
cmd->device->channel, cmd->device->id,
|
||||
cmd->device->lun, __func__));
|
||||
ha->status_srb = NULL;
|
||||
return;
|
||||
|
||||
}
|
||||
/* Save total available sense length,
|
||||
* not to exceed cmd's sense buffer size */
|
||||
sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
|
||||
|
@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
|
|||
case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
|
||||
case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
|
||||
case MBOX_ASTS_SUBNET_STATE_CHANGE:
|
||||
case MBOX_ASTS_DUPLICATE_IP:
|
||||
/* No action */
|
||||
DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
|
||||
mbox_status));
|
||||
|
@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
|
|||
mbox_sts[i];
|
||||
|
||||
/* print debug message */
|
||||
DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
|
||||
" mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
|
||||
ha->host_no, ha->aen_in, mbox_sts[0],
|
||||
mbox_sts[1], mbox_sts[2], mbox_sts[3],
|
||||
mbox_sts[4]));
|
||||
DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
|
||||
"mb1:0x%x mb2:0x%x mb3:0x%x "
|
||||
"mb4:0x%x mb5:0x%x\n",
|
||||
ha->host_no, ha->aen_in,
|
||||
mbox_sts[0], mbox_sts[1],
|
||||
mbox_sts[2], mbox_sts[3],
|
||||
mbox_sts[4], mbox_sts[5]));
|
||||
|
||||
/* advance pointer */
|
||||
ha->aen_in++;
|
||||
|
|
|
@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
|
|||
msleep(10);
|
||||
}
|
||||
|
||||
/* To prevent overwriting mailbox registers for a command that has
|
||||
* not yet been serviced, check to see if an active command
|
||||
* (AEN, IOCB, etc.) is interrupting, then service it.
|
||||
* -----------------------------------------------------------------
|
||||
*/
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!is_qla8022(ha)) {
|
||||
intr_status = readl(&ha->reg->ctrl_status);
|
||||
if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
|
||||
/* Service existing interrupt */
|
||||
ha->isp_ops->interrupt_service_routine(ha, intr_status);
|
||||
clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
|
||||
}
|
||||
}
|
||||
|
||||
ha->mbox_status_count = outCount;
|
||||
for (i = 0; i < outCount; i++)
|
||||
ha->mbox_status[i] = 0;
|
||||
|
@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
|
|||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_get_fw_version - gets firmware version
|
||||
* qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
|
||||
* @ha: Pointer to host adapter structure.
|
||||
*
|
||||
* Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
|
||||
* hold an address for data. Make sure that we write 0 to those mailboxes,
|
||||
* if unused.
|
||||
* Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
|
||||
* Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
|
||||
* those mailboxes, if unused.
|
||||
**/
|
||||
int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
|
||||
int qla4xxx_about_firmware(struct scsi_qla_host *ha)
|
||||
{
|
||||
struct about_fw_info *about_fw = NULL;
|
||||
dma_addr_t about_fw_dma;
|
||||
uint32_t mbox_cmd[MBOX_REG_COUNT];
|
||||
uint32_t mbox_sts[MBOX_REG_COUNT];
|
||||
int status = QLA_ERROR;
|
||||
|
||||
/* Get firmware version. */
|
||||
about_fw = dma_alloc_coherent(&ha->pdev->dev,
|
||||
sizeof(struct about_fw_info),
|
||||
&about_fw_dma, GFP_KERNEL);
|
||||
if (!about_fw) {
|
||||
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
|
||||
"for about_fw\n", __func__));
|
||||
return status;
|
||||
}
|
||||
|
||||
memset(about_fw, 0, sizeof(struct about_fw_info));
|
||||
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
|
||||
memset(&mbox_sts, 0, sizeof(mbox_sts));
|
||||
|
||||
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
|
||||
mbox_cmd[2] = LSDW(about_fw_dma);
|
||||
mbox_cmd[3] = MSDW(about_fw_dma);
|
||||
mbox_cmd[4] = sizeof(struct about_fw_info);
|
||||
|
||||
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
|
||||
QLA_SUCCESS) {
|
||||
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
|
||||
"status %04X\n", ha->host_no, __func__, mbox_sts[0]));
|
||||
return QLA_ERROR;
|
||||
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
|
||||
&mbox_cmd[0], &mbox_sts[0]);
|
||||
if (status != QLA_SUCCESS) {
|
||||
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
|
||||
"failed w/ status %04X\n", __func__,
|
||||
mbox_sts[0]));
|
||||
goto exit_about_fw;
|
||||
}
|
||||
|
||||
/* Save firmware version information. */
|
||||
ha->firmware_version[0] = mbox_sts[1];
|
||||
ha->firmware_version[1] = mbox_sts[2];
|
||||
ha->patch_number = mbox_sts[3];
|
||||
ha->build_number = mbox_sts[4];
|
||||
/* Save version information. */
|
||||
ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
|
||||
ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
|
||||
ha->patch_number = le16_to_cpu(about_fw->fw_patch);
|
||||
ha->build_number = le16_to_cpu(about_fw->fw_build);
|
||||
ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
|
||||
ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
|
||||
ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
|
||||
ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
|
||||
ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
|
||||
ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
|
||||
status = QLA_SUCCESS;
|
||||
|
||||
return QLA_SUCCESS;
|
||||
exit_about_fw:
|
||||
dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
|
||||
about_fw, about_fw_dma);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
|
||||
|
|
|
@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
|
|||
/* Halt all the indiviual PEGs and other blocks of the ISP */
|
||||
qla4_8xxx_rom_lock(ha);
|
||||
|
||||
/* mask all niu interrupts */
|
||||
/* disable all I2Q */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
|
||||
|
||||
/* disable all niu interrupts */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
|
||||
/* disable xge rx/tx */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
|
||||
/* disable xg1 rx/tx */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
|
||||
/* disable sideband mac */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
|
||||
/* disable ap0 mac */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
|
||||
/* disable ap1 mac */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
|
||||
|
||||
/* halt sre */
|
||||
val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
|
||||
|
@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
|
|||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
|
||||
|
||||
/* halt pegs */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
|
||||
|
@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
|
|||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
|
||||
msleep(5);
|
||||
|
||||
/* big hammer */
|
||||
msleep(1000);
|
||||
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
|
||||
/* don't reset CAM block on reset */
|
||||
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
|
||||
|
|
|
@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
|
|||
.sg_tablesize = SG_ALL,
|
||||
|
||||
.max_sectors = 0xFFFF,
|
||||
.shost_attrs = qla4xxx_host_attrs,
|
||||
};
|
||||
|
||||
static struct iscsi_transport qla4xxx_iscsi_transport = {
|
||||
|
@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
|
|||
|
||||
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
|
||||
struct ddb_entry *ddb_entry,
|
||||
struct scsi_cmnd *cmd,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct srb *srb;
|
||||
|
||||
|
@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
|
|||
srb->cmd = cmd;
|
||||
srb->flags = 0;
|
||||
CMD_SP(cmd) = (void *)srb;
|
||||
cmd->scsi_done = done;
|
||||
|
||||
return srb;
|
||||
}
|
||||
|
@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
|
|||
|
||||
/**
|
||||
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
|
||||
* @host: scsi host
|
||||
* @cmd: Pointer to Linux's SCSI command structure
|
||||
* @done_fn: Function that the driver calls to notify the SCSI mid-layer
|
||||
* that the command has been processed.
|
||||
*
|
||||
* Remarks:
|
||||
* This routine is invoked by Linux to send a SCSI command to the driver.
|
||||
|
@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
|
|||
* completion handling). Unfortunely, it sometimes calls the scheduler
|
||||
* in interrupt context which is a big NO! NO!.
|
||||
**/
|
||||
static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
|
||||
struct scsi_qla_host *ha = to_qla_host(host);
|
||||
struct ddb_entry *ddb_entry = cmd->device->hostdata;
|
||||
struct iscsi_cls_session *sess = ddb_entry->sess;
|
||||
struct srb *srb;
|
||||
|
@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
|
|||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
|
||||
goto qc_host_busy;
|
||||
|
||||
spin_unlock_irq(ha->host->host_lock);
|
||||
|
||||
srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
|
||||
srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
|
||||
if (!srb)
|
||||
goto qc_host_busy_lock;
|
||||
goto qc_host_busy;
|
||||
|
||||
rval = qla4xxx_send_command_to_isp(ha, srb);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qc_host_busy_free_sp;
|
||||
|
||||
spin_lock_irq(ha->host->host_lock);
|
||||
return 0;
|
||||
|
||||
qc_host_busy_free_sp:
|
||||
qla4xxx_srb_free_dma(ha, srb);
|
||||
mempool_free(srb, ha->srb_mempool);
|
||||
|
||||
qc_host_busy_lock:
|
||||
spin_lock_irq(ha->host->host_lock);
|
||||
|
||||
qc_host_busy:
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
qc_fail_command:
|
||||
done(cmd);
|
||||
cmd->scsi_done(cmd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DEF_SCSI_QCMD(qla4xxx_queuecommand)
|
||||
|
||||
/**
|
||||
* qla4xxx_mem_free - frees memory allocated to adapter
|
||||
* @ha: Pointer to host adapter structure.
|
||||
|
@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
|
|||
if (ha->seconds_since_last_heartbeat == 2) {
|
||||
ha->seconds_since_last_heartbeat = 0;
|
||||
halt_status = qla4_8xxx_rd_32(ha,
|
||||
QLA82XX_PEG_HALT_STATUS1);
|
||||
QLA82XX_PEG_HALT_STATUS1);
|
||||
|
||||
ql4_printk(KERN_INFO, ha,
|
||||
"scsi(%ld): %s, Dumping hw/fw registers:\n "
|
||||
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
|
||||
" 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
|
||||
" 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
|
||||
" 0x%x,\n PEG_NET_4_PC: 0x%x\n",
|
||||
ha->host_no, __func__, halt_status,
|
||||
qla4_8xxx_rd_32(ha,
|
||||
QLA82XX_PEG_HALT_STATUS2),
|
||||
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
|
||||
0x3c),
|
||||
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
|
||||
0x3c),
|
||||
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
|
||||
0x3c),
|
||||
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
|
||||
0x3c),
|
||||
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
|
||||
0x3c));
|
||||
|
||||
/* Since we cannot change dev_state in interrupt
|
||||
* context, set appropriate DPC flag then wakeup
|
||||
|
@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
|
|||
/* don't poll if reset is going on */
|
||||
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
|
||||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
|
||||
test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
|
||||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
|
||||
if (dev_state == QLA82XX_DEV_NEED_RESET &&
|
||||
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
|
||||
if (!ql4xdontresethba) {
|
||||
|
@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
|
|||
}
|
||||
|
||||
/* Wakeup the dpc routine for this adapter, if needed. */
|
||||
if ((start_dpc ||
|
||||
if (start_dpc ||
|
||||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
|
||||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
|
||||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
|
||||
|
@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
|
|||
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
|
||||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
|
||||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
|
||||
test_bit(DPC_AEN, &ha->dpc_flags)) &&
|
||||
!test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
|
||||
ha->dpc_thread) {
|
||||
test_bit(DPC_AEN, &ha->dpc_flags)) {
|
||||
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
|
||||
" - dpc flags = 0x%lx\n",
|
||||
ha->host_no, __func__, ha->dpc_flags));
|
||||
|
@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
|
|||
|
||||
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
|
||||
{
|
||||
if (ha->dpc_thread &&
|
||||
!test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
|
||||
set_bit(AF_DPC_SCHEDULED, &ha->flags);
|
||||
if (ha->dpc_thread)
|
||||
queue_work(ha->dpc_thread, &ha->dpc_work);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
|
|||
|
||||
/* Initialization not yet finished. Don't do anything yet. */
|
||||
if (!test_bit(AF_INIT_DONE, &ha->flags))
|
||||
goto do_dpc_exit;
|
||||
return;
|
||||
|
||||
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
|
||||
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
|
||||
ha->host_no, __func__, ha->flags));
|
||||
goto do_dpc_exit;
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_qla8022(ha)) {
|
||||
|
@ -1384,8 +1388,6 @@ static void qla4xxx_do_dpc(struct work_struct *work)
|
|||
}
|
||||
}
|
||||
|
||||
do_dpc_exit:
|
||||
clear_bit(AF_DPC_SCHEDULED, &ha->flags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -5,4 +5,4 @@
|
|||
* See LICENSE.qla4xxx for copyright and licensing details.
|
||||
*/
|
||||
|
||||
#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
|
||||
#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
|
||||
|
|
|
@ -50,6 +50,8 @@
|
|||
#define BUS_RESET_SETTLE_TIME (10)
|
||||
#define HOST_RESET_SETTLE_TIME (10)
|
||||
|
||||
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
|
||||
|
||||
/* called with shost->host_lock held */
|
||||
void scsi_eh_wakeup(struct Scsi_Host *shost)
|
||||
{
|
||||
|
@@ -946,6 +948,48 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
	}
}

/**
 * scsi_eh_test_devices - check if devices are responding from error recovery.
 * @cmd_list: scsi commands in error recovery.
 * @work_q: queue for commands which still need more error recovery
 * @done_q: queue for commands which are finished
 * @try_stu: boolean on if a STU command should be tried in addition to TUR.
 *
 * Decription:
 *    Tests if devices are in a working state. Commands to devices now in
 *    a working state are sent to the done_q while commands to devices which
 *    are still failing to respond are returned to the work_q for more
 *    processing.
 **/
static int scsi_eh_test_devices(struct list_head *cmd_list,
				struct list_head *work_q,
				struct list_head *done_q, int try_stu)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;
	int finish_cmds;

	while (!list_empty(cmd_list)) {
		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
		sdev = scmd->device;

		finish_cmds = !scsi_device_online(scmd->device) ||
			      (try_stu && !scsi_eh_try_stu(scmd) &&
			       !scsi_eh_tur(scmd)) ||
			      !scsi_eh_tur(scmd);

		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
			if (scmd->device == sdev) {
				if (finish_cmds)
					scsi_eh_finish_cmd(scmd, done_q);
				else
					list_move_tail(&scmd->eh_entry, work_q);
			}
	}
	return list_empty(work_q);
}


/**
 * scsi_eh_abort_cmds - abort pending commands.
 * @work_q: &list_head for pending commands.
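
The interesting part of the helper above is the per-device decision: one probe (optionally START UNIT, then TEST UNIT READY) settles every command queued against the same device. A toy model of just that decision, with stand-in predicates instead of the real SCSI helpers (device numbering and return conventions invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for scsi_device_online()/scsi_eh_try_stu()/scsi_eh_tur();
 * as in the kernel helpers, 0 from try_stu/try_tur means "succeeded". */
static bool device_online(int dev) { return dev != 2; }
static int  try_stu(int dev)       { return dev == 1 ? 0 : 1; }
static int  try_tur(int dev)       { return dev == 1 ? 0 : 1; }

/* true: finish (device gone or responding again); false: keep for more EH */
static bool finish_cmds_for(int dev, bool try_stu_first)
{
	return !device_online(dev) ||
	       (try_stu_first && !try_stu(dev) && !try_tur(dev)) ||
	       !try_tur(dev);
}

int main(void)
{
	for (int dev = 0; dev < 3; dev++)
		printf("dev %d -> %s\n", dev,
		       finish_cmds_for(dev, true) ? "finish" : "retry");
	return 0;
}
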
@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
|
|||
struct list_head *done_q)
|
||||
{
|
||||
struct scsi_cmnd *scmd, *next;
|
||||
LIST_HEAD(check_list);
|
||||
int rtn;
|
||||
|
||||
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
|
||||
|
@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
|
|||
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
|
||||
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
|
||||
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
|
||||
if (!scsi_device_online(scmd->device) ||
|
||||
rtn == FAST_IO_FAIL ||
|
||||
!scsi_eh_tur(scmd)) {
|
||||
if (rtn == FAST_IO_FAIL)
|
||||
scsi_eh_finish_cmd(scmd, done_q);
|
||||
}
|
||||
else
|
||||
list_move_tail(&scmd->eh_entry, &check_list);
|
||||
} else
|
||||
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
|
||||
" cmd failed:"
|
||||
|
@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
|
|||
scmd));
|
||||
}
|
||||
|
||||
return list_empty(work_q);
|
||||
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
|
|||
struct list_head *done_q)
|
||||
{
|
||||
LIST_HEAD(tmp_list);
|
||||
LIST_HEAD(check_list);
|
||||
|
||||
list_splice_init(work_q, &tmp_list);
|
||||
|
||||
|
@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
|
|||
if (scmd_id(scmd) != id)
|
||||
continue;
|
||||
|
||||
if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
|
||||
&& (!scsi_device_online(scmd->device) ||
|
||||
rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
|
||||
if (rtn == SUCCESS)
|
||||
list_move_tail(&scmd->eh_entry, &check_list);
|
||||
else if (rtn == FAST_IO_FAIL)
|
||||
scsi_eh_finish_cmd(scmd, done_q);
|
||||
else
|
||||
/* push back on work queue for further processing */
|
||||
|
@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
|
|||
}
|
||||
}
|
||||
|
||||
return list_empty(work_q);
|
||||
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
|
|||
struct list_head *done_q)
|
||||
{
|
||||
struct scsi_cmnd *scmd, *chan_scmd, *next;
|
||||
LIST_HEAD(check_list);
|
||||
unsigned int channel;
|
||||
int rtn;
|
||||
|
||||
|
@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
|
|||
rtn = scsi_try_bus_reset(chan_scmd);
|
||||
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
|
||||
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
|
||||
if (channel == scmd_channel(scmd))
|
||||
if (!scsi_device_online(scmd->device) ||
|
||||
rtn == FAST_IO_FAIL ||
|
||||
!scsi_eh_tur(scmd))
|
||||
if (channel == scmd_channel(scmd)) {
|
||||
if (rtn == FAST_IO_FAIL)
|
||||
scsi_eh_finish_cmd(scmd,
|
||||
done_q);
|
||||
else
|
||||
list_move_tail(&scmd->eh_entry,
|
||||
&check_list);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
|
||||
|
@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
|
|||
channel));
|
||||
}
|
||||
}
|
||||
return list_empty(work_q);
|
||||
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
|
|||
struct list_head *done_q)
|
||||
{
|
||||
struct scsi_cmnd *scmd, *next;
|
||||
LIST_HEAD(check_list);
|
||||
int rtn;
|
||||
|
||||
if (!list_empty(work_q)) {
|
||||
|
@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
|
|||
, current->comm));
|
||||
|
||||
rtn = scsi_try_host_reset(scmd);
|
||||
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
|
||||
if (rtn == SUCCESS) {
|
||||
list_splice_init(work_q, &check_list);
|
||||
} else if (rtn == FAST_IO_FAIL) {
|
||||
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
|
||||
if (!scsi_device_online(scmd->device) ||
|
||||
rtn == FAST_IO_FAIL ||
|
||||
(!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
|
||||
!scsi_eh_tur(scmd))
|
||||
scsi_eh_finish_cmd(scmd, done_q);
|
||||
}
|
||||
} else {
|
||||
|
@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
|
|||
current->comm));
|
||||
}
|
||||
}
|
||||
return list_empty(work_q);
|
||||
return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
|
|||
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
|
||||
(unsigned long long)lba, (unsigned long long)txlen,
|
||||
cdb[1] >> 5);
|
||||
|
||||
if (cdb[0] == WRITE_SAME)
|
||||
trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
|
||||
|
||||
trace_seq_putc(p, 0);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
|
|||
unsigned int max_blocks = 0;
|
||||
|
||||
q->limits.discard_zeroes_data = sdkp->lbprz;
|
||||
q->limits.discard_alignment = sdkp->unmap_alignment;
|
||||
q->limits.discard_alignment = sdkp->unmap_alignment *
|
||||
logical_block_size;
|
||||
q->limits.discard_granularity =
|
||||
max(sdkp->physical_block_size,
|
||||
sdkp->unmap_granularity * logical_block_size);
|
||||
|
@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
|
||||
int dbd;
|
||||
int modepage;
|
||||
int first_len;
|
||||
struct scsi_mode_data data;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
int old_wce = sdkp->WCE;
|
||||
int old_rcd = sdkp->RCD;
|
||||
int old_dpofua = sdkp->DPOFUA;
|
||||
|
||||
if (sdp->skip_ms_page_8)
|
||||
goto defaults;
|
||||
|
||||
if (sdp->type == TYPE_RBC) {
|
||||
first_len = 4;
|
||||
if (sdp->skip_ms_page_8) {
|
||||
if (sdp->type == TYPE_RBC)
|
||||
goto defaults;
|
||||
else {
|
||||
if (sdp->skip_ms_page_3f)
|
||||
goto defaults;
|
||||
modepage = 0x3F;
|
||||
if (sdp->use_192_bytes_for_3f)
|
||||
first_len = 192;
|
||||
dbd = 0;
|
||||
}
|
||||
} else if (sdp->type == TYPE_RBC) {
|
||||
modepage = 6;
|
||||
dbd = 8;
|
||||
} else {
|
||||
|
@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
}
|
||||
|
||||
/* cautiously ask */
|
||||
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
|
||||
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
|
||||
&data, &sshdr);
|
||||
|
||||
if (!scsi_status_is_good(res))
|
||||
goto bad_sense;
|
||||
|
||||
if (!data.header_length) {
|
||||
modepage = 6;
|
||||
first_len = 0;
|
||||
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
|
||||
}
|
||||
|
||||
|
@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
|
|||
*/
|
||||
if (len < 3)
|
||||
goto bad_sense;
|
||||
if (len > 20)
|
||||
len = 20;
|
||||
|
||||
/* Take headers and block descriptors into account */
|
||||
len += data.header_length + data.block_descriptor_length;
|
||||
if (len > SD_BUF_SIZE)
|
||||
goto bad_sense;
|
||||
else if (len > SD_BUF_SIZE) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
|
||||
"data from %d to %d bytes\n", len, SD_BUF_SIZE);
|
||||
len = SD_BUF_SIZE;
|
||||
}
|
||||
if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
|
||||
len = 192;
|
||||
|
||||
/* Get the data */
|
||||
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
|
||||
if (len > first_len)
|
||||
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
|
||||
&data, &sshdr);
|
||||
|
||||
if (scsi_status_is_good(res)) {
|
||||
int offset = data.header_length + data.block_descriptor_length;
|
||||
|
||||
if (offset >= SD_BUF_SIZE - 2) {
|
||||
sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
|
||||
goto defaults;
|
||||
while (offset < len) {
|
||||
u8 page_code = buffer[offset] & 0x3F;
|
||||
u8 spf = buffer[offset] & 0x40;
|
||||
|
||||
if (page_code == 8 || page_code == 6) {
|
||||
/* We're interested only in the first 3 bytes.
|
||||
*/
|
||||
if (len - offset <= 2) {
|
||||
sd_printk(KERN_ERR, sdkp, "Incomplete "
|
||||
"mode parameter data\n");
|
||||
goto defaults;
|
||||
} else {
|
||||
modepage = page_code;
|
||||
goto Page_found;
|
||||
}
|
||||
} else {
|
||||
/* Go to the next page */
|
||||
if (spf && len - offset > 3)
|
||||
offset += 4 + (buffer[offset+2] << 8) +
|
||||
buffer[offset+3];
|
||||
else if (!spf && len - offset > 1)
|
||||
offset += 2 + buffer[offset+1];
|
||||
else {
|
||||
sd_printk(KERN_ERR, sdkp, "Incomplete "
|
||||
"mode parameter data\n");
|
||||
goto defaults;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((buffer[offset] & 0x3f) != modepage) {
|
||||
if (modepage == 0x3F) {
|
||||
sd_printk(KERN_ERR, sdkp, "No Caching mode page "
|
||||
"present\n");
|
||||
goto defaults;
|
||||
} else if ((buffer[offset] & 0x3f) != modepage) {
|
||||
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
|
||||
goto defaults;
|
||||
}
|
||||
|
||||
Page_found:
|
||||
if (modepage == 8) {
|
||||
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
|
||||
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
|
||||
|
|
|
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
	"0: bsfw %1,%w0\n\t"
	"btr %0,%1\n\t"
	"jnc 0b"
	: "=&r" (rv), "=m" (*field) :);
	: "=&r" (rv), "+m" (*field) :);

	return rv;
}

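
The single-character change above is the whole Ultrastor fix: btr both reads and writes the memory operand, so it must be declared as a read/write constraint ("+m") rather than output-only ("=m"), or the compiler is free to treat the old contents as dead. A minimal standalone illustration of the same constraint on x86, not the driver code (names and values invented; assumes the word is non-zero, as the driver's retry loop does):

#include <stdio.h>

/* Clear the lowest set bit of *word and return its index. */
static inline long clear_lowest_bit(unsigned long *word)
{
	long bit;

	asm("bsf %1, %0\n\t"	/* find the lowest set bit index */
	    "btr %0, %1"	/* clear that bit in memory */
	    : "=&r" (bit), "+m" (*word)
	    :);
	return bit;
}

int main(void)
{
	unsigned long word = 0x28;	/* bits 3 and 5 set */

	printf("cleared bit %ld, word now %#lx\n",
	       clear_lowest_bit(&word), word);	/* bit 3, word 0x20 */
	return 0;
}
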
@ -31,7 +31,7 @@
|
|||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/libsas.h> /* For TASK_ATTR_* */
|
||||
#include <scsi/scsi_tcq.h>
|
||||
|
||||
#include <target/target_core_base.h>
|
||||
#include <target/target_core_transport.h>
|
||||
|
@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
|
|||
if (sc->device->tagged_supported) {
|
||||
switch (sc->tag) {
|
||||
case HEAD_OF_QUEUE_TAG:
|
||||
sam_task_attr = TASK_ATTR_HOQ;
|
||||
sam_task_attr = MSG_HEAD_TAG;
|
||||
break;
|
||||
case ORDERED_QUEUE_TAG:
|
||||
sam_task_attr = TASK_ATTR_ORDERED;
|
||||
sam_task_attr = MSG_ORDERED_TAG;
|
||||
break;
|
||||
default:
|
||||
sam_task_attr = TASK_ATTR_SIMPLE;
|
||||
sam_task_attr = MSG_SIMPLE_TAG;
|
||||
break;
|
||||
}
|
||||
} else
|
||||
sam_task_attr = TASK_ATTR_SIMPLE;
|
||||
sam_task_attr = MSG_SIMPLE_TAG;
|
||||
|
||||
/*
|
||||
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
|
||||
|
@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
|
|||
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
|
||||
*/
|
||||
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
|
||||
DMA_NONE, TASK_ATTR_SIMPLE,
|
||||
DMA_NONE, MSG_SIMPLE_TAG,
|
||||
&tl_cmd->tl_sense_buf[0]);
|
||||
/*
|
||||
* Allocate the LUN_RESET TMR
|
||||
|
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
	return 0;
}

static u64 tcm_loop_pack_lun(unsigned int lun)
{
	u64 result;

	/* LSB of lun into byte 1 big-endian */
	result = ((lun & 0xff) << 8);
	/* use flat space addressing method */
	result |= 0x40 | ((lun >> 8) & 0x3f);

	return cpu_to_le64(result);
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
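
The hand-rolled flat-space packing removed above is what this series replaces with the midlayer's int_to_scsilun() in the REPORT LUNS path. A standalone sketch of that style of 8-byte LUN entry encoding (an illustrative re-implementation, not the kernel's source): each 16-bit level of the 64-bit LUN lands in two bytes, MSB first.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_report_luns_entry(uint64_t lun, uint8_t out[8])
{
	memset(out, 0, 8);
	for (int i = 0; i < 8; i += 2) {
		out[i]     = (lun >> 8) & 0xff;	/* high byte of this level */
		out[i + 1] = lun & 0xff;	/* low byte of this level */
		lun >>= 16;
	}
}

int main(void)
{
	uint8_t buf[8];

	pack_report_luns_entry(5, buf);		/* simple LUN 5 */
	for (int i = 0; i < 8; i++)
		printf("%02x ", buf[i]);	/* 00 05 00 00 00 00 00 00 */
	printf("\n");
	return 0;
}
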
@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
|
|||
fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
|
||||
fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
|
||||
fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
|
||||
fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
|
||||
|
||||
tf_cg = &fabric->tf_group;
|
||||
/*
|
||||
|
|
|
@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
|
|||
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!(tfo->pack_lun)) {
|
||||
printk(KERN_ERR "Missing tfo->pack_lun()\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
|
||||
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
|
||||
|
|
|
@@ -38,6 +38,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ int transport_get_lun_for_cmd(

{
struct se_device *dev = se_lun->lun_se_dev;
spin_lock(&dev->stats_lock);
spin_lock_irq(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
spin_unlock(&dev->stats_lock);
spin_unlock_irq(&dev->stats_lock);
}

/*
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8;
u64 i, lun;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
lun = 0;
buf[offset++] = ((lun >> 56) & 0xff);
buf[offset++] = ((lun >> 48) & 0xff);
buf[offset++] = ((lun >> 40) & 0xff);
buf[offset++] = ((lun >> 32) & 0xff);
buf[offset++] = ((lun >> 24) & 0xff);
buf[offset++] = ((lun >> 16) & 0xff);
buf[offset++] = ((lun >> 8) & 0xff);
buf[offset++] = (lun & 0xff);
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;

lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
buf[offset++] = ((lun >> 56) & 0xff);
buf[offset++] = ((lun >> 48) & 0xff);
buf[offset++] = ((lun >> 40) & 0xff);
buf[offset++] = ((lun >> 32) & 0xff);
buf[offset++] = ((lun >> 24) & 0xff);
buf[offset++] = ((lun >> 16) & 0xff);
buf[offset++] = ((lun >> 8) & 0xff);
buf[offset++] = (lun & 0xff);
int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
offset += 8;
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
@@ -41,7 +41,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);

return PYX_TRANSPORT_SENT_TO_TRANSPORT;
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;

tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}

spin_lock(&dev->stats_lock);
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock(&dev->stats_lock);
spin_unlock_irq(&dev->stats_lock);

DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
@@ -42,7 +42,7 @@
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

transport_free_dev_tasks(cmd);

check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
* head of the struct se_device->execute_task_list, and task_prev
* after that for each subsequent task
*/
if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
list_add(&task->t_execute_list,
(task_prev != NULL) ?
&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
break;

list_del(&task->t_execute_list);
atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);

return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
{
unsigned long flags;

if (atomic_read(&task->task_execute_queue) == 0) {
dump_stack();
return;
}

spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_execute_list);
atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;

if (cmd->sam_task_attr == TASK_ATTR_ACA) {
if (cmd->sam_task_attr == MSG_ACA_TAG) {
DEBUG_STA("SAM Task Attribute ACA"
" emulation is not supported\n");
return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

void transport_generic_free_cmd_intr(
struct se_cmd *cmd)
{
transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
}
EXPORT_SYMBOL(transport_generic_free_cmd_intr);

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
smp_mb__after_atomic_inc();
DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
T_TASK(cmd)->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_list,
&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = TASK_ATTR_HOQ;
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = TASK_ATTR_HOQ;
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;

if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_list);
atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
new_active_tasks++;

spin_lock(&dev->delayed_cmd_lock);
if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_end_cur->page_link &= ~0x02;

sg_chain(sg_head, task_sg_num, sg_head_cur);
sg_count += (task->task_sg_num + 1);
} else
sg_count += task->task_sg_num;
task_sg_num = (task->task_sg_num + 1);
} else {
sg_chain(sg_head, task_sg_num, sg_head_cur);
sg_count += task->task_sg_num;
task_sg_num = task->task_sg_num;
}

sg_head = sg_head_cur;
sg_link = sg_link_cur;
task_sg_num = task->task_sg_num;
continue;
}
sg_head = sg_first = &task->task_sg[0];
sg_link = &task->task_sg[task->task_sg_num];
task_sg_num = task->task_sg_num;
/*
* Check for single task..
*/
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
*/
sg_end = &task->task_sg[task->task_sg_num - 1];
sg_end->page_link &= ~0x02;
sg_count += (task->task_sg_num + 1);
} else
sg_count += task->task_sg_num;
task_sg_num = (task->task_sg_num + 1);
} else {
sg_count += task->task_sg_num;
task_sg_num = task->task_sg_num;
}
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
T_TASK(cmd)->t_tasks_sg_chained = sg_first;
T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;

DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
" t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
T_TASK(cmd)->t_tasks_sg_chained_no);

for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
T_TASK(cmd)->t_tasks_sg_chained_no, i) {

DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
sg, sg_page(sg), sg->length, sg->offset);
DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
if (sg_is_chain(sg))
DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
}

}
EXPORT_SYMBOL(transport_do_task_sg_chain);

@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0, 0);

transport_free_dev_tasks(cmd);

transport_generic_remove(cmd, release_to_pool,
session_reinstatement);
}
@@ -6132,6 +6151,9 @@ static int transport_processing_thread(void *param)
case TRANSPORT_REMOVE:
transport_generic_remove(cmd, 1, 0);
break;
case TRANSPORT_FREE_CMD_INTR:
transport_generic_free_cmd(cmd, 0, 1, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
@@ -35,6 +35,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
/*
* Locate the SAM Task Attr from fc_pri_ta
*/
switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
case FCP_PTA_HEADQ:
task_attr = MSG_HEAD_TAG;
break;
case FCP_PTA_ORDERED:
task_attr = MSG_ORDERED_TAG;
break;
case FCP_PTA_ACA:
task_attr = MSG_ACA_TAG;
break;
case FCP_PTA_SIMPLE: /* Fallthrough */
default:
task_attr = MSG_SIMPLE_TAG;
}

/* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}

static u64 ft_pack_lun(unsigned int index)
{
WARN_ON(index >= 256);
/* Caller wants this byte-swapped */
return cpu_to_le64((index & 0xff) << 8);
}

static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_sense_len = ft_get_fabric_sense_len,
.set_fabric_sense_len = ft_set_fabric_sense_len,
.is_state_remove = ft_is_state_remove,
.pack_lun = ft_pack_lun,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
@@ -604,6 +604,7 @@ struct sas_domain_function_template {
int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
int (*lldd_ata_soft_reset)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
@@ -9,6 +9,7 @@
#define MSG_SIMPLE_TAG 0x20
#define MSG_HEAD_TAG 0x21
#define MSG_ORDERED_TAG 0x22
#define MSG_ACA_TAG 0x24 /* unsupported */

#define SCSI_NO_TAG (-1) /* identify no tag in use */
@@ -98,6 +98,7 @@ enum transport_state_table {
TRANSPORT_REMOVE = 14,
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
};

/* Used for struct se_cmd->se_cmd_flags */
@@ -77,7 +77,6 @@ struct target_core_fabric_ops {
u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
u16 (*get_fabric_sense_len)(void);
int (*is_state_remove)(struct se_cmd *);
u64 (*pack_lun)(unsigned int);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,