Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (154 commits)
  [SCSI] osd: Remove out-of-tree left overs
  [SCSI] libosd: Use REQ_QUIET requests.
  [SCSI] osduld: use filp_open() when looking up an osd-device
  [SCSI] libosd: Define an osd_dev wrapper to retrieve the request_queue
  [SCSI] libosd: osd_req_{read,write} takes a length parameter
  [SCSI] libosd: Let _osd_req_finalize_data_integrity receive number of out_bytes
  [SCSI] libosd: osd_req_{read,write}_kern new API
  [SCSI] libosd: Better printout of OSD target system information
  [SCSI] libosd: OSD2r05: Attribute definitions
  [SCSI] libosd: OSD2r05: Additional command enums
  [SCSI] mpt fusion: fix up doc book comments
  [SCSI] mpt fusion: Added support for Broadcast primitives Event handling
  [SCSI] mpt fusion: Queue full event handling
  [SCSI] mpt fusion: RAID device handling and Dual port Raid support is added
  [SCSI] mpt fusion: Put IOC into ready state if it is not already in ready state
  [SCSI] mpt fusion: Code Cleanup patch
  [SCSI] mpt fusion: Rescan SAS topology added
  [SCSI] mpt fusion: SAS topology scan changes, expander events
  [SCSI] mpt fusion: Firmware event implementation using separate WorkQueue
  [SCSI] mpt fusion: rewrite of ioctl_cmds internal generated function
  ...
Merged by Linus Torvalds, 2009-06-12 09:50:42 -07:00
Commit c9b8af00ff
141 changed files with 43,036 additions and 9,163 deletions

drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
/*
* mgmt tasks do not need special cleanup and we do not
* allocate anything in the init task callout
*/
if (!task->sc || task->state == ISCSI_TASK_PENDING)
/* mgmt tasks do not need special cleanup */
if (!task->sc)
return;
if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
}
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
{
int err;
struct iser_conn *ib_conn;

File diff suppressed because it is too large.

drivers/message/fusion/mptbase.h

@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.04.07"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07"
#define MPT_LINUX_VERSION_COMMON "3.04.10"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
#endif
#define MPT_NAME_LENGTH 32
#define MPT_KOBJ_NAME_LEN 20
#define MPT_PROCFS_MPTBASEDIR "mpt"
/* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
#define MPT_COALESCING_TIMEOUT 0x10
/*
* SCSI transfer rate defines.
*/
@@ -161,10 +163,10 @@
/*
* Set the MAX_SGE value based on user input.
*/
#ifdef CONFIG_FUSION_MAX_SGE
#if CONFIG_FUSION_MAX_SGE < 16
#ifdef CONFIG_FUSION_MAX_SGE
#if CONFIG_FUSION_MAX_SGE < 16
#define MPT_SCSI_SG_DEPTH 16
#elif CONFIG_FUSION_MAX_SGE > 128
#elif CONFIG_FUSION_MAX_SGE > 128
#define MPT_SCSI_SG_DEPTH 128
#else
#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
#define MPT_SCSI_SG_DEPTH 40
#endif
#ifdef CONFIG_FUSION_MAX_FC_SGE
#if CONFIG_FUSION_MAX_FC_SGE < 16
#define MPT_SCSI_FC_SG_DEPTH 16
#elif CONFIG_FUSION_MAX_FC_SGE > 256
#define MPT_SCSI_FC_SG_DEPTH 256
#else
#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
#endif
#else
#define MPT_SCSI_FC_SG_DEPTH 40
#endif
/* debug print string length used for events and iocstatus */
# define EVENT_DESCR_STR_SZ 100
@@ -431,38 +445,36 @@ do { \
* IOCTL structure and associated defines
*/
#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
typedef struct _MPT_IOCTL {
struct _MPT_ADAPTER *ioc;
u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
u8 sense[MPT_SENSE_BUFFER_ALLOC];
int wait_done; /* wake-up value for this ioc */
u8 rsvd;
u8 status; /* current command status */
u8 reset; /* 1 if bus reset allowed */
u8 id; /* target for reset */
struct mutex ioctl_mutex;
} MPT_IOCTL;
#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
on the current*/
#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
complete routine */
#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */
#define INITIALIZE_MGMT_STATUS(status) \
status = MPT_MGMT_STATUS_PENDING;
#define CLEAR_MGMT_STATUS(status) \
status = 0;
#define CLEAR_MGMT_PENDING_STATUS(status) \
status &= ~MPT_MGMT_STATUS_PENDING;
#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
msg_context = value;
typedef struct _MPT_SAS_MGMT {
typedef struct _MPT_MGMT {
struct mutex mutex;
struct completion done;
u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
u8 sense[MPT_SENSE_BUFFER_ALLOC];
u8 status; /* current command status */
}MPT_SAS_MGMT;
int completion_code;
u32 msg_context;
} MPT_MGMT;
/*
* Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
u8 flags;
};
typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
dma_addr_t dma_addr);
/*
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
*/
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
int pci_irq; /* This irq */
char name[MPT_NAME_LENGTH]; /* "iocN" */
char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
#ifdef CONFIG_FUSION_LOGGING
/* used in mpt_display_event_info */
char evStr[EVENT_DESCR_STR_SZ];
#endif
char board_name[16];
char board_assembly[16];
char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
int reply_depth; /* Num Allocated reply frames */
int reply_sz; /* Reply frame size */
int num_chain; /* Number of chain buffers */
MPT_ADD_SGE add_sge; /* Pointer to add_sge
function */
MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
function */
/* Pool of buffers for chaining. ReqToChain
* and ChainToChain track index of chain buffers.
* ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
RaidCfgData raid_data; /* Raid config. data */
SasCfgData sas_data; /* Sas config. data */
FcCfgData fc_data; /* Fc config. data */
MPT_IOCTL *ioctl; /* ioctl data pointer */
struct proc_dir_entry *ioc_dentry;
struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
spinlock_t diagLock; /* diagnostic reset lock */
int diagPending;
u32 biosVersion; /* BIOS version from IO Unit Page 2 */
int eventTypes; /* Event logging parameters */
int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
struct _mpt_ioctl_events *events; /* pointer to event log */
u8 *cached_fw; /* Pointer to FW */
dma_addr_t cached_fw_dma;
struct list_head configQ; /* linked list of config. requests */
int hs_reply_idx;
#ifndef MFCNT
u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
IOCFactsReply_t facts;
PortFactsReply_t pfacts[2];
FCPortPage0_t fc_port_page0[2];
struct timer_list persist_timer; /* persist table timer */
int persist_wait_done; /* persist completion flag */
u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
LANPage0_t lan_cnfg_page0;
LANPage1_t lan_cnfg_page1;
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
int aen_event_read_flag; /* flag to indicate event log was read*/
u8 FirstWhoInit;
u8 upload_fw; /* If set, do a fw upload */
u8 reload_fw; /* Force a FW Reload on next reset */
u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
u8 pad1[4];
u8 DoneCtx;
u8 TaskCtx;
u8 InternalCtx;
spinlock_t initializing_hba_lock;
int initializing_hba_lock_flag;
struct list_head list;
struct net_device *netdev;
struct list_head sas_topology;
struct mutex sas_topology_mutex;
struct workqueue_struct *fw_event_q;
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
char fw_event_q_name[MPT_KOBJ_NAME_LEN];
struct mutex sas_discovery_mutex;
u8 sas_discovery_runtime;
u8 sas_discovery_ignore_events;
/* port_info object for the host */
struct mptsas_portinfo *hba_port_info;
u64 hba_port_sas_addr;
u16 hba_port_num_phy;
struct list_head sas_device_info_list;
struct mutex sas_device_info_mutex;
u8 old_sas_discovery_protocal;
u8 sas_discovery_quiesce_io;
int sas_index; /* index refrencing */
MPT_SAS_MGMT sas_mgmt;
MPT_MGMT sas_mgmt;
MPT_MGMT mptbase_cmds; /* for sending config pages */
MPT_MGMT internal_cmds;
MPT_MGMT taskmgmt_cmds;
MPT_MGMT ioctl_cmds;
spinlock_t taskmgmt_lock; /* diagnostic reset lock */
int taskmgmt_in_progress;
u8 taskmgmt_quiesce_io;
u8 ioc_reset_in_progress;
struct work_struct sas_persist_task;
struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
u8 fc_link_speed[2];
spinlock_t fc_rescan_work_lock;
struct work_struct fc_rescan_work;
char fc_rescan_work_q_name[20];
char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
/* driver forced bus resets count */
unsigned long hard_resets;
/* fw/external bus resets count */
unsigned long soft_resets;
/* cmd timeouts */
unsigned long timeouts;
struct scsi_cmnd **ScsiLookup;
spinlock_t scsi_lookup_lock;
char reset_work_q_name[20];
u64 dma_mask;
u32 broadcast_aen_busy;
char reset_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
spinlock_t fault_reset_work_lock;
u8 sg_addr_size;
u8 in_rescan;
u8 SGE_size;
} MPT_ADAPTER;
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
dma_addr_t Address;
} MptSge_t;
#define mpt_addr_size() \
((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
MPI_SGE_FLAGS_32_BIT_ADDRESSING)
#define mpt_msg_flags() \
((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
#define mpt_msg_flags(ioc) \
(ioc->sg_addr_size == sizeof(u64)) ? \
MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
(MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
/* Pool of memory for holding SCpnts before doing
* OS callbacks. freeQ is the free pool.
*/
u8 tmPending;
u8 resetPending;
u8 negoNvram; /* DV disabled, nego NVRAM */
u8 pad1;
u8 tmState;
u8 rsvd[2];
MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
struct scsi_cmnd *abortSCpnt;
MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
unsigned long hard_resets; /* driver forced bus resets count */
unsigned long soft_resets; /* fw/external bus resets count */
unsigned long timeouts; /* cmd timeouts */
ushort sel_timeout[MPT_MAX_FC_DEVICES];
char *info_kbuf;
wait_queue_head_t scandv_waitq;
int scandv_wait_done;
long last_queue_full;
u16 tm_iocstatus;
u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
* Generic structure passed to the base mpt_config function.
*/
typedef struct _x_config_parms {
struct list_head linkage; /* linked list */
struct timer_list timer; /* timer function for this request */
union {
ConfigExtendedPageHeader_t *ehdr;
ConfigPageHeader_t *hdr;
} cfghdr;
dma_addr_t physAddr;
int wait_done; /* wait for this request */
u32 pageAddr; /* properly formatted */
u16 status;
u8 action;
u8 dir;
u8 timeout; /* seconds */
u8 pad1;
u16 status;
u16 pad2;
} CONFIGPARMS;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
pRaidPhysDiskPage1_t phys_disk);
extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
u8 phys_disk_num);
extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
MPT_SGE_FLAGS_END_OF_BUFFER | \
MPT_SGE_FLAGS_END_OF_LIST | \
MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
MPT_SGE_FLAGS_ADDRESSING | \
MPT_TRANSFER_IOC_TO_HOST)
#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
(MPT_SGE_FLAGS_LAST_ELEMENT | \
MPT_SGE_FLAGS_END_OF_BUFFER | \
MPT_SGE_FLAGS_END_OF_LIST | \
MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
MPT_SGE_FLAGS_ADDRESSING | \
MPT_TRANSFER_HOST_TO_IOC)
/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
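The old MPT_IOCTL and MPT_SAS_MGMT bookkeeping above collapses into the single MPT_MGMT type (mutex + completion + MPT_MGMT_STATUS_* flags), and the adapter grows one instance per command class (sas_mgmt, mptbase_cmds, internal_cmds, taskmgmt_cmds, ioctl_cmds). A minimal sketch of the issue-and-wait pattern these macros support, modeled on the mptscsih_quiesce_raid() rewrite later in this diff (error paths abbreviated; mf is an already-built message frame):

	/* Sketch only: mirrors the new internal_cmds usage, not a drop-in. */
	mutex_lock(&ioc->internal_cmds.mutex);
	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)	/* -> PENDING */
	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
					       10 * HZ);
	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = -ETIME;
		if (!timeleft)			/* firmware never replied */
			mpt_HardResetHandler(ioc, CAN_SLEEP);
	} else
		ret = ioc->internal_cmds.completion_code;
	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
	mutex_unlock(&ioc->internal_cmds.mutex);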

File diff suppressed because it is too large.

drivers/message/fusion/mptdebug.h

@@ -58,6 +58,7 @@
#define MPT_DEBUG_FC 0x00080000
#define MPT_DEBUG_SAS 0x00100000
#define MPT_DEBUG_SAS_WIDE 0x00200000
#define MPT_DEBUG_36GB_MEM 0x00400000
/*
* CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
#define dsaswideprintk(IOC, CMD) \
MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
#define d36memprintk(IOC, CMD) \
MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
/*

drivers/message/fusion/mptfc.c

@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
if (sizeof(dma_addr_t) == sizeof(u64)) {
scale = ioc->req_sz/ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / (sizeof(dma_addr_t) +
sizeof(u32));
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / (sizeof(dma_addr_t) +
sizeof(u32));
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Clear the TM flags
*/
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
hd->resetPending = 0;
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;
init_waitqueue_head(&hd->scandv_waitq);
hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
sh->transportt = mptfc_transport_template;
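The numSGE computation above now derives the scatter-gather table size from the per-adapter ioc->SGE_size and ioc->sg_addr_size rather than the host's sizeof(dma_addr_t), so a 32-bit kernel driving an adapter in 64-bit SGE mode sizes the table correctly. A worked example with assumed values (req_sz and MaxChainDepth are adapter-specific, not taken from real hardware):

	/* Assumed: req_sz = 128, SGE_size = 12 (64-bit SGE), MaxChainDepth = 8 */
	scale  = 128 / 12;		/* 10 SGEs fit in one frame	  */
	numSGE = (10 - 1) * (8 - 1)	/* 63 spread across chain frames  */
	       + 10			/* plus the first frame		  */
	       + (128 - 60) / 12;	/* plus 5 in the request tail: 78 */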

File diff suppressed because it is too large.

drivers/message/fusion/mptsas.h

@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
struct list_head list;
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
u8 target_reset_issued;
unsigned long time_count;
};
enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
MPTSAS_DEL_DEVICE,
MPTSAS_ADD_RAID,
MPTSAS_DEL_RAID,
MPTSAS_ADD_PHYSDISK,
MPTSAS_ADD_PHYSDISK_REPROBE,
MPTSAS_DEL_PHYSDISK,
MPTSAS_DEL_PHYSDISK_REPROBE,
MPTSAS_ADD_INACTIVE_VOLUME,
MPTSAS_IGNORE_EVENT,
};
struct mptsas_mapping{
u8 id;
u8 channel;
};
struct mptsas_device_info {
struct list_head list;
struct mptsas_mapping os; /* operating system mapping*/
struct mptsas_mapping fw; /* firmware mapping */
u64 sas_address;
u32 device_info; /* specific bits for devices */
u16 slot; /* enclosure slot id */
u64 enclosure_logical_id; /*enclosure address */
u8 is_logical_volume; /* is this logical volume */
/* this belongs to volume */
u8 is_hidden_raid_component;
/* this valid when is_hidden_raid_component set */
u8 volume_id;
/* cached data for a removed device */
u8 is_cached;
};
struct mptsas_hotplug_event {
struct work_struct work;
MPT_ADAPTER *ioc;
enum mptsas_hotplug_action event_type;
u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
u8 id;
u32 device_info;
u16 handle;
u16 parent_handle;
u8 phy_id;
u8 phys_disk_num_valid; /* hrc (hidden raid component) */
u8 phys_disk_num; /* hrc - unique index*/
u8 hidden_raid_component; /* hrc - don't expose*/
struct scsi_device *sdev;
};
struct fw_event_work {
struct list_head list;
struct delayed_work work;
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
u8 event_data[1];
};
struct mptsas_discovery_event {

File diff suppressed because it is too large.

drivers/message/fusion/mptscsih.h

@@ -60,6 +60,7 @@
#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
#define MPT_SCANDV_FALLBACK (0x00000020)
#define MPT_SCANDV_BUSY (0x00000040)
#define MPT_SCANDV_MAX_RETRIES (10)
@@ -89,6 +90,7 @@
#endif
typedef struct _internal_cmd {
char *data; /* data pointer */
dma_addr_t data_dma; /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
u8 id, int lun, int ctx2abort, ulong timeout);
extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern void mptscsih_timer_expired(unsigned long data);
extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct device_attribute *mptscsih_host_attrs[];
extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);

drivers/message/fusion/mptspi.c

@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
(IOCPage4Ptr->Header.PageLength + ii) * 4;
mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
}
static int
int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
{
MPT_ADAPTER *ioc = hd->ioc;
MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf;
MPT_ADAPTER *ioc = hd->ioc;
int ret;
unsigned long timeleft;
mutex_lock(&ioc->internal_cmds.mutex);
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
ioc->name));
return -EAGAIN;
dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
"%s: no msg frames!\n", ioc->name, __func__));
ret = -EAGAIN;
goto out;
}
pReq = (MpiRaidActionRequest_t *)mf;
if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
pReq->Reserved2 = 0;
pReq->ActionDataWord = 0; /* Reserved for this action */
mpt_add_sge((char *)&pReq->ActionDataSGE,
ioc->add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
ioc->name, pReq->Action, channel, id));
hd->pLocal = NULL;
hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
hd->scandv_wait_done = 0;
/* Save cmd pointer, for resource free if timeout or
* FW reload occurs
*/
hd->cmdPtr = mf;
add_timer(&hd->timer);
INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
wait_event(hd->scandv_waitq, hd->scandv_wait_done);
timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
ioc->name, __func__));
if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
mpt_HardResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
}
if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
return -1;
ret = ioc->internal_cmds.completion_code;
return 0;
out:
CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
mutex_unlock(&ioc->internal_cmds.mutex);
return ret;
}
static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
if (sizeof(dma_addr_t) == sizeof(u64)) {
scale = ioc->req_sz/ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / (sizeof(dma_addr_t) +
sizeof(u32));
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / (sizeof(dma_addr_t) +
sizeof(u32));
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Clear the TM flags
*/
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
hd->resetPending = 0;
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mpt_saf_te));
ioc->spi_data.noQas = 0;
init_waitqueue_head(&hd->scandv_waitq);
hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
hd->spi_pending = 0;
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* issue internal bus reset
*/
if (ioc->spi_data.bus_reset)
mptscsih_TMHandler(hd,
mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
0, 0, 0, 0, 5);

drivers/net/Kconfig

@@ -2264,6 +2264,17 @@ config BNX2
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
config CNIC
tristate "Broadcom CNIC support"
depends on BNX2
depends on UIO
help
This driver supports offload features of Broadcom NetXtremeII
gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called cnic. This is recommended.
config SPIDER_NET
tristate "Spider Gigabit Ethernet driver"
depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)

drivers/net/Makefile

@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o
spidernet-y += spider_net.o spider_net_ethtool.o

drivers/net/bnx2.c

@@ -49,6 +49,10 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
spin_unlock_bh(&bp->indirect_lock);
}
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
struct bnx2 *bp = netdev_priv(dev);
struct drv_ctl_io *io = &info->data.io;
switch (info->cmd) {
case DRV_CTL_IO_WR_CMD:
bnx2_reg_wr_ind(bp, io->offset, io->data);
break;
case DRV_CTL_IO_RD_CMD:
io->data = bnx2_reg_rd_ind(bp, io->offset);
break;
case DRV_CTL_CTX_WR_CMD:
bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
break;
default:
return -EINVAL;
}
return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
int sb_id;
if (bp->flags & BNX2_FLAG_USING_MSIX) {
cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
bnapi->cnic_present = 0;
sb_id = bp->irq_nvecs;
cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
} else {
cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
bnapi->cnic_tag = bnapi->last_status_idx;
bnapi->cnic_present = 1;
sb_id = 0;
cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
}
cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
if (ops == NULL)
return -EINVAL;
if (cp->drv_state & CNIC_DRV_STATE_REGD)
return -EBUSY;
bp->cnic_data = data;
rcu_assign_pointer(bp->cnic_ops, ops);
cp->num_irq = 0;
cp->drv_state = CNIC_DRV_STATE_REGD;
bnx2_setup_cnic_irq_info(bp);
return 0;
}
static int bnx2_unregister_cnic(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
cp->drv_state = 0;
bnapi->cnic_present = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
synchronize_rcu();
return 0;
}
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
cp->drv_owner = THIS_MODULE;
cp->chip_id = bp->chip_id;
cp->pdev = bp->pdev;
cp->io_base = bp->regview;
cp->drv_ctl = bnx2_drv_ctl;
cp->drv_register_cnic = bnx2_register_cnic;
cp->drv_unregister_cnic = bnx2_unregister_cnic;
return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
}
static void
bnx2_cnic_start(struct bnx2 *bp)
{
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
bnapi->cnic_tag = bnapi->last_status_idx;
}
info.cmd = CNIC_CTL_START_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
rcu_read_unlock();
}
#else
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
#endif
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
static void
bnx2_netif_stop(struct bnx2 *bp)
{
bnx2_cnic_stop(bp);
bnx2_disable_int_sync(bp);
if (netif_running(bp->dev)) {
bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
bnx2_cnic_start(bp);
}
}
}
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
if (bnx2_has_fast_work(bnapi))
return 1;
#ifdef BCM_CNIC
if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
return 1;
#endif
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
(sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
bp->idle_chk_status_idx = bnapi->last_status_idx;
}
#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct cnic_ops *c_ops;
if (!bnapi->cnic_present)
return;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops)
bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
bnapi->status_blk.msi);
rcu_read_unlock();
}
#endif
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
#ifdef BCM_CNIC
bnx2_poll_cnic(bp, bnapi);
#endif
/* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
val = REG_RD(bp, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
val |= BNX2_MQ_CONFIG_HALT_DIS;
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
if (CHIP_REV(bp) == CHIP_REV_Ax)
val |= BNX2_MQ_CONFIG_HALT_DIS;
}
REG_WR(bp, BNX2_MQ_CONFIG, val);
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
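Taken together, the bnx2.c additions above give the new cnic module its attachment points: the CNIC driver obtains a struct cnic_eth_dev from bnx2_cnic_probe(), registers its cnic_ops through drv_register_cnic(), and bnx2 then calls cnic_handler from NAPI poll (bnx2_poll_cnic) and cnic_ctl around netif stop/start. A hypothetical consumer-side sketch; my_cnic_ops and my_data stand in for what cnic.c actually supplies:

	/* Hypothetical sketch of the registration handshake. */
	struct cnic_eth_dev *ethdev = bnx2_cnic_probe(netdev);

	if (ethdev && !(ethdev->drv_state & CNIC_DRV_STATE_REGD))
		err = ethdev->drv_register_cnic(netdev, &my_cnic_ops,
						my_data);
	/* ... bnx2 now invokes my_cnic_ops.cnic_handler from its poll ... */
	ethdev->drv_unregister_cnic(netdev);	/* clears cnic_ops under RCU */

Note the teardown ordering: bnx2_unregister_cnic() NULLs the RCU-protected cnic_ops pointer and calls synchronize_rcu(), so any cnic_handler invocation already in flight from bnx2_poll_cnic() completes before the ops structure can go away.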

drivers/net/bnx2.h

@@ -361,6 +361,9 @@ struct l2_fhdr {
#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
#define BNX2_L2CTX_HOST_BDIDX 0x00000004
#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
#define BNX2_L2CTX_HOST_BSEQ 0x00000008
#define BNX2_L2CTX_NX_BSEQ 0x0000000c
#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
#define BNX2_RXP_SCRATCH 0x000e0000
#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
u32 last_status_idx;
u32 int_num;
#ifdef BCM_CNIC
u32 cnic_tag;
int cnic_present;
#endif
struct bnx2_rx_ring_info rx_ring;
struct bnx2_tx_ring_info tx_ring;
};
@@ -6727,6 +6736,11 @@ struct bnx2 {
int tx_ring_size;
u32 tx_wake_thresh;
#ifdef BCM_CNIC
struct cnic_ops *cnic_ops;
void *cnic_data;
#endif
/* End of fields used in the performance code paths. */
unsigned int current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
u32 idle_chk_status_idx;
#ifdef BCM_CNIC
struct cnic_eth_dev cnic_eth_dev;
#endif
const struct firmware *mips_firmware;
const struct firmware *rv2p_firmware;
};
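For reference, the new BNX2_L2CTX_STATUSB_NUM() macro encodes a non-zero status-block id, offset by 7, into bits 16 and up of the L2 context word:

	/* BNX2_L2CTX_STATUSB_NUM(3) == (3 + 7) << 16 == 0x000a0000 */
	/* BNX2_L2CTX_STATUSB_NUM(0) == 0			     */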

drivers/net/cnic.c (new file, 2,711 lines)

File diff suppressed because it is too large.

drivers/net/cnic.h (new file, 299 lines)

@@ -0,0 +1,299 @@
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_H
#define CNIC_H
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
#define KWQ_CID 24
#define KCQ_CID 25
/*
* krnlq_context definition
*/
#define L5_KRNLQ_FLAGS 0x00000000
#define L5_KRNLQ_SIZE 0x00000000
#define L5_KRNLQ_TYPE 0x00000000
#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
#define KRNLQ_TYPE_TYPE (0xf<<28)
#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
#define L5_KRNLQ_HOST_QIDX 0x00000004
#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
#define L5_KRNLQ_NX_PG_QIDX 0x00000018
#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
#define L5_KRNLQ_QIDX_INCR 0x0000001c
#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
struct cnic_redirect_entry {
struct dst_entry *old_dst;
struct dst_entry *new_dst;
};
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
#define MAX_ISCSI_TBL_SZ 256
#define CNIC_LOCAL_PORT_MIN 60000
#define CNIC_LOCAL_PORT_MAX 61000
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
(MAX_KCQE_CNT - 1)) ? \
(x) + 2 : (x) + 1
#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
#define DEF_IPID_COUNT 0xc001
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
#define DEF_KA_MAX_PROBE_COUNT 3
#define DEF_TOS 0
#define DEF_TTL 0xfe
#define DEF_SND_SEQ_SCALE 0
#define DEF_RCV_BUF 0xffff
#define DEF_SND_BUF 0xffff
#define DEF_SEED 0
#define DEF_MAX_RT_TIME 500
#define DEF_MAX_DA_COUNT 2
#define DEF_SWS_TIMER 1000
#define DEF_MAX_CWND 0xffff
struct cnic_ctx {
u32 cid;
void *ctx;
dma_addr_t mapping;
};
#define BNX2_MAX_CID 0x2000
struct cnic_dma {
int num_pages;
void **pg_arr;
dma_addr_t *pg_map_arr;
int pgtbl_size;
u32 *pgtbl;
dma_addr_t pgtbl_map;
};
struct cnic_id_tbl {
spinlock_t lock;
u32 start;
u32 max;
u32 next;
unsigned long *table;
};
#define CNIC_KWQ16_DATA_SIZE 128
struct kwqe_16_data {
u8 data[CNIC_KWQ16_DATA_SIZE];
};
struct cnic_iscsi {
struct cnic_dma task_array_info;
struct cnic_dma r2tq_info;
struct cnic_dma hq_info;
};
struct cnic_context {
u32 cid;
struct kwqe_16_data *kwqe_data;
dma_addr_t kwqe_data_mapping;
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
u32 ctx_flags;
#define CTX_FL_OFFLD_START 0x00000001
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
} proto;
};
struct cnic_local {
spinlock_t cnic_ulp_lock;
void *ulp_handle[MAX_CNIC_ULP_TYPE];
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
/* protected by ulp_lock */
u32 cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x00000001
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
void *l2_ring;
dma_addr_t l2_ring_map;
int l2_ring_size;
int l2_rx_ring_size;
void *l2_buf;
dma_addr_t l2_buf_map;
int l2_buf_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
u16 *tx_cons_ptr;
u16 rx_cons;
u16 tx_cons;
u32 kwq_cid_addr;
u32 kcq_cid_addr;
struct cnic_dma kwq_info;
struct kwqe **kwq;
struct cnic_dma kwq_16_data_info;
u16 max_kwq_idx;
u16 kwq_prod_idx;
u32 kwq_io_addr;
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
struct cnic_dma kcq_info;
struct kcqe **kcq;
u16 kcq_prod_idx;
u32 kcq_io_addr;
void *status_blk;
struct status_block_msix *bnx2_status_blk;
struct host_status_block *bnx2x_status_blk;
u32 status_blk_num;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma conn_buf_info;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
int max_iscsi_conn;
atomic_t iscsi_conn;
/* per connection parameters */
int num_iscsi_tasks;
int num_ccells;
int task_array_size;
int r2tq_size;
int hq_size;
int num_cqs;
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
int cids_per_blk;
u32 chip_id;
int func;
u32 shmem_base;
u32 uio_dev;
struct uio_info *cnic_uinfo;
struct cnic_ops *cnic_ops;
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
void (*setup_pgtbl)(struct cnic_dev *,
struct cnic_dma *);
int (*alloc_resc)(struct cnic_dev *);
void (*free_resc)(struct cnic_dev *);
int (*start_cm)(struct cnic_dev *);
void (*stop_cm)(struct cnic_dev *);
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*close_conn)(struct cnic_sock *, u32 opcode);
u16 (*next_idx)(u16);
u16 (*hw_idx)(u16);
};
struct bnx2x_bd_chain_next {
u32 addr_lo;
u32 addr_hi;
u8 reserved[8];
};
#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
#define CDU_REGION_NUMBER_XCM_AG 2
#define CDU_REGION_NUMBER_UCM_AG 4
#endif
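The KWQ_PG/KWQ_IDX (and matching KCQ_*) macros split a linear queue index into a page number and an offset within that page. A worked example, assuming BCM_PAGE_SIZE is 4096 (BCM_PAGE_BITS = 12), which makes KWQE_CNT = 4096 / sizeof(struct kwqe) = 128 and MAX_KWQE_CNT = 127:

	/* The -5 in the shift is log2(sizeof(struct kwqe)) = log2(32). */
	KWQ_PG(300)  == (300 & ~127) >> (12 - 5) == 256 >> 7 == 2
	KWQ_IDX(300) ==  300 &  127		 == 44
	/* i.e. linear index 300 addresses cp->kwq[2][44] */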

drivers/net/cnic_defs.h (new file, 580 lines)

@@ -0,0 +1,580 @@
/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_DEFS_H
#define CNIC_DEFS_H
/* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
#define L4_KWQE_OPCODE_VALUE_RESET (53)
#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
#define L5CM_RAMROD_CMD_ID_BASE (0x80)
#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2)
/*
* L4 KCQ CQE
*/
struct l4_kcq {
u32 cid;
u32 pg_cid;
u32 conn_id;
u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
u16 status;
u16 reserved1;
#elif defined(__LITTLE_ENDIAN)
u16 reserved1;
u16 status;
#endif
u32 reserved2[2];
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
u8 op_code;
u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
u16 qe_self_seq;
u8 op_code;
u8 flags;
#define L4_KCQ_RESERVED3 (0xF<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
#endif
};
/*
* L4 KCQ CQE PG upload
*/
struct l4_kcq_upload_pg {
u32 pg_cid;
#if defined(__BIG_ENDIAN)
u16 pg_status;
u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
u16 pg_ipid_count;
u16 pg_status;
#endif
u32 reserved1[5];
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
u8 op_code;
u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
u16 qe_self_seq;
u8 op_code;
u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
#endif
};
/*
* Gracefully close the connection request
*/
struct l4_kwq_close_req {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
/*
* The first request to be passed in order to establish connection in option2
*/
struct l4_kwq_connect_req1 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u8 reserved0;
u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
u8 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 pg_cid;
u32 src_ip;
u32 dst_ip;
#if defined(__BIG_ENDIAN)
u16 dst_port;
u16 src_port;
#elif defined(__LITTLE_ENDIAN)
u16 src_port;
u16 dst_port;
#endif
#if defined(__BIG_ENDIAN)
u8 rsrv1[3];
u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
u8 rsrv1[3];
#endif
u32 rsrv2;
};
/*
* The second ( optional )request to be passed in order to establish
* connection in option2 - for IPv6 only
*/
struct l4_kwq_connect_req2 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u8 reserved0;
u8 rsrv;
#elif defined(__LITTLE_ENDIAN)
u8 rsrv;
u8 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 reserved2;
u32 src_ip_v6_2;
u32 src_ip_v6_3;
u32 src_ip_v6_4;
u32 dst_ip_v6_2;
u32 dst_ip_v6_3;
u32 dst_ip_v6_4;
};
/*
* The third ( and last )request to be passed in order to establish
* connection in option2
*/
struct l4_kwq_connect_req3 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 ka_timeout;
u32 ka_interval ;
#if defined(__BIG_ENDIAN)
u8 snd_seq_scale;
u8 ttl;
u8 tos;
u8 ka_max_probe_count;
#elif defined(__LITTLE_ENDIAN)
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
#endif
#if defined(__BIG_ENDIAN)
u16 pmtu;
u16 mss;
#elif defined(__LITTLE_ENDIAN)
u16 mss;
u16 pmtu;
#endif
u32 rcv_buf;
u32 snd_buf;
u32 seed;
};
/*
* a KWQE request to offload a PG connection
*/
struct l4_kwq_offload_pg {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
#if defined(__BIG_ENDIAN)
u8 l2hdr_nbytes;
u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
u8 da0;
u8 da1;
#elif defined(__LITTLE_ENDIAN)
u8 da1;
u8 da0;
u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
u8 l2hdr_nbytes;
#endif
#if defined(__BIG_ENDIAN)
u8 da2;
u8 da3;
u8 da4;
u8 da5;
#elif defined(__LITTLE_ENDIAN)
u8 da5;
u8 da4;
u8 da3;
u8 da2;
#endif
#if defined(__BIG_ENDIAN)
u8 sa0;
u8 sa1;
u8 sa2;
u8 sa3;
#elif defined(__LITTLE_ENDIAN)
u8 sa3;
u8 sa2;
u8 sa1;
u8 sa0;
#endif
#if defined(__BIG_ENDIAN)
u8 sa4;
u8 sa5;
u16 etype;
#elif defined(__LITTLE_ENDIAN)
u16 etype;
u8 sa5;
u8 sa4;
#endif
#if defined(__BIG_ENDIAN)
u16 vlan_tag;
u16 ipid_start;
#elif defined(__LITTLE_ENDIAN)
u16 ipid_start;
u16 vlan_tag;
#endif
#if defined(__BIG_ENDIAN)
u16 ipid_count;
u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
u16 reserved3;
u16 ipid_count;
#endif
u32 host_opaque;
};
/*
* Abortively close the connection request
*/
struct l4_kwq_reset_req {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
/*
* a KWQE request to update a PG connection
*/
struct l4_kwq_update_pg {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
u8 opcode;
u16 oper16;
#elif defined(__LITTLE_ENDIAN)
u16 oper16;
u8 opcode;
u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 pg_cid;
u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
u8 pg_unused_a;
u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
u16 pg_ipid_count;
u8 pg_unused_a;
u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
#endif
#if defined(__BIG_ENDIAN)
u16 reserverd3;
u8 da0;
u8 da1;
#elif defined(__LITTLE_ENDIAN)
u8 da1;
u8 da0;
u16 reserverd3;
#endif
#if defined(__BIG_ENDIAN)
u8 da2;
u8 da3;
u8 da4;
u8 da5;
#elif defined(__LITTLE_ENDIAN)
u8 da5;
u8 da4;
u8 da3;
u8 da2;
#endif
u32 reserved4;
u32 reserved5;
};
/*
* a KWQE request to upload a PG or L4 context
*/
struct l4_kwq_upload {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
u8 opcode;
u16 oper16;
#elif defined(__LITTLE_ENDIAN)
u16 oper16;
u8 opcode;
u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
#endif /* CNIC_DEFS_H */
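Each 32-bit word of these queue elements is declared twice, with its sub-fields mirrored between the __BIG_ENDIAN and __LITTLE_ENDIAN branches, so the accompanying mask/shift defines read the same on either byte order. A small sketch of the resulting uniform field access (hypothetical helper, not part of the driver):

	static inline u8 l4_kcq_layer_code(const struct l4_kcq *kcq)
	{
		return (kcq->flags & L4_KCQ_LAYER_CODE) >>
			L4_KCQ_LAYER_CODE_SHIFT;
	}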

drivers/net/cnic_if.h (new file, 299 lines)

@@ -0,0 +1,299 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.0.0"
#define CNIC_MODULE_RELDATE "May 21, 2009"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
#define CNIC_ULP_L4 2
#define MAX_CNIC_ULP_TYPE_EXT 2
#define MAX_CNIC_ULP_TYPE 3
struct kwqe {
u32 kwqe_op_flag;
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
#define KWQE_FLAGS_LAYER_SHIFT 28
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
u32 kwqe_info4;
u32 kwqe_info5;
u32 kwqe_info6;
};
struct kwqe_16 {
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
};
struct kcqe {
u32 kcqe_info0;
u32 kcqe_info1;
u32 kcqe_info2;
u32 kcqe_info3;
u32 kcqe_info4;
u32 kcqe_info5;
u32 kcqe_info6;
u32 kcqe_op_flag;
#define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK (0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
#define KCQE_OPCODE(op) \
(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
#define MAX_CNIC_CTL_DATA 64
#define MAX_DRV_CTL_DATA 64
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
#define DRV_CTL_COMPLETION_CMD 0x105
struct cnic_ctl_completion {
u32 cid;
};
struct drv_ctl_completion {
u32 comp_count;
};
struct cnic_ctl_info {
int cmd;
union {
struct cnic_ctl_completion comp;
char bytes[MAX_CNIC_CTL_DATA];
} data;
};
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
u32 data;
dma_addr_t dma_addr;
};
struct drv_ctl_info {
int cmd;
union {
struct drv_ctl_completion comp;
struct drv_ctl_io io;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
int (*cnic_handler)(void *, void *);
int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
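/*
 * Editor's sketch of the RCU rule described above: callers dereference
 * the registered ops inside an RCU read-side critical section.
 * "example_ulp_ops" is an illustrative placeholder, not a cnic symbol.
 */
static struct cnic_ops *example_ulp_ops;

static int example_call_handler(void *ctx)
{
	struct cnic_ops *ops;
	int rc = -ENODEV;

	rcu_read_lock();
	ops = rcu_dereference(example_ulp_ops);
	if (ops)
		rc = ops->cnic_handler(ctx, NULL);
	rcu_read_unlock();
	return rc;
}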
#define MAX_CNIC_VEC 8
struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};
struct cnic_eth_dev {
struct module *drv_owner;
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
void __iomem *io_base;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
int ctx_blk_size;
u32 starting_cid;
u32 max_iscsi_conn;
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 reserved0[2];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
int (*drv_register_cnic)(struct net_device *,
struct cnic_ops *, void *);
int (*drv_unregister_cnic)(struct net_device *);
int (*drv_submit_kwqes_32)(struct net_device *,
struct kwqe *[], u32);
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
};
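/*
 * Editor's sketch: asking the ethernet driver to perform an I/O write
 * on cnic's behalf through the ->drv_ctl() hook above. The parameter
 * values are illustrative placeholders.
 */
static void example_ctl_io_write(struct cnic_eth_dev *ethdev,
				 struct net_device *netdev,
				 u32 cid_addr, u32 off, u32 val)
{
	struct drv_ctl_info info = { .cmd = DRV_CTL_IO_WR_CMD };

	info.data.io.cid_addr = cid_addr;
	info.data.io.offset = off;
	info.data.io.data = val;
	ethdev->drv_ctl(netdev, &info);
}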
struct cnic_sockaddr {
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} local;
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} remote;
};
struct cnic_sock {
struct cnic_dev *dev;
void *context;
u32 src_ip[4];
u32 dst_ip[4];
u16 src_port;
u16 dst_port;
u16 vlan_id;
unsigned char old_ha[6];
unsigned char ha[6];
u32 mtu;
u32 cid;
u32 l5_cid;
u32 pg_cid;
int ulp_type;
u32 ka_timeout;
u32 ka_interval;
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
u32 rcv_buf;
u32 snd_buf;
u32 seed;
unsigned long tcp_flags;
#define SK_TCP_NO_DELAY_ACK 0x1
#define SK_TCP_KEEP_ALIVE 0x2
#define SK_TCP_NAGLE 0x4
#define SK_TCP_TIMESTAMP 0x8
#define SK_TCP_SACK 0x10
#define SK_TCP_SEG_SCALING 0x20
unsigned long flags;
#define SK_F_INUSE 0
#define SK_F_OFFLD_COMPLETE 1
#define SK_F_OFFLD_SCHED 2
#define SK_F_PG_OFFLD_COMPLETE 3
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
atomic_t ref_count;
u32 state;
struct kwqe kwqe1;
struct kwqe kwqe2;
struct kwqe kwqe3;
};
struct cnic_dev {
struct net_device *netdev;
struct pci_dev *pcidev;
void __iomem *regview;
struct list_head list;
int (*register_device)(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx);
int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes);
int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
u32 num_wqes);
int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
void *);
int (*cm_destroy)(struct cnic_sock *);
int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
int (*cm_abort)(struct cnic_sock *);
int (*cm_close)(struct cnic_sock *);
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
u8 mac_addr[6];
int max_iscsi_conn;
int max_fcoe_conn;
int max_rdma_conn;
void *cnic_priv;
};
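/*
 * Editor's sketch: typical ULP usage of the callbacks above to bring up
 * an offloaded TCP connection. Error handling is minimal; "cid",
 * "l5_cid" and "ctx" are placeholders supplied by the ULP.
 */
static int example_offload_connect(struct cnic_dev *cdev, u32 cid,
				   u32 l5_cid, void *ctx,
				   struct sockaddr_in *dst)
{
	struct cnic_sock *csk;
	struct cnic_sockaddr saddr = { };
	int rc;

	saddr.remote.v4 = *dst;	/* e.g. an iSCSI target on port 3260 */
	rc = cdev->cm_create(cdev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx);
	if (rc)
		return rc;
	/* success is signalled later via the ULP's cm_connect_complete() */
	return cdev->cm_connect(csk, &saddr);
}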
#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
#define CNIC_RD(dev, off) readl(dev->regview + off)
#define CNIC_RD16(dev, off) readw(dev->regview + off)
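/*
 * Editor's sketch: read-modify-write of a device register through the
 * mapped register view. "EXAMPLE_REG_OFF" is a hypothetical offset.
 */
#define EXAMPLE_REG_OFF 0x100

static void example_set_reg_bit0(struct cnic_dev *dev)
{
	u32 val = CNIC_RD(dev, EXAMPLE_REG_OFF);

	CNIC_WR(dev, EXAMPLE_REG_OFF, val | 0x1);
}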
struct cnic_ulp_ops {
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
void (*cnic_init)(struct cnic_dev *dev);
void (*cnic_exit)(struct cnic_dev *dev);
void (*cnic_start)(void *ulp_ctx);
void (*cnic_stop)(void *ulp_ctx);
void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
u32 num_cqes);
void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
void (*cm_connect_complete)(struct cnic_sock *);
void (*cm_close_complete)(struct cnic_sock *);
void (*cm_abort_complete)(struct cnic_sock *);
void (*cm_remote_close)(struct cnic_sock *);
void (*cm_remote_abort)(struct cnic_sock *);
void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
#endif

drivers/s390/scsi/zfcp_ccw.c
View file

@@ -11,6 +11,24 @@
#include "zfcp_ext.h"
#define ZFCP_MODEL_PRIV 0x4
static struct ccw_device_id zfcp_ccw_device_id[] = {
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
{},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
/**
* zfcp_ccw_priv_sch - check if subchannel is privileged
* @adapter: Adapter/Subchannel to check
*/
int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
{
return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
}
/**
* zfcp_ccw_probe - probe function of zfcp driver
* @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
"ccnoti4", NULL);
break;
case CIO_BOXED:
dev_warn(&adapter->ccw_device->dev,
"The ccw device did not respond in time.\n");
dev_warn(&adapter->ccw_device->dev, "The FCP device "
"did not respond within the specified time\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
break;
}
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
up(&zfcp_data.config_sema);
}
static struct ccw_device_id zfcp_ccw_device_id[] = {
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
{},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
static struct ccw_driver zfcp_ccw_driver = {
.owner = THIS_MODULE,
.name = "zfcp",

drivers/s390/scsi/zfcp_dbf.c
View file

@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
}
response->fsf_command = fsf_req->fsf_command;
response->fsf_reqid = (unsigned long)fsf_req;
response->fsf_reqid = fsf_req->req_id;
response->fsf_seqno = fsf_req->seq_no;
response->fsf_issued = fsf_req->issued;
response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = fc_host_port_id(adapter->scsi_host);
r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = wka_port->d_id;
r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(rec, 0, sizeof(*rec));
strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
rec->fsf_reqid = (unsigned long)fsf_req;
rec->fsf_reqid = fsf_req->req_id;
rec->fsf_seqno = fsf_req->seq_no;
rec->s_id = s_id;
rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
ZFCP_DBF_SCSI_FCP_SNS_INFO);
}
rec->fsf_reqid = (unsigned long)fsf_req;
rec->fsf_reqid = fsf_req->req_id;
rec->fsf_seqno = fsf_req->seq_no;
rec->fsf_issued = fsf_req->issued;
}

drivers/s390/scsi/zfcp_def.h
View file

@@ -47,13 +47,6 @@
/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
/* Adapter Identification Parameters */
#define ZFCP_CONTROL_UNIT_TYPE 0x1731
#define ZFCP_CONTROL_UNIT_MODEL 0x03
#define ZFCP_DEVICE_TYPE 0x1732
#define ZFCP_DEVICE_MODEL 0x03
#define ZFCP_DEVICE_MODEL_PRIV 0x04
/* DMQ bug workaround: don't use last SBALE */
#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)

drivers/s390/scsi/zfcp_erp.c
View file

@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
zfcp_port_put(port);
return ZFCP_ERP_CONTINUES;
}
/* fall through */
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!port->d_id)
return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
act->step = ZFCP_ERP_STEP_PORT_CLOSING;
return ZFCP_ERP_CONTINUES;
}
/* fall through otherwise */
}
if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
port->d_id = 0;
_zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
return ZFCP_ERP_EXIT;
}
/* fall through otherwise */
}
return ZFCP_ERP_FAILED;
}

drivers/s390/scsi/zfcp_ext.h
View file

@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
/* zfcp_ccw.c */
extern int zfcp_ccw_register(void);
extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
/* zfcp_cfdc.c */

drivers/s390/scsi/zfcp_fc.c
View file

@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct zfcp_port *port;
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
if ((port->d_id & range) == (elem->nport_did & range))
zfcp_test_link(port);
if (!port->d_id)
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1", NULL);
}
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}

drivers/s390/scsi/zfcp_fsf.c
View file

@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
/* fall through */
default:
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_test_link(unit->port);
/* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
/* fall through */
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
struct fsf_plogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
goto out;
switch (header->fsf_status) {
case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
out:
zfcp_port_put(port);
}
/**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
struct zfcp_port *port = erp_action->port;
int retval = -EIO;
spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_open_port_handler;
req->qtcb->bottom.support.d_id = erp_action->port->d_id;
req->data = erp_action->port;
req->qtcb->bottom.support.d_id = port->d_id;
req->data = port;
req->erp_action = erp_action;
erp_action->fsf_req = req;
zfcp_port_get(port);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
zfcp_port_put(port);
}
out:
spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
/* fall through */
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* fall through */
case FSF_ACCESS_DENIED:
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
(adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
!zfcp_ccw_priv_sch(adapter)) {
exclusive = (bottom->lun_access_info &
FSF_UNIT_ACCESS_EXCLUSIVE);
readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
{
struct zfcp_fsf_req *req;
struct fcp_cmnd_iu *fcp_cmnd_iu;
unsigned int sbtype;
unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
int real_bytes, retval = -EIO;
struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
sbtype = SBAL_FLAGS0_TYPE_READ;
break;
case DMA_FROM_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
sbtype = SBAL_FLAGS0_TYPE_READ;
fcp_cmnd_iu->rddata = 1;
break;
case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
fcp_cmnd_iu->wddata = 1;
break;
case DMA_BIDIRECTIONAL:
default:
retval = -EIO;
goto failed_scsi_cmnd;
}
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
scsi_sglist(scsi_cmnd),
FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
retval = -EIO;
else {
if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",

drivers/s390/scsi/zfcp_scsi.c
View file

@@ -12,6 +12,10 @@
#include "zfcp_ext.h"
#include <asm/atomic.h>
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
/* Find start of Sense Information in FCP response unit*/
char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
{
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
return fcp_sns_info_ptr;
}
static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
return sdev->queue_depth;
}
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
.name = "zfcp",
.module = THIS_MODULE,
.proc_name = "zfcp",
.change_queue_depth = zfcp_scsi_change_queue_depth,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,

drivers/scsi/Kconfig
View file

@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
it has an enclosure device. Selecting this option will just allow
certain enclosure conditions to be reported and is not required.
comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
depends on SCSI
config SCSI_MULTI_LUN
bool "Probe all LUNs on each SCSI device"
depends on SCSI
help
If you have a SCSI device that supports more than one LUN (Logical
Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
can say Y here to force the SCSI driver to probe for multiple LUNs.
A SCSI device with multiple LUNs acts logically like multiple SCSI
devices. The vast majority of SCSI devices have only one LUN, and
so most people can say N here. The max_luns boot/module parameter
allows to override this setting.
Some devices support more than one LUN (Logical Unit Number) in order
to allow access to several media, e.g. CD jukebox, USB card reader,
mobile phone in mass storage mode. This option forces the kernel to
probe for all LUNs by default. This setting can be overridden by the
max_luns boot/module parameter. Note that this option does not affect
devices conforming to SCSI-3 or higher as they can explicitly report
their number of LUNs. It is safe to say Y here unless you have one of
those rare devices which react in an unexpected way when probed for
multiple LUNs.
config SCSI_CONSTANTS
bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
http://open-iscsi.org
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
source "drivers/scsi/mvsas/Kconfig"
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
Generally, saying N is fine.
config SCSI_MVSAS
tristate "Marvell 88SE6440 SAS/SATA support"
depends on PCI && SCSI
select SCSI_SAS_LIBSAS
help
This driver supports Marvell SAS/SATA PCI devices.
To compile this driver as a module, choose M here: the module
will be called mvsas.
config SCSI_NCR53C406A
tristate "NCR53c406a SCSI support"
depends on ISA && SCSI

drivers/scsi/Makefile
View file

@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_ARM) += arm/

drivers/scsi/NCR_D700.c
View file

@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
return ret;
}
static int
static irqreturn_t
NCR_D700_intr(int irq, void *data)
{
struct NCR_D700_private *p = (struct NCR_D700_private *)data;
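/*
 * Editor's note: with the corrected prototype the handler must return an
 * irqreturn_t. A minimal handler of this shape (a sketch, not this
 * driver's actual logic) looks like:
 */
static irqreturn_t example_intr(int irq, void *data)
{
	if (!data)
		return IRQ_NONE;	/* spurious / not our interrupt */
	/* ... service the device ... */
	return IRQ_HANDLED;
}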

drivers/scsi/bnx2i/57xx_iscsi_constants.h
View file

@@ -0,0 +1,155 @@
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#ifndef __57XX_ISCSI_CONSTANTS_H_
#define __57XX_ISCSI_CONSTANTS_H_
/**
* This file defines HSI constants for the iSCSI flows
*/
/* iSCSI request op codes */
#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
/* iSCSI task types */
#define ISCSI_TASK_TYPE_READ (0)
#define ISCSI_TASK_TYPE_WRITE (1)
#define ISCSI_TASK_TYPE_MPATH (2)
/* initial CQ sequence numbers */
#define ISCSI_INITIAL_SN (1)
/* KWQ (kernel work queue) layer codes */
#define ISCSI_KWQE_LAYER_CODE (6)
/* KWQ (kernel work queue) request op codes */
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
#define ISCSI_KWQE_OPCODE_INIT1 (4)
#define ISCSI_KWQE_OPCODE_INIT2 (5)
/* KCQ (kernel completion queue) response op codes */
#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
#define ISCSI_KCQE_OPCODE_INIT (0x14)
#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
/* KCQ (kernel completion queue) completion status */
#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
/* Response */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
/* Data-In */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
/* R2T */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
/* TMF */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
/* IP/TCP processing errors: */
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
/* iSCSI licensing errors */
/* general iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
/* additional LOM specific iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
/* SQ/RQ/CQ DB structure sizes */
#define ISCSI_SQ_DB_SIZE (16)
#define ISCSI_RQ_DB_SIZE (16)
#define ISCSI_CQ_DB_SIZE (80)
#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
/* Page size codes (for flags field in connection offload request) */
#define ISCSI_PAGE_SIZE_256 (0)
#define ISCSI_PAGE_SIZE_512 (1)
#define ISCSI_PAGE_SIZE_1K (2)
#define ISCSI_PAGE_SIZE_2K (3)
#define ISCSI_PAGE_SIZE_4K (4)
#define ISCSI_PAGE_SIZE_8K (5)
#define ISCSI_PAGE_SIZE_16K (6)
#define ISCSI_PAGE_SIZE_32K (7)
#define ISCSI_PAGE_SIZE_64K (8)
#define ISCSI_PAGE_SIZE_128K (9)
#define ISCSI_PAGE_SIZE_256K (10)
#define ISCSI_PAGE_SIZE_512K (11)
#define ISCSI_PAGE_SIZE_1M (12)
#define ISCSI_PAGE_SIZE_2M (13)
#define ISCSI_PAGE_SIZE_4M (14)
#define ISCSI_PAGE_SIZE_8M (15)
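/*
 * Editor's sketch: the codes above encode a page size of 256 << code
 * bytes. A hypothetical helper mapping a size to its code (returns -1
 * for sizes outside the supported power-of-two range):
 */
static int example_page_size_to_code(u32 size)
{
	int code;

	for (code = ISCSI_PAGE_SIZE_256; code <= ISCSI_PAGE_SIZE_8M; code++)
		if (size == (256u << code))
			return code;
	return -1;
}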
/* Iscsi PDU related defines */
#define ISCSI_HEADER_SIZE (48)
#define ISCSI_DIGEST_SHIFT (2)
#define ISCSI_DIGEST_SIZE (4)
#define B577XX_ISCSI_CONNECTION_TYPE 3
#endif /*__57XX_ISCSI_CONSTANTS_H_ */

File diff suppressed because it is too large

drivers/scsi/bnx2i/Kconfig
View file

@@ -0,0 +1,7 @@
config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
select CNIC
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
devices.

drivers/scsi/bnx2i/Makefile
View file

@@ -0,0 +1,3 @@
bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o

drivers/scsi/bnx2i/bnx2i.h (new file, 771 lines)
View file

@@ -0,0 +1,771 @@
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#ifndef _BNX2I_H_
#define _BNX2I_H_
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/kfifo.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include "../../net/cnic_if.h"
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
#define BNX2I_MAX_ADAPTERS 8
#define ISCSI_MAX_CONNS_PER_HBA 128
#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
#define ISCSI_MAX_CMDS_PER_SESS 128
/* Total active commands across all connections supported by devices */
#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_BDS_PER_CMD 32
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
/* 5706/08 hardware has a limit on the maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
#define BNX2I_SQ_WQES_MIN 16
#define BNX2I_570X_SQ_WQES_MAX 128
#define BNX2I_5770X_SQ_WQES_MAX 512
#define BNX2I_570X_SQ_WQES_DEFAULT 128
#define BNX2I_5770X_SQ_WQES_DEFAULT 256
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
#define BNX2I_RQ_WQES_MIN 16
#define BNX2I_RQ_WQES_MAX 32
#define BNX2I_RQ_WQES_DEFAULT 16
/* CCELLs per conn */
#define BNX2I_CCELLS_MIN 16
#define BNX2I_CCELLS_MAX 96
#define BNX2I_CCELLS_DEFAULT 64
#define ITT_INVALID_SIGNATURE 0xFFFF
#define ISCSI_CMD_CLEANUP_TIMEOUT 100
#define BNX2I_CONN_CTX_BUF_SIZE 16384
#define BNX2I_SQ_WQE_SIZE 64
#define BNX2I_RQ_WQE_SIZE 256
#define BNX2I_CQE_SIZE 64
#define MB_KERNEL_CTX_SHIFT 8
#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
#define CTX_SHIFT 7
#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
#define CTX_OFFSET 0x10000
#define MAX_CID_CNT 0x4000
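/*
 * Editor's note: GET_CID_NUM() drops the low CTX_SHIFT (7) bits of a
 * context address, e.g. GET_CID_NUM(0x1180) == 0x23 (context id 35).
 */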
/* 5709 context registers */
#define BNX2_MQ_CONFIG2 0x00003d00
#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
/* 57710's BAR2 is mapped to doorbell registers */
#define BNX2X_DOORBELL_PCI_BAR 2
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
#define CNIC_DISARM_CQE 0
#define REG_RD(__hba, offset) \
readl(__hba->regview + offset)
#define REG_WR(__hba, offset, val) \
writel(val, __hba->regview + offset)
/**
* struct generic_pdu_resc - login pdu resource structure
*
* @req_buf: driver buffer used to stage payload associated with
* the login request
* @req_dma_addr: dma address for iscsi login request payload buffer
* @req_buf_size: actual login request payload length
* @req_wr_ptr: pointer into login request buffer when next data is
* to be written
* @resp_hdr: iscsi header where iscsi login response header is to
* be recreated
* @resp_buf: buffer to stage login response payload
* @resp_dma_addr: login response payload buffer dma address
* @resp_buf_size: login response payload length
* @resp_wr_ptr: pointer into login response buffer when next data is
* to be written
* @req_bd_tbl: iscsi login request payload BD table
* @req_bd_dma: login request BD table dma address
* @resp_bd_tbl: iscsi login response payload BD table
* @resp_bd_dma: login response BD table dma address
*
* The following structure defines buffer info for generic PDUs such as iSCSI
* Login, Logout and NOP
*/
struct generic_pdu_resc {
char *req_buf;
dma_addr_t req_dma_addr;
u32 req_buf_size;
char *req_wr_ptr;
struct iscsi_hdr resp_hdr;
char *resp_buf;
dma_addr_t resp_dma_addr;
u32 resp_buf_size;
char *resp_wr_ptr;
char *req_bd_tbl;
dma_addr_t req_bd_dma;
char *resp_bd_tbl;
dma_addr_t resp_bd_dma;
};
/**
* struct bd_resc_page - tracks DMA'able memory allocated for BD tables
*
* @link: list head to link elements
* @max_ptrs: maximum pointers that can be stored in this page
* @num_valid: number of valid pointers in this page
* @page: base address for page pointer array
*
* structure to track DMA'able memory allocated for command BD tables
*/
struct bd_resc_page {
struct list_head link;
u32 max_ptrs;
u32 num_valid;
void *page[1];
};
/**
* struct io_bdt - I/O buffer descriptor table
*
* @bd_tbl: BD table's virtual address
* @bd_tbl_dma: BD table's dma address
* @bd_valid: num valid BD entries
*
* IO BD table
*/
struct io_bdt {
struct iscsi_bd *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
/**
* bnx2i_cmd - iscsi command structure
*
* @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
* @sg: SG list
* @io_tbl: buffer descriptor (BD) table
* @bd_tbl_dma: buffer descriptor (BD) table's dma address
*/
struct bnx2i_cmd {
struct iscsi_hdr hdr;
struct bnx2i_conn *conn;
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct io_bdt io_tbl;
dma_addr_t bd_tbl_dma;
struct bnx2i_cmd_request req;
};
/**
* struct bnx2i_conn - iscsi connection structure
*
* @cls_conn: pointer to iscsi cls conn
* @hba: adapter structure pointer
* @iscsi_conn_cid: iscsi conn id
* @fw_cid: firmware iscsi context id
* @ep: endpoint structure pointer
* @gen_pdu: login/nopout/logout pdu resources
* @violation_notified: bit mask used to track iscsi error/warning messages
* already printed out
*
* iSCSI connection structure
*/
struct bnx2i_conn {
struct iscsi_cls_conn *cls_conn;
struct bnx2i_hba *hba;
struct completion cmd_cleanup_cmpl;
int is_bound;
u32 iscsi_conn_cid;
#define BNX2I_CID_RESERVED 0x5AFF
u32 fw_cid;
struct timer_list poll_timer;
/*
* Queue Pair (QP) related structure elements.
*/
struct bnx2i_endpoint *ep;
/*
* Buffer for login negotiation process
*/
struct generic_pdu_resc gen_pdu;
u64 violation_notified;
};
/**
* struct iscsi_cid_queue - Per adapter iscsi cid queue
*
* @cid_que_base: queue base memory
* @cid_que: queue memory pointer
* @cid_q_prod_idx: producer index
* @cid_q_cons_idx: consumer index
* @cid_q_max_idx: max index, used to detect wrap-around condition
* @cid_free_cnt: queue size
* @conn_cid_tbl: iscsi cid to conn structure mapping table
*
* Per adapter iSCSI CID Queue
*/
struct iscsi_cid_queue {
void *cid_que_base;
u32 *cid_que;
u32 cid_q_prod_idx;
u32 cid_q_cons_idx;
u32 cid_q_max_idx;
u32 cid_free_cnt;
struct bnx2i_conn **conn_cid_tbl;
};
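/*
 * Editor's sketch (hypothetical helper, not the driver's code): how a
 * CID is consumed from this queue using the indexes above, wrapping
 * the consumer index at cid_q_max_idx.
 */
static u32 example_alloc_iscsi_cid(struct iscsi_cid_queue *q)
{
	u32 cid;

	if (!q->cid_free_cnt)
		return 0;	/* queue exhausted; 0 used as error marker here */
	cid = q->cid_que[q->cid_q_cons_idx];
	if (++q->cid_q_cons_idx == q->cid_q_max_idx)
		q->cid_q_cons_idx = 0;
	q->cid_free_cnt--;
	return cid;
}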
/**
* struct bnx2i_hba - bnx2i adapter structure
*
* @link: list head to link elements
* @cnic: pointer to cnic device
* @pcidev: pointer to pci dev
* @netdev: pointer to netdev structure
* @regview: mapped PCI register space
* @age: age, incremented by every recovery
* @cnic_dev_type: cnic device type, 5706/5708/5709/57710
* @mail_queue_access: mailbox queue access mode, applicable to 5709 only
* @reg_with_cnic: indicates whether the device is registered with CNIC
* @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
* @mtu_supported: Ethernet MTU supported
* @shost: scsi host pointer
* @max_sqes: SQ size
* @max_rqes: RQ size
* @max_cqes: CQ size
* @num_ccell: number of command cells per connection
* @ofld_conns_active: number of currently active offloaded connections
* @max_active_conns: max offload connections supported by this device
* @cid_que: iscsi cid queue
* @ep_rdwr_lock: read / write lock to synchronize various ep lists
* @ep_ofld_list: connection list for pending offload completion
* @ep_destroy_list: connection list for pending destroy completion
* @mp_bd_tbl: BD table to be used with middle path requests
* @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
* @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
* @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
* @lock: lock to synchronize access to hba structure
* @pci_did: PCI device ID
* @pci_vid: PCI vendor ID
* @pci_sdid: PCI subsystem device ID
* @pci_svid: PCI subsystem vendor ID
* @pci_func: PCI function number in system pci tree
* @pci_devno: PCI device number in system pci tree
* @num_wqe_sent: statistic counter, total wqe's sent
* @num_cqe_rcvd: statistic counter, total cqe's received
* @num_intr_claimed: statistic counter, total interrupts claimed
* @link_changed_count: statistic counter, num of link change notifications
* received
* @ipaddr_changed_count: statistic counter, num times IP address changed while
* at least one connection is offloaded
* @num_sess_opened: statistic counter, total num sessions opened
* @num_conn_opened: statistic counter, total num conns opened on this hba
* @ctx_ccell_tasks: captures number of ccells and tasks supported by
* currently offloaded connection, used to decode
* context memory
*
* Adapter Data Structure
*/
struct bnx2i_hba {
struct list_head link;
struct cnic_dev *cnic;
struct pci_dev *pcidev;
struct net_device *netdev;
void __iomem *regview;
u32 age;
unsigned long cnic_dev_type;
#define BNX2I_NX2_DEV_5706 0x0
#define BNX2I_NX2_DEV_5708 0x1
#define BNX2I_NX2_DEV_5709 0x2
#define BNX2I_NX2_DEV_57710 0x3
u32 mail_queue_access;
#define BNX2I_MQ_KERNEL_MODE 0x0
#define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
#define BNX2I_MQ_BIN_MODE 0x2
unsigned long reg_with_cnic;
#define BNX2I_CNIC_REGISTERED 1
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
#define BNX2I_MAX_MTU_SUPPORTED 1500
struct Scsi_Host *shost;
u32 max_sqes;
u32 max_rqes;
u32 max_cqes;
u32 num_ccell;
int ofld_conns_active;
int max_active_conns;
struct iscsi_cid_queue cid_que;
rwlock_t ep_rdwr_lock;
struct list_head ep_ofld_list;
struct list_head ep_destroy_list;
/*
* BD table to be used with MP (Middle Path) requests.
*/
char *mp_bd_tbl;
dma_addr_t mp_bd_dma;
char *dummy_buffer;
dma_addr_t dummy_buf_dma;
spinlock_t lock; /* protects hba structure access */
struct mutex net_dev_lock;/* sync net device access */
/*
* PCI related info.
*/
u16 pci_did;
u16 pci_vid;
u16 pci_sdid;
u16 pci_svid;
u16 pci_func;
u16 pci_devno;
/*
* The following are statistics useful during development
* and at a later stage for score boarding.
*/
u32 num_wqe_sent;
u32 num_cqe_rcvd;
u32 num_intr_claimed;
u32 link_changed_count;
u32 ipaddr_changed_count;
u32 num_sess_opened;
u32 num_conn_opened;
unsigned int ctx_ccell_tasks;
};
/*******************************************************************************
* QP [ SQ / RQ / CQ ] info.
******************************************************************************/
/*
* SQ/RQ/CQ generic structure definition
*/
struct sqe {
u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
};
struct rqe {
u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
};
struct cqe {
u8 cqe_byte[BNX2I_CQE_SIZE];
};
enum {
#if defined(__LITTLE_ENDIAN)
CNIC_EVENT_COAL_INDEX = 0x0,
CNIC_SEND_DOORBELL = 0x4,
CNIC_EVENT_CQ_ARM = 0x7,
CNIC_RECV_DOORBELL = 0x8
#elif defined(__BIG_ENDIAN)
CNIC_EVENT_COAL_INDEX = 0x2,
CNIC_SEND_DOORBELL = 0x6,
CNIC_EVENT_CQ_ARM = 0x4,
CNIC_RECV_DOORBELL = 0xa
#endif
};
/*
* CQ DB
*/
struct bnx2x_iscsi_cq_pend_cmpl {
/* CQ producer, updated by Ustorm */
u16 ustrom_prod;
/* CQ pending completion counter */
u16 pend_cntr;
};
struct bnx2i_5771x_cq_db {
struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
/* CQ pending completion ITT array */
u16 itt[BNX2X_MAX_CQS];
/* Cstorm CQ sequence to notify array, updated by driver */
u16 sqn[BNX2X_MAX_CQS];
u32 reserved[4]; /* 16 byte alignment */
};
struct bnx2i_5771x_sq_rq_db {
u16 prod_idx;
u8 reserved0[14]; /* Pad structure size to 16 bytes */
};
struct bnx2i_5771x_dbell_hdr {
u8 header;
/* 1 for rx doorbell, 0 for tx doorbell */
#define B577XX_DOORBELL_HDR_RX (0x1<<0)
#define B577XX_DOORBELL_HDR_RX_SHIFT 0
/* 0 for normal doorbell, 1 for advertise wnd doorbell */
#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
/* connection type */
#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
};
struct bnx2i_5771x_dbell {
struct bnx2i_5771x_dbell_hdr dbell;
u8 pad[3];
};
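/*
 * Editor's sketch: composing a tx doorbell header for an iSCSI
 * connection from the mask/shift pairs above; rx and db_type keep
 * their zero defaults and B577XX_ISCSI_CONNECTION_TYPE comes from
 * 57xx_iscsi_constants.h.
 */
static void example_init_tx_dbell(struct bnx2i_5771x_dbell *db)
{
	memset(db, 0, sizeof(*db));
	db->dbell.header = B577XX_ISCSI_CONNECTION_TYPE <<
			   B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
}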
/**
* struct qp_info - QP (shared queue region) attributes structure
*
* @ctx_base: ioremapped pci register base to access doorbell register
* pertaining to this offloaded connection
* @sq_virt: virtual address of send queue (SQ) region
* @sq_phys: DMA address of SQ memory region
* @sq_mem_size: SQ size
* @sq_prod_qe: SQ producer entry pointer
* @sq_cons_qe: SQ consumer entry pointer
* @sq_first_qe: virtual address of first entry in SQ
* @sq_last_qe: virtual address of last entry in SQ
* @sq_prod_idx: SQ producer index
* @sq_cons_idx: SQ consumer index
* @sqe_left: number of SQ entries left
* @sq_pgtbl_virt: page table describing buffers constituting SQ region
* @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
* @sq_pgtbl_size: SQ page table size
* @cq_virt: virtual address of completion queue (CQ) region
* @cq_phys: DMA address of CQ memory region
* @cq_mem_size: CQ size
* @cq_prod_qe: CQ producer entry pointer
* @cq_cons_qe: CQ consumer entry pointer
* @cq_first_qe: virtual address of first entry in CQ
* @cq_last_qe: virtual address of last entry in CQ
* @cq_prod_idx: CQ producer index
* @cq_cons_idx: CQ consumer index
* @cqe_left: number of CQ entries left
* @cqe_size: size of each CQ entry
* @cqe_exp_seq_sn: next expected CQE sequence number
* @cq_pgtbl_virt: page table describing buffers constituting CQ region
* @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
* @cq_pgtbl_size: CQ page table size
* @rq_virt: virtual address of receive queue (RQ) region
* @rq_phys: DMA address of RQ memory region
* @rq_mem_size: RQ size
* @rq_prod_qe: RQ producer entry pointer
* @rq_cons_qe: RQ consumer entry pointer
* @rq_first_qe: virtual address of first entry in RQ
* @rq_last_qe: virtual address of last entry in RQ
* @rq_prod_idx: RQ producer index
* @rq_cons_idx: RQ consumer index
* @rqe_left: number of RQ entries left
* @rq_pgtbl_virt: page table describing buffers constituting RQ region
* @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
* @rq_pgtbl_size: RQ page table size
*
* A queue pair (QP) is a per-connection shared data structure which is used
* to send work requests (SQ), receive completion notifications (CQ)
* and receive asynchronous / scsi sense info (RQ). The 'qp_info' structure
* below holds queue memory, consumer/producer indexes and page table
* information
*/
struct qp_info {
void __iomem *ctx_base;
#define DPM_TRIGER_TYPE 0x40
#define BNX2I_570x_QUE_DB_SIZE 0
#define BNX2I_5771x_QUE_DB_SIZE 16
struct sqe *sq_virt;
dma_addr_t sq_phys;
u32 sq_mem_size;
struct sqe *sq_prod_qe;
struct sqe *sq_cons_qe;
struct sqe *sq_first_qe;
struct sqe *sq_last_qe;
u16 sq_prod_idx;
u16 sq_cons_idx;
u32 sqe_left;
void *sq_pgtbl_virt;
dma_addr_t sq_pgtbl_phys;
u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
struct cqe *cq_virt;
dma_addr_t cq_phys;
u32 cq_mem_size;
struct cqe *cq_prod_qe;
struct cqe *cq_cons_qe;
struct cqe *cq_first_qe;
struct cqe *cq_last_qe;
u16 cq_prod_idx;
u16 cq_cons_idx;
u32 cqe_left;
u32 cqe_size;
u32 cqe_exp_seq_sn;
void *cq_pgtbl_virt;
dma_addr_t cq_pgtbl_phys;
u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
struct rqe *rq_virt;
dma_addr_t rq_phys;
u32 rq_mem_size;
struct rqe *rq_prod_qe;
struct rqe *rq_cons_qe;
struct rqe *rq_first_qe;
struct rqe *rq_last_qe;
u16 rq_prod_idx;
u16 rq_cons_idx;
u32 rqe_left;
void *rq_pgtbl_virt;
dma_addr_t rq_pgtbl_phys;
u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
};
/*
* CID handles
*/
struct ep_handles {
u32 fw_cid;
u32 drv_iscsi_cid;
u16 pg_cid;
u16 rsvd;
};
enum {
EP_STATE_IDLE = 0x0,
EP_STATE_PG_OFLD_START = 0x1,
EP_STATE_PG_OFLD_COMPL = 0x2,
EP_STATE_OFLD_START = 0x4,
EP_STATE_OFLD_COMPL = 0x8,
EP_STATE_CONNECT_START = 0x10,
EP_STATE_CONNECT_COMPL = 0x20,
EP_STATE_ULP_UPDATE_START = 0x40,
EP_STATE_ULP_UPDATE_COMPL = 0x80,
EP_STATE_DISCONN_START = 0x100,
EP_STATE_DISCONN_COMPL = 0x200,
EP_STATE_CLEANUP_START = 0x400,
EP_STATE_CLEANUP_CMPL = 0x800,
EP_STATE_TCP_FIN_RCVD = 0x1000,
EP_STATE_TCP_RST_RCVD = 0x2000,
EP_STATE_PG_OFLD_FAILED = 0x1000000,
EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
EP_STATE_CLEANUP_FAILED = 0x4000000,
EP_STATE_OFLD_FAILED = 0x8000000,
EP_STATE_CONNECT_FAILED = 0x10000000,
EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
};
/**
* struct bnx2i_endpoint - representation of tcp connection in NX2 world
*
* @link: list head to link elements
* @hba: adapter to which this connection belongs
* @conn: iscsi connection this EP is linked to
* @sess: iscsi session this EP is linked to
* @cm_sk: cnic sock struct
* @hba_age: age to detect if 'iscsid' issues ep_disconnect()
* after HBA reset is completed by bnx2i/cnic/bnx2
* modules
* @state: tracks offload connection state machine
* @teardown_mode: indicates if conn teardown is abortive or orderly
* @qp: QP information
* @ids: contains chip allocated *context id* & driver assigned
* *iscsi cid*
* @ofld_timer: offload timer to detect timeout
* @ofld_wait: wait queue
*
* Endpoint Structure - equivalent of tcp socket structure
*/
struct bnx2i_endpoint {
struct list_head link;
struct bnx2i_hba *hba;
struct bnx2i_conn *conn;
struct cnic_sock *cm_sk;
u32 hba_age;
u32 state;
unsigned long timestamp;
int num_active_cmds;
struct qp_info qp;
struct ep_handles ids;
#define ep_iscsi_cid ids.drv_iscsi_cid
#define ep_cid ids.fw_cid
#define ep_pg_cid ids.pg_cid
struct timer_list ofld_timer;
wait_queue_head_t ofld_wait;
};
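/*
 * Editor's sketch (hypothetical): waiting for an offload request to
 * resolve, pairing the EP_STATE_* values with the endpoint's offload
 * wait queue declared above.
 */
static void example_wait_ofld_compl(struct bnx2i_endpoint *ep)
{
	if (wait_event_interruptible(ep->ofld_wait,
				     ep->state != EP_STATE_OFLD_START))
		return;	/* interrupted by a signal */
	if (ep->state != EP_STATE_OFLD_COMPL)
		return;	/* offload failed or timed out */
	/* chip context is ready; proceed with connect */
}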
/* Global variables */
extern unsigned int error_mask1, error_mask2;
extern u64 iscsi_error_mask;
extern unsigned int en_tcp_dack;
extern unsigned int event_coal_div;
extern struct scsi_transport_template *bnx2i_scsi_xport_template;
extern struct iscsi_transport bnx2i_iscsi_transport;
extern struct cnic_ulp_ops bnx2i_cnic_cb;
extern unsigned int sq_size;
extern unsigned int rq_size;
extern struct device_attribute *bnx2i_dev_attributes[];
/*
* Function Prototypes
*/
extern void bnx2i_identify_device(struct bnx2i_hba *hba);
extern void bnx2i_register_device(struct bnx2i_hba *hba);
extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
extern void bnx2i_start(void *handle);
extern void bnx2i_stop(void *handle);
extern void bnx2i_reg_dev_all(void);
extern void bnx2i_unreg_dev_all(void);
extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
u16 iscsi_cid);
int bnx2i_alloc_ep_pool(void);
void bnx2i_release_ep_pool(void);
struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
void bnx2i_free_hba(struct bnx2i_hba *hba);
void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
void bnx2i_drop_session(struct iscsi_cls_session *session);
extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
struct iscsi_task *mtask, u32 ttt,
char *datap, int data_len, int unsol);
extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
struct bnx2i_cmd *cmd);
extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_ep_ofld_timer(unsigned long data);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
/* Debug related function prototypes */
extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
#endif

File diff suppressed because it is too large

drivers/scsi/bnx2i/bnx2i_init.c
View file

@@ -0,0 +1,438 @@
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include "bnx2i.h"
static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
static int bnx2i_reg_device;
#define DRV_MODULE_NAME "bnx2i"
#define DRV_MODULE_VERSION "2.0.1d"
#define DRV_MODULE_RELDATE "Mar 25, 2009"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static DEFINE_RWLOCK(bnx2i_dev_lock);
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
unsigned int en_tcp_dack = 1;
module_param(en_tcp_dack, int, 0664);
MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
unsigned int error_mask1 = 0x00;
module_param(error_mask1, int, 0664);
MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
unsigned int error_mask2 = 0x00;
module_param(error_mask2, int, 0664);
MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
unsigned int sq_size;
module_param(sq_size, int, 0664);
MODULE_PARM_DESC(sq_size, "Configure SQ size");
unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
module_param(rq_size, int, 0664);
MODULE_PARM_DESC(rq_size, "Configure RQ size");
u64 iscsi_error_mask = 0x00;
static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
*
* This function identifies the NX2 device type and sets the appropriate
* queue mailbox register access method; the 5709 requires the driver to
* access MBOX regs using *bin* mode
*/
void bnx2i_identify_device(struct bnx2i_hba *hba)
{
hba->cnic_dev_type = 0;
if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
(hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
(hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
hba->pci_did == PCI_DEVICE_ID_NX2_57711)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
}
/**
* get_adapter_list_head - returns head of adapter list
*/
struct bnx2i_hba *get_adapter_list_head(void)
{
struct bnx2i_hba *hba = NULL;
struct bnx2i_hba *tmp_hba;
if (!adapter_count)
goto hba_not_found;
read_lock(&bnx2i_dev_lock);
list_for_each_entry(tmp_hba, &adapter_list, link) {
if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
hba = tmp_hba;
break;
}
}
read_unlock(&bnx2i_dev_lock);
hba_not_found:
return hba;
}
/**
* bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
* @cnic: pointer to cnic device instance
*
*/
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link) {
if (hba->cnic == cnic) {
read_unlock(&bnx2i_dev_lock);
return hba;
}
}
read_unlock(&bnx2i_dev_lock);
return NULL;
}
/**
* bnx2i_start - cnic callback to initialize & start adapter instance
* @handle: transparent handle pointing to adapter structure
*
* This function maps the adapter structure to the pcidev structure and
* initiates the firmware handshake to enable/initialize on-chip iSCSI
* components. This bnx2i - cnic interface API callback is issued after
* the following two conditions are met:
* a) the underlying network interface is up (marked by the 'NETDEV_UP'
* event from netdev)
* b) the bnx2i adapter instance is registered
*/
void bnx2i_start(void *handle)
{
#define BNX2I_INIT_POLL_TIME (1000 / HZ)
struct bnx2i_hba *hba = handle;
int i = HZ;
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
}
/**
* bnx2i_stop - cnic callback to shutdown adapter instance
* @handle: transparent handle pointing to adapter structure
*
* The driver checks whether the adapter is already in shutdown mode; if not,
* it starts the shutdown process
*/
void bnx2i_stop(void *handle)
{
struct bnx2i_hba *hba = handle;
/* check if cleanup happened in GOING_DOWN context */
clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
&hba->adapter_state))
iscsi_host_for_each_session(hba->shost,
bnx2i_drop_session);
}
/**
* bnx2i_register_device - register bnx2i adapter instance with the cnic driver
* @hba: Adapter instance to register
*
* registers bnx2i adapter instance with the cnic driver while holding the
* adapter structure lock
*/
void bnx2i_register_device(struct bnx2i_hba *hba)
{
if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
return;
}
hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
spin_lock(&hba->lock);
bnx2i_reg_device++;
spin_unlock(&hba->lock);
set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
/**
* bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
*
* registers all bnx2i adapter instances with the cnic driver while holding
* the global resource lock
*/
void bnx2i_reg_dev_all(void)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link)
bnx2i_register_device(hba);
read_unlock(&bnx2i_dev_lock);
}
/**
* bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
* @hba: Adapter instance to unregister
*
* unregisters the bnx2i adapter instance from the cnic driver while holding
* the adapter structure lock
*/
static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
{
if (hba->ofld_conns_active ||
!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
return;
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
spin_lock(&hba->lock);
bnx2i_reg_device--;
spin_unlock(&hba->lock);
/* ep_disconnect could come before NETDEV_DOWN, driver won't
* see NETDEV_DOWN as it already unregistered itself.
*/
hba->adapter_state = 0;
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
/**
* bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
*
* unregisters all bnx2i adapter instances with the cnic driver while holding
* the global resource lock
*/
void bnx2i_unreg_dev_all(void)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link)
bnx2i_unreg_one_device(hba);
read_unlock(&bnx2i_dev_lock);
}
/**
* bnx2i_init_one - initialize an adapter instance and allocate memory resources
* @hba: bnx2i adapter instance
* @cnic: cnic device handle
*
* The global resource lock and host adapter lock are held during the critical
* sections below. This routine is called from cnic_register_driver() context
* and from the workhorse thread which does the majority of device-specific
* initialization
*/
static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
{
int rc;
read_lock(&bnx2i_dev_lock);
if (bnx2i_reg_device &&
!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (rc) /* duplicate registration */
printk(KERN_ERR "bnx2i- dev reg failed\n");
spin_lock(&hba->lock);
bnx2i_reg_device++;
hba->age++;
spin_unlock(&hba->lock);
set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
read_unlock(&bnx2i_dev_lock);
write_lock(&bnx2i_dev_lock);
list_add_tail(&hba->link, &adapter_list);
adapter_count++;
write_unlock(&bnx2i_dev_lock);
return 0;
}
/**
* bnx2i_ulp_init - initialize an adapter instance
* @dev: cnic device handle
*
* Called from cnic_register_driver() context to initialize all enumerated
* cnic devices. This routine allocates the adapter structure and other
* device-specific resources.
*/
void bnx2i_ulp_init(struct cnic_dev *dev)
{
struct bnx2i_hba *hba;
/* Allocate a HBA structure for this device */
hba = bnx2i_alloc_hba(dev);
if (!hba) {
printk(KERN_ERR "bnx2i init: hba initialization failed\n");
return;
}
/* Get PCI related information and update hba struct members */
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
if (bnx2i_init_one(hba, dev)) {
printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
bnx2i_free_hba(hba);
} else
hba->cnic = dev;
}
/**
* bnx2i_ulp_exit - shuts down adapter instance and frees all resources
* @dev: cnic device handle
*
*/
void bnx2i_ulp_exit(struct cnic_dev *dev)
{
struct bnx2i_hba *hba;
hba = bnx2i_find_hba_for_cnic(dev);
if (!hba) {
printk(KERN_INFO "bnx2i_ulp_exit: hba not "
"found, dev 0x%p\n", dev);
return;
}
write_lock(&bnx2i_dev_lock);
list_del_init(&hba->link);
adapter_count--;
if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
spin_lock(&hba->lock);
bnx2i_reg_device--;
spin_unlock(&hba->lock);
}
write_unlock(&bnx2i_dev_lock);
bnx2i_free_hba(hba);
}
/**
* bnx2i_mod_init - module init entry point
*
* Initializes driver-wide global data structures such as the endpoint pool,
* TCP port manager/queue and sysfs. Finally the driver registers itself
* with the cnic module
*/
static int __init bnx2i_mod_init(void)
{
int err;
printk(KERN_INFO "%s", version);
if (!is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
bnx2i_scsi_xport_template =
iscsi_register_transport(&bnx2i_iscsi_transport);
if (!bnx2i_scsi_xport_template) {
printk(KERN_ERR "Could not register bnx2i transport.\n");
err = -ENOMEM;
goto out;
}
err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
if (err) {
printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
goto unreg_xport;
}
return 0;
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
out:
return err;
}
/**
* bnx2i_mod_exit - module cleanup/exit entry point
*
* The global resource lock and host adapter lock are held during critical
* sections in this function. The driver browses through the adapter list,
* cleans up each instance, unregisters the iscsi transport name, and finally
* unregisters itself with the cnic module
*/
static void __exit bnx2i_mod_exit(void)
{
struct bnx2i_hba *hba;
write_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) {
hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
list_del(&hba->link);
adapter_count--;
if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
bnx2i_reg_device--;
}
write_unlock(&bnx2i_dev_lock);
bnx2i_free_hba(hba);
write_lock(&bnx2i_dev_lock);
}
write_unlock(&bnx2i_dev_lock);
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
}
module_init(bnx2i_mod_init);
module_exit(bnx2i_mod_exit);

File diff suppressed because it is too large

View file

@ -0,0 +1,142 @@
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2004 - 2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include "bnx2i.h"
/**
* bnx2i_dev_to_hba - maps dev pointer to adapter struct
* @dev: device pointer
*
* Map device to hba structure
*/
static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
{
struct Scsi_Host *shost = class_to_shost(dev);
return iscsi_host_priv(shost);
}
/**
* bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
* @dev: device pointer
* @buf: buffer to return current SQ size parameter
*
* Returns the current SQ size parameter; this parameter determines the
* number of outstanding iSCSI commands supported on a connection
*/
static ssize_t bnx2i_show_sq_info(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
return sprintf(buf, "0x%x\n", hba->max_sqes);
}
/**
* bnx2i_set_sq_info - update send queue (SQ) size parameter
* @dev: device pointer
* @buf: buffer holding the new SQ size parameter
* @count: parameter buffer size
*
* Interface for the user to change the send queue size allocated for each
* connection. The value must be within SQ limits and a power of 2; the
* latter is required because of how libiscsi preallocates tasks.
*/
static ssize_t bnx2i_set_sq_info(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
u32 val;
int max_sq_size;
if (hba->ofld_conns_active)
goto skip_config;
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
else
max_sq_size = BNX2I_570X_SQ_WQES_MAX;
if (sscanf(buf, " 0x%x ", &val) > 0) {
if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
(is_power_of_2(val)))
hba->max_sqes = val;
}
return count;
skip_config:
printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
return 0;
}
/**
* bnx2i_show_ccell_info - returns command cell (HQ) size
* @dev: device pointer
* @buf: buffer to return current CCELL size parameter
*
* Returns the per-connection TCP history queue size parameter
*/
static ssize_t bnx2i_show_ccell_info(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
return sprintf(buf, "0x%x\n", hba->num_ccell);
}
/**
* bnx2i_set_ccell_info - set command cell (HQ) size
* @dev: device pointer
* @buf: buffer holding the new CCELL size parameter
* @count: parameter buffer size
*
* Updates the per-connection TCP history queue size parameter
*/
static ssize_t bnx2i_set_ccell_info(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 val;
struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
if (hba->ofld_conns_active)
goto skip_config;
if (sscanf(buf, " 0x%x ", &val) > 0) {
if ((val >= BNX2I_CCELLS_MIN) &&
(val <= BNX2I_CCELLS_MAX)) {
hba->num_ccell = val;
}
}
return count;
skip_config:
printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
return 0;
}
static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
bnx2i_show_sq_info, bnx2i_set_sq_info);
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
bnx2i_show_ccell_info, bnx2i_set_ccell_info);
struct device_attribute *bnx2i_dev_attributes[] = {
&dev_attr_sq_size,
&dev_attr_num_ccell,
NULL
};

View file

@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
void cxgb3i_adapter_open(struct t3cdev *);
void cxgb3i_adapter_close(struct t3cdev *);
struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
struct net_device *);
void cxgb3i_hba_host_remove(struct cxgb3i_hba *);

View file

@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <linux/crypto.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
* cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
* @t3dev: t3cdev adapter
*/
struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
{
struct cxgb3i_adapter *snic;
int i;
@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
/**
* cxgb3i_ep_connect - establish TCP connection to target portal
* @shost: scsi host to use
* @dst_addr: target IP address
* @non_blocking: blocking or non-blocking call
*
* Initiates a TCP/IP connection to the dst_addr
*/
static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
struct sockaddr *dst_addr,
int non_blocking)
{
struct iscsi_endpoint *ep;
struct cxgb3i_endpoint *cep;
struct cxgb3i_hba *hba;
struct cxgb3i_hba *hba = NULL;
struct s3_conn *c3cn = NULL;
int err = 0;
if (shost)
hba = iscsi_host_priv(shost);
cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
c3cn = cxgb3i_c3cn_create();
if (!c3cn) {
cxgb3i_log_info("ep connect OOM.\n");
@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
goto release_conn;
}
err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
(struct sockaddr_in *)dst_addr);
if (err < 0) {
cxgb3i_log_info("ep connect failed.\n");
goto release_conn;
}
hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
if (!hba) {
err = -ENOSPC;
cxgb3i_log_info("NOT going through cxgbi device.\n");
goto release_conn;
}
if (shost && hba != iscsi_host_priv(shost)) {
err = -ENOSPC;
cxgb3i_log_info("Could not connect through request host%u\n",
shost->host_no);
goto release_conn;
}
if (c3cn_is_closing(c3cn)) {
err = -ENOSPC;
cxgb3i_log_info("ep connect unable to connect.\n");

View file

@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
return NULL;
}
static struct rtable *find_route(__be32 saddr, __be32 daddr,
static struct rtable *find_route(struct net_device *dev,
__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)
{
struct rtable *rt;
struct flowi fl = {
.oif = 0,
.oif = dev ? dev->ifindex : 0,
.nl_u = {
.ip4_u = {
.daddr = daddr,
@ -1573,36 +1574,40 @@ static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
*
* return 0 if active open request is sent, < 0 otherwise.
*/
int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
struct sockaddr_in *usin)
{
struct rtable *rt;
struct net_device *dev;
struct cxgb3i_sdev_data *cdata;
struct t3cdev *cdev;
__be32 sipv4;
int err;
c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
c3cn->daddr.sin_port = usin->sin_port;
c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
rt = find_route(c3cn->saddr.sin_addr.s_addr,
rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
c3cn->daddr.sin_addr.s_addr,
c3cn->saddr.sin_port,
c3cn->daddr.sin_port);
if (rt == NULL) {
c3cn_conn_debug("NO route to 0x%x, port %u.\n",
c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
c3cn->daddr.sin_addr.s_addr,
ntohs(c3cn->daddr.sin_port));
ntohs(c3cn->daddr.sin_port),
dev ? dev->name : "any");
return -ENETUNREACH;
}
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
c3cn->daddr.sin_addr.s_addr,
ntohs(c3cn->daddr.sin_port));
ntohs(c3cn->daddr.sin_port),
dev ? dev->name : "any");
ip_rt_put(rt);
return -ENETUNREACH;
}

View file

@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
void cxgb3i_sdev_remove(struct t3cdev *);
struct s3_conn *cxgb3i_c3cn_create(void);
int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
struct sockaddr_in *);
void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
void cxgb3i_c3cn_release(struct s3_conn *);

View file

@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
struct rdac_dh_data *h = get_rdac_data(sdev);
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
/* LUN Not Ready - Logical Unit Not Ready and is in
* the process of becoming ready
* Just retry.
*/
return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
/* LUN Not Ready - Storage firmware incompatible
* Manual code synchronisation required.

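The rdac hunk above adds a retry for sense 02h/04h/01h, "Logical Unit Is In Process Of Becoming Ready". For orientation, a standalone sketch of how such key/ASC/ASCQ triples drive a retry decision (hypothetical helper and return codes, not the scsi_dh_rdac table):

#include <stdio.h>

#define NOT_READY	0x02	/* SCSI sense key */
#define RETRY_LATER	1	/* illustrative: requeue the command */
#define NOT_HANDLED	0	/* fall through to generic error handling */

static int check_sense(unsigned char key, unsigned char asc, unsigned char ascq)
{
	if (key == NOT_READY && asc == 0x04 && ascq == 0x01)
		return RETRY_LATER;	/* LU becoming ready: just retry */
	if (key == NOT_READY && asc == 0x04 && ascq == 0x81)
		return NOT_HANDLED;	/* firmware mismatch: manual fix */
	return NOT_HANDLED;
}

int main(void)
{
	printf("%d\n", check_sense(NOT_READY, 0x04, 0x01));	/* prints 1 */
	return 0;
}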
View file

@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
/* fcoe host list */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
static int fcoe_hostlist_remove(const struct fc_lport *);
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
lp->link_up = 0;
lp->qfull = 0;
lp->max_retry_count = 3;
lp->max_rport_retry_count = 3;
lp->e_d_tov = 2 * 1000; /* FC-FS default */
lp->r_a_tov = 2 * 2 * 1000;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@ -166,6 +166,18 @@ static int fcoe_lport_config(struct fc_lport *lp)
return 0;
}
/**
* fcoe_queue_timer() - fcoe queue timer
* @lp: the fc_lport pointer
*
* Calls fcoe_check_wait_queue on timeout
*
*/
static void fcoe_queue_timer(ulong lp)
{
fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
}
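fcoe_queue_timer() replaces the global fcoe_timer watchdog with a per-lport timer. Its lifecycle is split across the hunks that follow; gathered in one place for orientation only (these are the diff's own calls, in the pre-2017 kernel timer API this code is written against, not a compilable unit):

/* at netdev configuration: bind the callback and its lport argument */
setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);

/* while a backlog remains: re-arm from fcoe_check_wait_queue() */
if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
	mod_timer(&fc->timer, jiffies + 2);

/* at fcoe_if_destroy(): wait for a running callback, then kill it */
del_timer_sync(&fc->timer);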
/**
* fcoe_netdev_config() - Set up netdev for SW FCoE
* @lp : ptr to the fc_lport
@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
}
skb_queue_head_init(&fc->fcoe_pending_queue);
fc->fcoe_pending_queue_active = 0;
setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
/* setup Source Mac Address */
memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
/* Free existing skbs */
fcoe_clean_pending_queue(lp);
/* Stop the timer */
del_timer_sync(&fc->timer);
/* Free memory used by statistical counters */
fc_lport_free_stats(lp);
@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
*/
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
int wlen, rc = 0;
int wlen;
u32 crc;
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
sof = fr_sof(fp);
eof = fr_eof(fp);
elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
elen = sizeof(struct ethhdr);
hlen = sizeof(struct fcoe_hdr);
tlen = sizeof(struct fcoe_crc_eof);
wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
/* send down to lld */
fr_dev(fp) = lp;
if (fc->fcoe_pending_queue.qlen)
rc = fcoe_check_wait_queue(lp);
if (rc == 0)
rc = fcoe_start_io(skb);
if (rc) {
spin_lock_bh(&fc->fcoe_pending_queue.lock);
__skb_queue_tail(&fc->fcoe_pending_queue, skb);
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
lp->qfull = 1;
}
fcoe_check_wait_queue(lp, skb);
else if (fcoe_start_io(skb))
fcoe_check_wait_queue(lp, skb);
return 0;
}
@ -1267,32 +1273,6 @@ int fcoe_percpu_receive_thread(void *arg)
return 0;
}
/**
* fcoe_watchdog() - fcoe timer callback
* @vp:
*
* This checks the pending queue length for fcoe and set lport qfull
* if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
* fcoe_hostlist.
*
* Returns: 0 for success
*/
void fcoe_watchdog(ulong vp)
{
struct fcoe_softc *fc;
read_lock(&fcoe_hostlist_lock);
list_for_each_entry(fc, &fcoe_hostlist, list) {
if (fc->ctlr.lp)
fcoe_check_wait_queue(fc->ctlr.lp);
}
read_unlock(&fcoe_hostlist_lock);
fcoe_timer.expires = jiffies + (1 * HZ);
add_timer(&fcoe_timer);
}
/**
* fcoe_check_wait_queue() - attempt to clear the transmit backlog
* @lp: the fc_lport
@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp)
* The wait_queue is used when the skb transmit fails. The skb is placed
* on the wait_queue, which is emptied by the timer function or by the
* next skb transmit.
*
* Returns: 0 for success
*/
static int fcoe_check_wait_queue(struct fc_lport *lp)
static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
{
struct fcoe_softc *fc = lport_priv(lp);
struct sk_buff *skb;
int rc = -1;
int rc;
spin_lock_bh(&fc->fcoe_pending_queue.lock);
if (skb)
__skb_queue_tail(&fc->fcoe_pending_queue, skb);
if (fc->fcoe_pending_queue_active)
goto out;
fc->fcoe_pending_queue_active = 1;
@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
lp->qfull = 0;
if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
mod_timer(&fc->timer, jiffies + 2);
fc->fcoe_pending_queue_active = 0;
rc = fc->fcoe_pending_queue.qlen;
out:
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
lp->qfull = 1;
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
return rc;
return;
}
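The reworked fcoe_check_wait_queue() both enqueues the caller's skb and drains the backlog, with fcoe_pending_queue_active ensuring that only one context transmits at a time while others merely append. A minimal userspace sketch of that single-drainer shape (pthreads, hypothetical queue type; the real code also drops the lock around each transmit):

#include <pthread.h>
#include <stddef.h>

struct txq {
	pthread_mutex_t lock;
	int active;	/* a drainer is already running */
	int qlen;
};

static void check_wait_queue(struct txq *q, void *skb)
{
	pthread_mutex_lock(&q->lock);
	if (skb)
		q->qlen++;		/* append deferred frame */
	if (q->active)
		goto out;		/* appender only; a drainer exists */
	q->active = 1;
	while (q->qlen > 0)		/* transmit backlog in order */
		q->qlen--;
	q->active = 0;
out:
	pthread_mutex_unlock(&q->lock);
}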
/**
* fcoe_dev_setup() - setup link change notification interface
*/
static void fcoe_dev_setup()
static void fcoe_dev_setup(void)
{
register_netdevice_notifier(&fcoe_notifier);
}
/**
* fcoe_dev_setup() - cleanup link change notification interface
* fcoe_dev_cleanup() - cleanup link change notification interface
*/
static void fcoe_dev_cleanup(void)
{
@ -1815,10 +1799,6 @@ static int __init fcoe_init(void)
/* Setup link change notification */
fcoe_dev_setup();
setup_timer(&fcoe_timer, fcoe_watchdog, 0);
mod_timer(&fcoe_timer, jiffies + (10 * HZ));
fcoe_if_init();
return 0;
@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void)
fcoe_dev_cleanup();
/* Stop the timer */
del_timer_sync(&fcoe_timer);
/* releases the associated fcoe hosts */
list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
fcoe_if_destroy(fc->real_dev);

View file

@ -61,6 +61,7 @@ struct fcoe_softc {
struct packet_type fip_packet_type;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
struct timer_list timer; /* queue timer */
struct fcoe_ctlr ctlr;
};

View file

@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
sol->desc.size.fd_size = htons(fcoe_size);
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_802_3);
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_802_3);
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
if (dtype != ELS_FLOGI)
memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
skb->protocol = htons(ETH_P_802_3);
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
u16 old_xid;
u8 op;
if (fip->state == FIP_ST_NON_FIP)
return 0;
fh = (struct fc_frame_header *)skb->data;
op = *(u8 *)(fh + 1);
switch (op) {
case ELS_FLOGI:
if (op == ELS_FLOGI) {
old_xid = fip->flogi_oxid;
fip->flogi_oxid = ntohs(fh->fh_ox_id);
if (fip->state == FIP_ST_AUTO) {
@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
fip->map_dest = 1;
return 0;
}
if (fip->state == FIP_ST_NON_FIP)
fip->map_dest = 1;
}
if (fip->state == FIP_ST_NON_FIP)
return 0;
switch (op) {
case ELS_FLOGI:
op = FIP_DT_FLOGI;
break;
case ELS_FDISC:

View file

@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
}
lp->max_retry_count = fnic->config.flogi_retries;
lp->max_rport_retry_count = fnic->config.plogi_retries;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_CONF_COMPL);
if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)

View file

@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
struct Scsi_Host *host, gdth_ha_str *ha)
{
int size = 0,len = 0;
int hlen;
off_t begin = 0,pos = 0;
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
if (reserve_list[0] == 0xff)
strcpy(hrec, "--");
else {
sprintf(hrec, "%d", reserve_list[0]);
hlen = sprintf(hrec, "%d", reserve_list[0]);
for (i = 1; i < MAX_RES_ARGS; i++) {
if (reserve_list[i] == 0xff)
break;
sprintf(hrec,"%s,%d", hrec, reserve_list[i]);
hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
}
}
size = sprintf(buffer+len,

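The gdth_proc.c change above fixes an instance of undefined behaviour: sprintf(hrec, "%s,%d", hrec, ...) passes the destination buffer as a source argument, and C forbids overlapping source and destination objects here. The replacement tracks the written length and appends with snprintf. A standalone before/after sketch:

#include <stdio.h>

int main(void)
{
	char hrec[161];
	int hlen;

	/* WRONG: sprintf(hrec, "%s,%d", hrec, 5); -- hrec is both source
	 * and destination; overlapping objects are undefined behaviour */

	hlen = sprintf(hrec, "%d", 3);			/* "3"   */
	hlen += snprintf(hrec + hlen, sizeof(hrec) - hlen,
			 ",%d", 5);			/* "3,5" */
	printf("%s\n", hrec);
	return 0;
}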
View file

@ -110,7 +110,7 @@ static const struct {
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
int fc_rsp_len = rsp->fcp_rsp_len;
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
rsp->data.info.rsp_code))
return DID_ERROR << 16;
@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
case IBMVFC_TGT_ACTION_DEL_RPORT:
break;
default:
if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
tgt->add_rport = 0;
tgt->action = action;
break;
}
@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_LOGO_WAIT:
if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_INIT_WAIT:
if (vhost->action == IBMVFC_HOST_ACTION_INIT)
vhost->action = action;
@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
switch (vhost->action) {
case IBMVFC_HOST_ACTION_INIT_WAIT:
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
vhost->action = action;
break;
default:
@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_NONE:
default:
vhost->action = action;
@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
}
list_for_each_entry(tgt, &vhost->targets, queue)
tgt->need_login = 1;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
}
@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
/* Clean out the queue */
@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
}
/**
* __ibmvfc_reset_host - Reset the connection to the server (no locking)
* ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
* @vhost: struct ibmvfc host to reset
**/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
int rc;
@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
}
/**
* ibmvfc_reset_host - Reset the connection to the server
* __ibmvfc_reset_host - Reset the connection to the server (no locking)
* @vhost: struct ibmvfc host to reset
**/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
vhost->job_step = ibmvfc_npiv_logout;
wake_up(&vhost->work_wait_q);
} else
ibmvfc_hard_reset_host(vhost);
}
/**
* ibmvfc_reset_host - Reset the connection to the server
* @vhost: ibmvfc host struct
**/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
unsigned long flags;
@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
* ibmvfc_retry_host_init - Retry host initialization if allowed
* @vhost: ibmvfc host struct
*
* Returns: 1 if init will be retried / 0 if not
*
**/
static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
int retry = 0;
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
vhost->delay_init = 1;
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
__ibmvfc_reset_host(vhost);
else
else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
retry = 1;
}
}
wake_up(&vhost->work_wait_q);
return retry;
}
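The new return value feeds a logging idiom used throughout the failure paths below: level starts at IBMVFC_DEFAULT_LOG_LEVEL and is bumped by one exactly when a retry was scheduled, so transient failures are printed only at a raised log_level while terminal ones keep the default severity. In reduced form, using the identifiers from the hunks that follow:

int level = IBMVFC_DEFAULT_LOG_LEVEL;

if (ibmvfc_retry_cmd(rsp->status, rsp->error))
	level += ibmvfc_retry_host_init(vhost);	/* +1 only if retried */
else
	ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);

ibmvfc_log(vhost, level, "NPIV Login failed: ...\n");	/* gated on level */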
/**
@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->partition_num = vhost->partition_number;
login_info->vfc_frame_version = 1;
login_info->fcp_version = 3;
login_info->flags = IBMVFC_FLUSH_ON_HALT;
if (vhost->client_migrated)
login_info->flags = IBMVFC_CLIENT_MIGRATED;
login_info->flags |= IBMVFC_CLIENT_MIGRATED;
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
@ -1451,6 +1484,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
/**
* ibmvfc_relogin - Log back into the specified device
* @sdev: scsi device struct
*
**/
static void ibmvfc_relogin(struct scsi_device *sdev)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
}
}
ibmvfc_reinit_host(vhost);
}
/**
* ibmvfc_scsi_done - Handle responses from commands
* @evt: ibmvfc event to be handled
@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
ibmvfc_reinit_host(evt->vhost);
ibmvfc_relogin(cmnd->device);
if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
cmnd->result = (DID_ERROR << 16);
@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_host *vhost)
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
struct ibmvfc_target *tgt;
ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
" node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
switch (crq->event) {
case IBMVFC_AE_LINK_UP:
case IBMVFC_AE_RESUME:
switch (crq->link_state) {
case IBMVFC_AE_LS_LINK_DOWN:
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
break;
case IBMVFC_AE_LS_LINK_DEAD:
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
break;
case IBMVFC_AE_LS_LINK_UP:
case IBMVFC_AE_LS_LINK_BOUNCED:
default:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
};
break;
case IBMVFC_AE_LINK_UP:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
vhost->events_to_log |= IBMVFC_AE_RSCN;
ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_ELS_LOGO:
case IBMVFC_AE_ELS_PRLO:
case IBMVFC_AE_ELS_PLOGI:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
break;
if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
continue;
if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
continue;
if (crq->node_name && tgt->ids.node_name != crq->node_name)
continue;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
}
ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_LINK_DOWN:
@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
return;
case IBMVFC_CRQ_XPORT_EVENT:
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
done = 1;
}
if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
if (vhost->scan_complete)
done = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
vhost->login_buf->resp.partition_name);
}
static struct device_attribute ibmvfc_host_partition_name = {
.attr = {
.name = "partition_name",
.mode = S_IRUGO,
},
.show = ibmvfc_show_host_partition_name,
};
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
vhost->login_buf->resp.device_name);
}
static struct device_attribute ibmvfc_host_device_name = {
.attr = {
.name = "device_name",
.mode = S_IRUGO,
},
.show = ibmvfc_show_host_device_name,
};
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
vhost->login_buf->resp.port_loc_code);
}
static struct device_attribute ibmvfc_host_loc_code = {
.attr = {
.name = "port_loc_code",
.mode = S_IRUGO,
},
.show = ibmvfc_show_host_loc_code,
};
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
vhost->login_buf->resp.drc_name);
}
static struct device_attribute ibmvfc_host_drc_name = {
.attr = {
.name = "drc_name",
.mode = S_IRUGO,
},
.show = ibmvfc_show_host_drc_name,
};
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
}
static struct device_attribute ibmvfc_host_npiv_version = {
.attr = {
.name = "npiv_version",
.mode = S_IRUGO,
},
.show = ibmvfc_show_host_npiv_version,
};
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
}
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
return strlen(buf);
}
static struct device_attribute ibmvfc_log_level_attr = {
.attr = {
.name = "log_level",
.mode = S_IRUGO | S_IWUSR,
},
.show = ibmvfc_show_log_level,
.store = ibmvfc_store_log_level
};
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
ibmvfc_show_log_level, ibmvfc_store_log_level);
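The sysfs cleanup above relies on DEVICE_ATTR() producing a struct device_attribute named dev_attr_<name>, which is why ibmvfc_attrs[] below can switch to &dev_attr_* entries. Roughly, the last definition expands to the same struct it replaces:

/* approximate expansion of DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, ...) */
struct device_attribute dev_attr_log_level = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ibmvfc_show_log_level,
	.store = ibmvfc_store_log_level,
};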
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
#endif
static struct device_attribute *ibmvfc_attrs[] = {
&ibmvfc_host_partition_name,
&ibmvfc_host_device_name,
&ibmvfc_host_loc_code,
&ibmvfc_host_drc_name,
&ibmvfc_host_npiv_version,
&ibmvfc_log_level_attr,
&dev_attr_partition_name,
&dev_attr_device_name,
&dev_attr_port_loc_code,
&dev_attr_drc_name,
&dev_attr_npiv_version,
&dev_attr_capabilities,
&dev_attr_log_level,
NULL
};
@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
* @tgt: ibmvfc target struct
* @job_step: initialization job step
*
* Returns: 1 if step will be retried / 0 if not
*
**/
static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
ibmvfc_init_tgt(tgt, job_step);
return 1;
}
/* Defined in FC-LS */
@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
u32 status = rsp->common.status;
int index;
int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
tgt->add_rport = 1;
} else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
} else if (prli_rsp[index].retry)
@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error),
rsp->status, rsp->error, status);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error),
rsp->status, rsp->error, status);
break;
};
@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
u32 status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
tgt->need_login = 1;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
tgt->need_login = 1;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
u32 status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
break;
};
@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
if (!tgt) {
dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
scsi_id);
@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
u32 mad_status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
break;
case IBMVFC_MAD_FAILED:
dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
ibmvfc_retry_host_init(vhost);
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
u32 mad_status = evt->xfer_iu->npiv_login.common.status;
struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
unsigned int npiv_max_sectors;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_free_event(evt);
break;
case IBMVFC_MAD_FAILED:
dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_host_init(vhost);
level += ibmvfc_retry_host_init(vhost);
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
}
vhost->logged_in = 1;
npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@ -3635,6 +3701,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
};
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
ibmvfc_free_event(evt);
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
if (list_empty(&vhost->sent) &&
vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
ibmvfc_init_host(vhost, 0);
return;
}
break;
case IBMVFC_MAD_FAILED:
case IBMVFC_MAD_NOT_SUPPORTED:
case IBMVFC_MAD_CRQ_ERROR:
case IBMVFC_MAD_DRIVER_FAILED:
default:
ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
break;
}
ibmvfc_hard_reset_host(vhost);
}
/**
* ibmvfc_npiv_logout - Issue an NPIV Logout
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.npiv_logout;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_NPIV_LOGOUT;
mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
ibmvfc_dbg(vhost, "Sent NPIV logout\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
/**
* ibmvfc_dev_init_to_do - Is there target initialization work to do?
* @vhost: ibmvfc host struct
@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_INIT_WAIT:
case IBMVFC_HOST_ACTION_LOGO_WAIT:
return 0;
case IBMVFC_HOST_ACTION_TGT_INIT:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
struct ibmvfc_host *vhost = tgt->vhost;
struct fc_rport *rport = tgt->rport;
struct fc_rport *rport;
unsigned long flags;
if (rport) {
tgt_dbg(tgt, "Setting rport roles\n");
fc_remote_port_rolechg(rport, tgt->ids.roles);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
}
tgt_dbg(tgt, "Adding rport\n");
rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
spin_lock_irqsave(vhost->host->host_lock, flags);
tgt->rport = rport;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
list_del(&tgt->queue);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
}
if (rport) {
tgt_dbg(tgt, "rport add succeeded\n");
tgt->rport = rport;
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
tgt->target_id = rport->scsi_target_id;
@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
vhost->events_to_log = 0;
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_LOGO_WAIT:
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_LOGO:
vhost->job_step(vhost);
break;
case IBMVFC_HOST_ACTION_INIT:
BUG_ON(vhost->state != IBMVFC_INITIALIZING);
if (vhost->delay_init) {
@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
if (vhost->state == IBMVFC_INITIALIZING) {
if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
if (vhost->reinit) {
vhost->reinit = 0;
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
} else {
ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
wake_up(&vhost->init_wait_q);
schedule_work(&vhost->rport_add_work_q);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
}
return;
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
break;
case IBMVFC_HOST_ACTION_TGT_ADD:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
return;
}
}
if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
vhost->reinit = 0;
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
wake_up(&vhost->init_wait_q);
}
break;
default:
break;
};
@ -4117,6 +4240,56 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
return -ENOMEM;
}
/**
* ibmvfc_rport_add_thread - Worker thread for rport adds
* @work: work struct
*
**/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
rport_add_work_q);
struct ibmvfc_target *tgt;
struct fc_rport *rport;
unsigned long flags;
int did_work;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
do {
did_work = 0;
if (vhost->state != IBMVFC_ACTIVE)
break;
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->add_rport) {
did_work = 1;
tgt->add_rport = 0;
kref_get(&tgt->kref);
rport = tgt->rport;
if (!rport) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
} else if (get_device(&rport->dev)) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt_dbg(tgt, "Setting rport roles\n");
fc_remote_port_rolechg(rport, tgt->ids.roles);
put_device(&rport->dev);
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
spin_lock_irqsave(vhost->host->host_lock, flags);
break;
}
}
} while(did_work);
if (vhost->state == IBMVFC_ACTIVE)
vhost->scan_complete = 1;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
}
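The loop above encodes a standard transport-class constraint: fc_remote_port_add()/fc_remote_port_rolechg() can sleep, so the host lock must be dropped around them; a kref on the target (and get_device() on an existing rport) pins the objects across the unlocked window, and the list walk restarts afterwards because the target list may have changed. Condensed shape of the inner loop:

/* condensed: drop the lock around sleeping calls, pin, restart the scan */
list_for_each_entry(tgt, &vhost->targets, queue) {
	if (!tgt->add_rport)
		continue;
	tgt->add_rport = 0;
	kref_get(&tgt->kref);				/* pin target */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_tgt_add_rport(tgt);			/* may sleep */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	break;						/* list may have changed */
}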
/**
* ibmvfc_probe - Adapter hot plug add entry point
* @vdev: vio device struct
@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
if ((rc = ibmvfc_alloc_mem(vhost)))
goto free_scsi_host;

View file

@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.5"
#define IBMVFC_DRIVER_DATE "(March 19, 2009)"
#define IBMVFC_DRIVER_VERSION "1.0.6"
#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@ -57,9 +57,10 @@
* Ensure we have resources for ERP and initialization:
* 1 for ERP
* 1 for initialization
* 1 for NPIV Logout
* 2 for each discovery thread
*/
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2))
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
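Worked out, and assuming the driver's default of four discovery threads (the default is set elsewhere, so treat that number as an assumption), the reservation is now 1 + 1 + 1 + 2 * 4 = 11 internal events, one more than before the NPIV Logout addition.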
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
IBMVFC_NPIV_LOGOUT = 0x0800,
};
struct ibmvfc_mad_common {
@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
struct srp_direct_buf buffer;
}__attribute__((packed, aligned (8)));
struct ibmvfc_npiv_logout_mad {
struct ibmvfc_mad_common common;
}__attribute__((packed, aligned (8)));
#define IBMVFC_MAX_NAME 256
struct ibmvfc_npiv_login {
@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
#define IBMVFC_NATIVE_FC 0x01
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 reserved;
u64 capabilites;
u64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 max_cmds;
u32 scsi_id_sz;
u64 max_dma_len;
@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
dma_addr_t msg_token;
};
enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_UP = 0x01,
IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
IBMVFC_AE_LS_LINK_DOWN = 0x04,
IBMVFC_AE_LS_LINK_DEAD = 0x08,
};
struct ibmvfc_async_crq {
volatile u8 valid;
u8 pad[3];
u8 link_state;
u8 pad[2];
u32 pad2;
volatile u64 event;
volatile u64 scsi_id;
@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
union ibmvfc_iu {
struct ibmvfc_mad_common mad_common;
struct ibmvfc_npiv_login_mad npiv_login;
struct ibmvfc_npiv_logout_mad npiv_logout;
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
@ -575,7 +591,6 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
IBMVFC_TGT_ACTION_ADD_RPORT,
IBMVFC_TGT_ACTION_DEL_RPORT,
};
@ -588,6 +603,7 @@ struct ibmvfc_target {
int target_id;
enum ibmvfc_target_action action;
int need_login;
int add_rport;
int init_retries;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_NONE = 0,
IBMVFC_HOST_ACTION_LOGO,
IBMVFC_HOST_ACTION_LOGO_WAIT,
IBMVFC_HOST_ACTION_INIT,
IBMVFC_HOST_ACTION_INIT_WAIT,
IBMVFC_HOST_ACTION_QUERY,
@ -635,7 +653,6 @@ enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_ALLOC_TGTS,
IBMVFC_HOST_ACTION_TGT_INIT,
IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
IBMVFC_HOST_ACTION_TGT_ADD,
};
enum ibmvfc_host_state {
@ -682,6 +699,8 @@ struct ibmvfc_host {
int client_migrated;
int reinit;
int delay_init;
int scan_complete;
int logged_in;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
@ -692,6 +711,7 @@ struct ibmvfc_host {
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
struct tasklet_struct tasklet;
struct work_struct rport_add_work_q;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
};
@ -707,6 +727,12 @@ struct ibmvfc_host {
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
#define tgt_log(t, level, fmt, ...) \
do { \
if ((t)->vhost->log_level >= level) \
tgt_err(t, fmt, ##__VA_ARGS__); \
} while (0)
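tgt_log() is the target-scoped counterpart of ibmvfc_log(): the print fires only when the host's runtime log_level is at least the requested level, which is what lets the level += retry idiom above demote retried failures. For example:

/* printed only when vhost->log_level >= IBMVFC_DEFAULT_LOG_LEVEL + 1 */
tgt_log(tgt, IBMVFC_DEFAULT_LOG_LEVEL + 1,
	"Process Login failed: %s\n", "will retry");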
#define ibmvfc_dbg(vhost, ...) \
DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))

View file

@ -70,6 +70,7 @@
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <asm/firmware.h>
@ -87,9 +88,15 @@
*/
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
static struct scsi_transport_template *ibmvscsi_transport_template;
@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO );
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
/* ------------------------------------------------------------
* Routines for the event pool and event structs
@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
/* ------------------------------------------------------------
* Routines for driver initialization
*/
/**
* adapter_info_rsp: - Handle response to MAD adapter info request
* @evt_struct: srp_event_struct with the response
*
* Used as a "done" callback when sending adapter_info. Gets called
* by ibmvscsi_handle_crq()
*/
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
dma_unmap_single(hostdata->dev,
evt_struct->iu.mad.adapter_info.buffer,
evt_struct->iu.mad.adapter_info.common.length,
DMA_BIDIRECTIONAL);
if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
dev_err(hostdata->dev, "error %d getting adapter info\n",
evt_struct->xfer_iu->mad.adapter_info.common.status);
} else {
dev_info(hostdata->dev, "host srp version: %s, "
"host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
hostdata->madapter_info.partition_number,
hostdata->madapter_info.os_type,
hostdata->madapter_info.port_max_txu[0]);
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
hostdata->madapter_info.port_max_txu[0] >> 9;
if (hostdata->madapter_info.os_type == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
dev_err(hostdata->dev, "limiting scatterlists to %d\n",
MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
/**
* map_persist_bufs: - Pre-map persistent data for adapter logins
* @hostdata: ibmvscsi_host_data of host
*
* Map the capabilities and adapter info DMA buffers to avoid runtime failures.
* Return 1 on error, 0 on success.
*/
static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
return 1;
}
hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
&hostdata->madapter_info,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
dma_unmap_single(hostdata->dev, hostdata->caps_addr,
sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
return 1;
}
return 0;
}
/**
* send_mad_adapter_info: - Sends the mad adapter info request
* and stores the result so it can be retrieved with
* sysfs. We COULD consider causing a failure if the
* returned SRP version doesn't match ours.
* @hostdata: ibmvscsi_host_data of host
*
* Returns zero if successful.
*/
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
* unmap_persist_bufs: - Unmap persistent data needed for adapter logins
* @hostdata: ibmvscsi_host_data of host
*
* Unmap the capabilities and adapter info DMA buffers
*/
static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
unsigned long flags;
dma_addr_t addr;
dma_unmap_single(hostdata->dev, hostdata->caps_addr,
sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
dev_err(hostdata->dev,
"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
return;
}
init_event_struct(evt_struct,
adapter_info_rsp,
VIOSRP_MAD_FORMAT,
init_timeout);
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
req->common.length = sizeof(hostdata->madapter_info);
req->buffer = addr = dma_map_single(hostdata->dev,
&hostdata->madapter_info,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(hostdata->dev, req->buffer)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"Unable to map request_buffer for "
"adapter_info!\n");
free_event_struct(&hostdata->pool, evt_struct);
return;
}
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
dma_unmap_single(hostdata->dev,
addr,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
}
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
}
/**
* login_rsp: - Handle response to SRP login request
@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
}
dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
dev_err(hostdata->dev, "Invalid request_limit.\n");
hostdata->client_migrated = 0;
/* Now we know what the real request-limit is.
* This value is set rather than added to request_limit because
@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
/* If we had any pending I/Os, kick them */
scsi_unblock_requests(hostdata->host);
send_mad_adapter_info(hostdata);
return;
}
/**
* send_srp_login: - Sends the srp login
* @hostdata: ibmvscsi_host_data of host
*
*
* Returns zero if successful.
*/
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
unsigned long flags;
struct srp_login_req *login;
struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
return FAILED;
}
init_event_struct(evt_struct,
login_rsp,
VIOSRP_SRP_FORMAT,
init_timeout);
BUG_ON(!evt_struct);
init_event_struct(evt_struct, login_rsp,
VIOSRP_SRP_FORMAT, login_timeout);
login = &evt_struct->iu.srp.login_req;
memset(login, 0x00, sizeof(struct srp_login_req));
memset(login, 0, sizeof(*login));
login->opcode = SRP_LOGIN_REQ;
login->req_it_iu_len = sizeof(union srp_iu);
login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
* the login request we are just sending and login requests always
@@ -962,12 +911,240 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
*/
atomic_set(&hostdata->request_limit, 0);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
dev_info(hostdata->dev, "sent SRP login\n");
return rc;
};
/**
* capabilities_rsp: - Handle response to MAD adapter capabilities request
* @evt_struct: srp_event_struct with the response
*
* Used as a "done" callback when sending the capabilities request.
*/
static void capabilities_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
if (evt_struct->xfer_iu->mad.capabilities.common.status) {
dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
evt_struct->xfer_iu->mad.capabilities.common.status);
} else {
if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
dev_info(hostdata->dev, "Partition migration not supported\n");
if (client_reserve) {
if (hostdata->caps.reserve.common.server_support ==
SERVER_SUPPORTS_CAP)
dev_info(hostdata->dev, "Client reserve enabled\n");
else
dev_info(hostdata->dev, "Client reserve not supported\n");
}
}
send_srp_login(hostdata);
}
/**
* send_mad_capabilities: - Sends the mad capabilities request
* and stores the result so it can be retrieved with sysfs.
* @hostdata: ibmvscsi_host_data of host
*/
static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_capabilities *req;
struct srp_event_struct *evt_struct;
unsigned long flags;
struct device_node *of_node = hostdata->dev->archdata.of_node;
const char *location;
evt_struct = get_event_struct(&hostdata->pool);
BUG_ON(!evt_struct);
init_event_struct(evt_struct, capabilities_rsp,
VIOSRP_MAD_FORMAT, info_timeout);
req = &evt_struct->iu.mad.capabilities;
memset(req, 0, sizeof(*req));
hostdata->caps.flags = CAP_LIST_SUPPORTED;
if (hostdata->client_migrated)
hostdata->caps.flags |= CLIENT_MIGRATED;
strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(hostdata->dev);
strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
req->common.type = VIOSRP_CAPABILITIES_TYPE;
req->buffer = hostdata->caps_addr;
hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
hostdata->caps.migration.ecl = 1;
if (client_reserve) {
hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
req->common.length = sizeof(hostdata->caps);
} else
req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
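The length field above is what lets the client drop the reservation entry: struct capabilities puts reserve last, so omitting it only shortens the list. A hypothetical helper restating that computation:

static u16 caps_req_length(struct capabilities *caps, int want_reserve)
{
	/* reserve is the trailing member; leaving it out truncates the list */
	return want_reserve ? sizeof(*caps)
			    : sizeof(*caps) - sizeof(caps->reserve);
}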
/**
* fast_fail_rsp: - Handle response to MAD enable fast fail
* @evt_struct: srp_event_struct with the response
*
* Used as a "done" callback when sending enable fast fail. Gets called
* by ibmvscsi_handle_crq()
*/
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
if (status == VIOSRP_MAD_NOT_SUPPORTED)
dev_err(hostdata->dev, "fast_fail not supported in server\n");
else if (status == VIOSRP_MAD_FAILED)
dev_err(hostdata->dev, "fast_fail request failed\n");
else if (status != VIOSRP_MAD_SUCCESS)
dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
send_mad_capabilities(hostdata);
}
/**
* enable_fast_fail - Send the MAD enable fast fail request
* @hostdata: ibmvscsi_host_data of host
*
* Returns zero if successful.
*/
static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
{
int rc;
unsigned long flags;
struct viosrp_fast_fail *fast_fail_mad;
struct srp_event_struct *evt_struct;
if (!fast_fail) {
send_mad_capabilities(hostdata);
return 0;
}
evt_struct = get_event_struct(&hostdata->pool);
BUG_ON(!evt_struct);
init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
fast_fail_mad = &evt_struct->iu.mad.fast_fail;
memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
fast_fail_mad->common.length = sizeof(*fast_fail_mad);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return rc;
}
/**
* adapter_info_rsp: - Handle response to MAD adapter info request
* @evt_struct: srp_event_struct with the response
*
* Used as a "done" callback when sending adapter_info. Gets called
* by ibmvscsi_handle_crq()
*/
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
dev_err(hostdata->dev, "error %d getting adapter info\n",
evt_struct->xfer_iu->mad.adapter_info.common.status);
} else {
dev_info(hostdata->dev, "host srp version: %s, "
"host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
hostdata->madapter_info.partition_number,
hostdata->madapter_info.os_type,
hostdata->madapter_info.port_max_txu[0]);
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
hostdata->madapter_info.port_max_txu[0] >> 9;
if (hostdata->madapter_info.os_type == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
dev_err(hostdata->dev, "limiting scatterlists to %d\n",
MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
}
enable_fast_fail(hostdata);
}
/**
* send_mad_adapter_info: - Sends the mad adapter info request
* and stores the result so it can be retrieved with
* sysfs. We COULD consider causing a failure if the
* returned SRP version doesn't match ours.
* @hostdata: ibmvscsi_host_data of host
*
* Returns zero if successful.
*/
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
unsigned long flags;
evt_struct = get_event_struct(&hostdata->pool);
BUG_ON(!evt_struct);
init_event_struct(evt_struct,
adapter_info_rsp,
VIOSRP_MAD_FORMAT,
info_timeout);
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
req->common.length = sizeof(hostdata->madapter_info);
req->buffer = hostdata->adapter_info_addr;
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
/**
* init_adapter: Start virtual adapter initialization sequence
*
*/
static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
send_mad_adapter_info(hostdata);
}
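init_adapter() only looks trivial; each MAD response callback above queues the next request, so the whole login bring-up runs as an asynchronous chain (as this diff wires it up):

/*
 * init_adapter()
 *   -> send_mad_adapter_info()  --done--> adapter_info_rsp()
 *   -> enable_fast_fail()       --done--> fast_fail_rsp()
 *   -> send_mad_capabilities()  --done--> capabilities_rsp()
 *   -> send_srp_login()         --done--> login_rsp()
 */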
/**
* sync_completion: Signal that a synchronous command has completed
* Note that after returning from this call, the evt_struct is freed.
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
init_timeout);
abort_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
init_timeout);
reset_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
if ((rc = ibmvscsi_ops->send_crq(hostdata,
0xC002000000000000LL, 0)) == 0) {
/* Now login */
send_srp_login(hostdata);
init_adapter(hostdata);
} else {
dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
}
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
send_srp_login(hostdata);
init_adapter(hostdata);
break;
default:
dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
hostdata->client_migrated = 1;
purge_requests(hostdata, DID_REQUEUE);
if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_event_struct(evt_struct,
sync_completion,
VIOSRP_MAD_FORMAT,
init_timeout);
info_timeout);
host_config = &evt_struct->iu.mad.host_config;
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc == 0)
wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, lock_flags);
if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
/* ------------------------------------------------------------
* sysfs attributes
*/
static ssize_t show_host_vhost_loc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
hostdata->caps.loc);
return len;
}
static struct device_attribute ibmvscsi_host_vhost_loc = {
.attr = {
.name = "vhost_loc",
.mode = S_IRUGO,
},
.show = show_host_vhost_loc,
};
static ssize_t show_host_vhost_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
hostdata->caps.name);
return len;
}
static struct device_attribute ibmvscsi_host_vhost_name = {
.attr = {
.name = "vhost_name",
.mode = S_IRUGO,
},
.show = show_host_vhost_name,
};
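Both attributes follow the stock read-only sysfs pattern: a show() routine that formats one value into buf, wrapped in a struct device_attribute. The same thing can be written with the DEVICE_ATTR() helper; foo and its value here are hypothetical:

static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", "some value");
}

static DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL);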
static ssize_t show_host_srp_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
};
static struct device_attribute *ibmvscsi_attrs[] = {
&ibmvscsi_host_vhost_loc,
&ibmvscsi_host_vhost_name,
&ibmvscsi_host_srp_version,
&ibmvscsi_host_partition_name,
&ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
atomic_set(&hostdata->request_limit, -1);
hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
if (map_persist_bufs(hostdata)) {
dev_err(&vdev->dev, "couldn't map persistent buffers\n");
goto persist_bufs_failed;
}
rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
if (rc != 0 && rc != H_RESOURCE) {
dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
host->max_lun = 8;
host->max_id = max_id;
host->max_channel = max_channel;
host->max_cmd_len = 16;
if (scsi_add_host(hostdata->host, hostdata->dev))
goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
init_pool_failed:
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
init_crq_failed:
unmap_persist_bufs(hostdata);
persist_bufs_failed:
scsi_host_put(host);
scsi_host_alloc_failed:
return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
max_events);


@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
atomic_t request_limit;
int client_migrated;
struct device *dev;
struct event_pool pool;
struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
struct list_head sent;
struct Scsi_Host *host;
struct mad_adapter_info_data madapter_info;
struct capabilities caps;
dma_addr_t caps_addr;
dma_addr_t adapter_info_addr;
};
/* routines for managing a command/response queue */


@@ -37,6 +37,7 @@
#define SRP_VERSION "16.a"
#define SRP_MAX_IU_LEN 256
#define SRP_MAX_LOC_LEN 32
union srp_iu {
struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
VIOSRP_EMPTY_IU_TYPE = 0x01,
VIOSRP_ERROR_LOG_TYPE = 0x02,
VIOSRP_ADAPTER_INFO_TYPE = 0x03,
VIOSRP_HOST_CONFIG_TYPE = 0x04
VIOSRP_HOST_CONFIG_TYPE = 0x04,
VIOSRP_CAPABILITIES_TYPE = 0x05,
VIOSRP_ENABLE_FAST_FAIL = 0x08,
};
enum viosrp_mad_status {
VIOSRP_MAD_SUCCESS = 0x00,
VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
VIOSRP_MAD_FAILED = 0xF7,
};
enum viosrp_capability_type {
MIGRATION_CAPABILITIES = 0x01,
RESERVATION_CAPABILITIES = 0x02,
};
enum viosrp_capability_support {
SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
SERVER_SUPPORTS_CAP = 0x01,
SERVER_CAP_DATA = 0x02,
};
enum viosrp_reserve_type {
CLIENT_RESERVE_SCSI_2 = 0x01,
};
enum viosrp_capability_flag {
CLIENT_MIGRATED = 0x01,
CLIENT_RECONNECT = 0x02,
CAP_LIST_SUPPORTED = 0x04,
CAP_LIST_DATA = 0x08,
};
/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
u64 buffer;
};
struct viosrp_fast_fail {
struct mad_common common;
};
struct viosrp_capabilities {
struct mad_common common;
u64 buffer;
};
struct mad_capability_common {
u32 cap_type;
u16 length;
u16 server_support;
};
struct mad_reserve_cap {
struct mad_capability_common common;
u32 type;
};
struct mad_migration_cap {
struct mad_capability_common common;
u32 ecl;
};
struct capabilities{
u32 flags;
char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN];
struct mad_migration_cap migration;
struct mad_reserve_cap reserve;
};
union mad_iu {
struct viosrp_empty_iu empty_iu;
struct viosrp_error_log error_log;
struct viosrp_adapter_info adapter_info;
struct viosrp_host_config host_config;
struct viosrp_fast_fail fast_fail;
struct viosrp_capabilities capabilities;
};
union viosrp_iu {


@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
ioa_cfg->allow_cmds = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
* Return value:
* none
**/
static void ipr_remove(struct pci_dev *pdev)
static void __devexit ipr_remove(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
.name = IPR_NAME,
.id_table = ipr_pci_table,
.probe = ipr_probe,
.remove = ipr_remove,
.remove = __devexit_p(ipr_remove),
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
};


@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
atomic_inc(&mp->stats.xid_not_found);
goto out;
}
if (ep->esb_stat & ESB_ST_COMPLETE) {
atomic_inc(&mp->stats.xid_not_found);
goto out;
}
if (ep->rxid == FC_XID_UNKNOWN)
ep->rxid = ntohs(fh->fh_rx_id);
if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {


@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
break;
case FC_CMD_TIME_OUT:
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;


@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
if (PTR_ERR(fp) == -FC_EX_CLOSED)
return fc_rport_error(rport, fp);
if (rdata->retries < rdata->local_port->max_retry_count) {
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
PTR_ERR(fp), fc_rport_state(rport));
rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_rport_init);
int fc_setup_rport()
int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
}
EXPORT_SYMBOL(fc_setup_rport);
void fc_destroy_rport()
void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
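The two prototype changes above are not cosmetic: in C, an empty parameter list declares a function taking an unspecified number of arguments, while (void) declares one taking none, letting the compiler reject stray arguments. Hypothetical illustration:

int takes_anything();     /* takes_anything(1, 2, 3) still compiles */
int takes_nothing(void);  /* takes_nothing(1) is a compile-time error */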


@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
queue_work(ihost->workq, &conn->xmitwork);
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
* if the window closed with IO queued, then kick the
* xmit thread
*/
if (!list_empty(&session->leadconn->xmitqueue) ||
!list_empty(&session->leadconn->mgmtqueue)) {
if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
iscsi_conn_queue_work(session->leadconn);
}
if (!list_empty(&session->leadconn->cmdqueue) ||
!list_empty(&session->leadconn->mgmtqueue))
iscsi_conn_queue_work(session->leadconn);
}
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
itt_t itt;
int rc;
rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
if (rc)
return rc;
if (conn->session->tt->alloc_pdu) {
rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
if (rc)
return rc;
}
hdr = (struct iscsi_cmd *) task->hdr;
itt = hdr->itt;
memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
return -EIO;
task->state = ISCSI_TASK_RUNNING;
list_move_tail(&task->running, &conn->run_list);
conn->scsicmd_pdus_cnt++;
ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
}
/**
* iscsi_complete_command - finish a task
* iscsi_free_task - free a task
* @task: iscsi cmd task
*
* Must be called with session lock.
* This function returns the scsi command to scsi-ml or cleans
* up mgmt tasks then returns the task to the pool.
*/
static void iscsi_complete_command(struct iscsi_task *task)
static void iscsi_free_task(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
session->tt->cleanup_task(task);
list_del_init(&task->running);
task->state = ISCSI_TASK_COMPLETED;
task->sc = NULL;
ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
task->itt, task->state, task->sc);
if (conn->task == task)
conn->task = NULL;
session->tt->cleanup_task(task);
task->state = ISCSI_TASK_FREE;
task->sc = NULL;
/*
* login task is preallocated so do not free
*/
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
if (conn->ping_task == task)
conn->ping_task = NULL;
if (sc) {
task->sc = NULL;
/* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
static void __iscsi_put_task(struct iscsi_task *task)
{
if (atomic_dec_and_test(&task->refcount))
iscsi_complete_command(task);
iscsi_free_task(task);
}
void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
/*
* session lock must be held
*/
/**
* iscsi_complete_task - finish a task
* @task: iscsi cmd task
* @state: state to complete task with
*
* Must be called with session lock.
*/
static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
int err)
static void iscsi_complete_task(struct iscsi_task *task, int state)
{
struct scsi_cmnd *sc;
struct iscsi_conn *conn = task->conn;
ISCSI_DBG_SESSION(conn->session,
"complete task itt 0x%x state %d sc %p\n",
task->itt, task->state, task->sc);
if (task->state == ISCSI_TASK_COMPLETED ||
task->state == ISCSI_TASK_ABRT_TMF ||
task->state == ISCSI_TASK_ABRT_SESS_RECOV)
return;
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
if (!list_empty(&task->running))
list_del_init(&task->running);
if (conn->task == task)
conn->task = NULL;
if (conn->ping_task == task)
conn->ping_task = NULL;
/* release get from queueing */
__iscsi_put_task(task);
}
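iscsi_complete_task() leans on the task refcount: queueing owns the initial reference and completion drops it, so the task is only freed once any temporary holders (error handling, the xmit thread) have also let go. A simplified model of that lifecycle, with hypothetical names:

struct my_task {
	atomic_t refcount;
	/* ... */
};

static void my_free_task(struct my_task *task)
{
	/* runs exactly once, when the last reference is dropped */
}

static void my_put_task(struct my_task *task)
{
	if (atomic_dec_and_test(&task->refcount))
		my_free_task(task);
}

static void my_queue_task(struct my_task *task)
{
	atomic_set(&task->refcount, 1);	/* reference owned by the queue */
}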
/*
* session lock must be held and if not called for a task that is
* still pending or from the xmit thread, then xmit thread must
* be suspended.
*/
static void fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
/*
* if a command completes and we get a successful tmf response
* we will hit this because the scsi eh abort code does not take
* a ref to the task.
*/
sc = task->sc;
if (!sc)
return;
if (task->state == ISCSI_TASK_PENDING)
if (task->state == ISCSI_TASK_PENDING) {
/*
* cmd never made it to the xmit thread, so we should not count
* the cmd in the sequencing
*/
conn->session->queued_cmdsn--;
/* it was never sent so just complete like normal */
state = ISCSI_TASK_COMPLETED;
} else if (err == DID_TRANSPORT_DISRUPTED)
state = ISCSI_TASK_ABRT_SESS_RECOV;
else
state = ISCSI_TASK_ABRT_TMF;
sc->result = err;
sc->result = err << 16;
if (!scsi_bidi_cmnd(sc))
scsi_set_resid(sc, scsi_bufflen(sc));
else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
scsi_in(sc)->resid = scsi_in(sc)->length;
}
if (conn->task == task)
conn->task = NULL;
/* release ref from queuecommand */
__iscsi_put_task(task);
iscsi_complete_task(task, state);
}
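The err << 16 fix above matters because of how the midlayer packs scmd->result; the host byte lives in bits 16-23:

/*
 * sc->result layout (low to high): status byte, message byte, host
 * byte, driver byte.  DID_ABORT is 0x05, so:
 *
 *   sc->result = DID_ABORT << 16;   yields 0x00050000
 *
 * Storing DID_ABORT unshifted would land in the SCSI status byte
 * and be misread as a status code.
 */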
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
session->state = ISCSI_STATE_LOGGING_OUT;
task->state = ISCSI_TASK_RUNNING;
list_move_tail(&task->running, &conn->mgmt_run_list);
ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
"datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
struct iscsi_host *ihost = shost_priv(session->host);
struct iscsi_task *task;
itt_t itt;
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
*/
task = conn->login_task;
else {
if (session->state != ISCSI_STATE_LOGGED_IN)
return NULL;
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
atomic_set(&task->refcount, 1);
task->conn = conn;
task->sc = NULL;
INIT_LIST_HEAD(&task->running);
task->state = ISCSI_TASK_PENDING;
if (data_size) {
memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
task->data_count = 0;
if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
"pdu for mgmt task.\n");
goto requeue_task;
if (conn->session->tt->alloc_pdu) {
if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
"pdu for mgmt task.\n");
goto free_task;
}
}
itt = task->hdr->itt;
task->hdr_len = sizeof(struct iscsi_hdr);
memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
INIT_LIST_HEAD(&task->running);
list_add_tail(&task->running, &conn->mgmtqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
if (!ihost->workq) {
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
if (session->tt->xmit_task(task))
goto free_task;
} else
} else {
list_add_tail(&task->running, &conn->mgmtqueue);
iscsi_conn_queue_work(conn);
}
return task;
free_task:
__iscsi_put_task(task);
return NULL;
requeue_task:
if (task != conn->login_task)
__kfifo_put(session->cmdpool.queue, (void*)&task,
sizeof(void*));
return NULL;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
out:
ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n",
ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
sc, sc->result, task->itt);
conn->scsirsp_pdus_cnt++;
__iscsi_put_task(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
return;
iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
sc->result = (DID_OK << 16) | rhdr->cmd_status;
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
ISCSI_DBG_SESSION(conn->session, "data in with status done "
"[sc %p res %d itt 0x%x]\n",
sc, sc->result, task->itt);
conn->scsirsp_pdus_cnt++;
__iscsi_put_task(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
*
* The session lock must be held.
*/
static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
return session->cmds[i];
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
/**
* __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
iscsi_tmf_rsp(conn, hdr);
__iscsi_put_task(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
break;
case ISCSI_OP_NOOP_IN:
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
goto recv_pdu;
mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
__iscsi_put_task(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
break;
default:
rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
recv_pdu:
if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
rc = ISCSI_ERR_CONN_FAILED;
__iscsi_put_task(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
list_move_tail(&task->running, &conn->requeue);
/*
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
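The list_empty() test above is the standard guard against double-queueing: list_add_tail() on a node that is already linked corrupts both lists. It only works because the tasks are managed with INIT_LIST_HEAD()/list_del_init(), which leave an unqueued node pointing at itself. Generic form of the idiom, with hypothetical names:

struct my_item {
	struct list_head node;	/* set up with INIT_LIST_HEAD() */
};

static void my_requeue(struct my_item *item, struct list_head *queue)
{
	/* no-op if the node is already on some list */
	if (list_empty(&item->node))
		list_add_tail(&item->node, queue);
}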
@@ -1206,6 +1256,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
__iscsi_put_task(conn->task);
conn->task = NULL;
@@ -1217,23 +1268,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
}
/* process pending command queue */
while (!list_empty(&conn->xmitqueue)) {
while (!list_empty(&conn->cmdqueue)) {
if (conn->tmf_state == TMF_QUEUED)
break;
conn->task = list_entry(conn->xmitqueue.next,
conn->task = list_entry(conn->cmdqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_command(conn, conn->task, DID_IMM_RETRY << 16);
fail_scsi_task(conn->task, DID_IMM_RETRY);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM) {
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
goto again;
} else
fail_command(conn, conn->task, DID_ABORT << 16);
fail_scsi_task(conn->task, DID_ABORT);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
conn->task = list_entry(conn->requeue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
list_move_tail(conn->requeue.next, &conn->run_list);
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
struct iscsi_cls_session *cls_session;
struct Scsi_Host *host;
struct iscsi_host *ihost;
int reason = 0;
struct iscsi_session *session;
struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
sc->SCp.ptr = NULL;
host = sc->device->host;
ihost = shost_priv(host);
spin_unlock(host->host_lock);
cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto fault;
}
/*
* ISCSI_STATE_FAILED is a temp. state. The recovery
* code will decide what is best to do with command queued
* during this time
*/
if (session->state != ISCSI_STATE_LOGGED_IN &&
session->state != ISCSI_STATE_FAILED) {
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
* to handle the race between when we set the recovery state
* and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
* up because the block code is not locked)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
goto reject;
sc->result = DID_IMM_RETRY << 16;
break;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
goto reject;
sc->result = DID_IMM_RETRY << 16;
break;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
reason = FAILURE_OOM;
goto reject;
}
list_add_tail(&task->running, &conn->xmitqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
if (!ihost->workq) {
reason = iscsi_prep_scsi_cmd_pdu(task);
if (reason) {
if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
reason = FAILURE_SESSION_NOT_READY;
goto prepd_reject;
}
} else
} else {
list_add_tail(&task->running, &conn->cmdqueue);
iscsi_conn_queue_work(conn);
}
session->queued_cmdsn++;
spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
prepd_reject:
sc->scsi_done = NULL;
iscsi_complete_command(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
reject:
spin_unlock(&session->lock);
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
prepd_fault:
sc->scsi_done = NULL;
iscsi_complete_command(task);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
fault:
spin_unlock(&session->lock);
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* Fail commands. session lock held and recv side suspended and xmit
* thread flushed
*/
static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
int error)
static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
int error)
{
struct iscsi_task *task, *tmp;
struct iscsi_task *task;
int i;
if (conn->task) {
if (lun == -1 ||
(conn->task->sc && conn->task->sc->device->lun == lun))
conn->task = NULL;
}
for (i = 0; i < conn->session->cmds_max; i++) {
task = conn->session->cmds[i];
if (!task->sc || task->state == ISCSI_TASK_FREE)
continue;
/* flush pending */
list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
if (lun == task->sc->device->lun || lun == -1) {
ISCSI_DBG_SESSION(conn->session,
"failing pending sc %p itt 0x%x\n",
task->sc, task->itt);
fail_command(conn, task, error << 16);
}
}
if (lun != -1 && lun != task->sc->device->lun)
continue;
list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
if (lun == task->sc->device->lun || lun == -1) {
ISCSI_DBG_SESSION(conn->session,
"failing requeued sc %p itt 0x%x\n",
task->sc, task->itt);
fail_command(conn, task, error << 16);
}
}
/* fail all other running */
list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
if (lun == task->sc->device->lun || lun == -1) {
ISCSI_DBG_SESSION(conn->session,
"failing in progress sc %p itt 0x%x\n",
task->sc, task->itt);
fail_command(conn, task, error << 16);
}
ISCSI_DBG_SESSION(conn->session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
fail_scsi_task(task, error);
}
}
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct iscsi_host *ihost = shost_priv(shost);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
if (ihost->workq)
flush_workqueue(ihost->workq);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
iscsi_conn_queue_work(conn);
iscsi_conn_queue_work(conn);
}
/*
* We want to make sure a ping is in flight. It has timed out.
* And we are not busy processing a pdu that is making
* progress but got started before the ping and is taking a while
* to complete so the ping is just stuck behind it in a queue.
*/
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
if (conn->ping_task &&
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
return 1;
else
return 0;
}
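Worked through with hypothetical values, the window iscsi_has_ping_timed_out() tests is recv_timeout + ping_timeout past the last received PDU:

/*
 * Say recv_timeout = 5s, ping_timeout = 5s, HZ = 100 and
 * last_recv = 1000 jiffies.  The deadline is
 *
 *   1000 + 5*100 + 5*100 = 2000 jiffies
 *
 * so with a ping outstanding the function returns 1 only once
 * jiffies reaches 2000, i.e. 10s of total silence.
 */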
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* if the ping timedout then we are in the middle of cleaning up
* and can let the iscsi eh handle it
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
if (iscsi_has_ping_timed_out(conn)) {
rc = BLK_EH_RESET_TIMER;
goto done;
}
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
jiffies))
jiffies)) {
rc = BLK_EH_RESET_TIMER;
goto done;
}
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
recv_timeout *= HZ;
last_recv = conn->last_recv;
if (conn->ping_task &&
time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
jiffies)) {
if (iscsi_has_ping_timed_out(conn)) {
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
"expired, last rx %lu, last ping %lu, "
"now %lu\n", conn->ping_timeout, last_recv,
conn->last_ping, jiffies);
"expired, recv timeout %d, last rx %lu, "
"last ping %lu, now %lu\n",
conn->ping_timeout, conn->recv_timeout,
last_recv, conn->last_ping, jiffies);
spin_unlock(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
/*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
sc->SCp.phase != session->age) {
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_SESSION(session, "failing abort due to dropped "
"session.\n");
return FAILED;
}
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
if (task->state == ISCSI_TASK_PENDING) {
fail_command(conn, task, DID_ABORT << 16);
fail_scsi_task(task, DID_ABORT);
goto success;
}
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* then sent more data for the cmd.
*/
spin_lock(&session->lock);
fail_command(conn, task, DID_ABORT << 16);
fail_scsi_task(task, DID_ABORT);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->lock);
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
if (cmd_task_size)
task->dd_data = &task[1];
task->itt = cmd_i;
task->state = ISCSI_TASK_FREE;
INIT_LIST_HEAD(&task->running);
}
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->transport_timer.data = (unsigned long)conn;
conn->transport_timer.function = iscsi_check_transport_timeouts;
INIT_LIST_HEAD(&conn->run_list);
INIT_LIST_HEAD(&conn->mgmt_run_list);
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->xmitqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
EXPORT_SYMBOL_GPL(iscsi_conn_start);
static void
flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
{
struct iscsi_task *task, *tmp;
struct iscsi_task *task;
int i, state;
for (i = 0; i < conn->session->cmds_max; i++) {
task = conn->session->cmds[i];
if (task->sc)
continue;
if (task->state == ISCSI_TASK_FREE)
continue;
ISCSI_DBG_SESSION(conn->session,
"failing mgmt itt 0x%x state %d\n",
task->itt, task->state);
state = ISCSI_TASK_ABRT_SESS_RECOV;
if (task->state == ISCSI_TASK_PENDING)
state = ISCSI_TASK_COMPLETED;
iscsi_complete_task(task, state);
/* handle pending */
list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
ISCSI_DBG_SESSION(session, "flushing pending mgmt task "
"itt 0x%x\n", task->itt);
/* release ref from prep task */
__iscsi_put_task(task);
}
/* handle running */
list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
ISCSI_DBG_SESSION(session, "flushing running mgmt task "
"itt 0x%x\n", task->itt);
/* release ref from prep task */
__iscsi_put_task(task);
}
conn->task = NULL;
}
static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
{
int old_stop_stage;
del_timer_sync(&conn->transport_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
session->state = ISCSI_STATE_TERMINATE;
else if (conn->stop_stage != STOP_CONN_RECOVER)
session->state = ISCSI_STATE_IN_RECOVERY;
spin_unlock_bh(&session->lock);
del_timer_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
old_stop_stage = conn->stop_stage;
conn->stop_stage = flag;
conn->c_stage = ISCSI_CONN_STOPPED;
spin_unlock_bh(&session->lock);
iscsi_suspend_tx(conn);
/*
* for connection level recovery we should not calculate
* header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
if (flag == STOP_CONN_RECOVER)
fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
else
fail_all_commands(conn, -1, DID_ERROR);
flush_control_queues(session, conn);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
static int iscsi_switch_str_param(char **param, char *new_val_buf)
{
char *new_val;
if (*param) {
if (!strcmp(*param, new_val_buf))
return 0;
}
new_val = kstrdup(new_val_buf, GFP_NOIO);
if (!new_val)
return -ENOMEM;
kfree(*param);
*param = new_val;
return 0;
}
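A hypothetical caller showing the helper's semantics: it no-ops when the value is unchanged, keeps the old string if the allocation fails, and otherwise swaps the copy in before freeing the old one:

static void switch_str_param_example(void)
{
	char val_a[] = "iqn.example:a";
	char val_b[] = "iqn.example:b";
	char *name = NULL;

	iscsi_switch_str_param(&name, val_a); /* allocates a copy */
	iscsi_switch_str_param(&name, val_a); /* equal: returns 0, no realloc */
	iscsi_switch_str_param(&name, val_b); /* dups new value, frees old */
	kfree(name);
}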
int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%u", &conn->exp_statsn);
break;
case ISCSI_PARAM_USERNAME:
kfree(session->username);
session->username = kstrdup(buf, GFP_KERNEL);
if (!session->username)
return -ENOMEM;
break;
return iscsi_switch_str_param(&session->username, buf);
case ISCSI_PARAM_USERNAME_IN:
kfree(session->username_in);
session->username_in = kstrdup(buf, GFP_KERNEL);
if (!session->username_in)
return -ENOMEM;
break;
return iscsi_switch_str_param(&session->username_in, buf);
case ISCSI_PARAM_PASSWORD:
kfree(session->password);
session->password = kstrdup(buf, GFP_KERNEL);
if (!session->password)
return -ENOMEM;
break;
return iscsi_switch_str_param(&session->password, buf);
case ISCSI_PARAM_PASSWORD_IN:
kfree(session->password_in);
session->password_in = kstrdup(buf, GFP_KERNEL);
if (!session->password_in)
return -ENOMEM;
break;
return iscsi_switch_str_param(&session->password_in, buf);
case ISCSI_PARAM_TARGET_NAME:
/* this should not change between logins */
if (session->targetname)
break;
session->targetname = kstrdup(buf, GFP_KERNEL);
if (!session->targetname)
return -ENOMEM;
break;
return iscsi_switch_str_param(&session->targetname, buf);
case ISCSI_PARAM_TPGT:
sscanf(buf, "%d", &session->tpgt);
break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%d", &conn->persistent_port);
break;
case ISCSI_PARAM_PERSISTENT_ADDRESS:
/*
* this is the address returned in discovery so it should
* not change between logins.
*/
if (conn->persistent_address)
break;
conn->persistent_address = kstrdup(buf, GFP_KERNEL);
if (!conn->persistent_address)
return -ENOMEM;
break;
return iscsi_switch_str_param(&conn->persistent_address, buf);
case ISCSI_PARAM_IFACE_NAME:
if (!session->ifacename)
session->ifacename = kstrdup(buf, GFP_KERNEL);
break;
return iscsi_switch_str_param(&session->ifacename, buf);
case ISCSI_PARAM_INITIATOR_NAME:
if (!session->initiatorname)
session->initiatorname = kstrdup(buf, GFP_KERNEL);
break;
return iscsi_switch_str_param(&session->initiatorname, buf);
default:
return -ENOSYS;
}
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
len = sprintf(buf, "%s\n", session->ifacename);
break;
case ISCSI_PARAM_INITIATOR_NAME:
if (!session->initiatorname)
len = sprintf(buf, "%s\n", "unknown");
else
len = sprintf(buf, "%s\n", session->initiatorname);
len = sprintf(buf, "%s\n", session->initiatorname);
break;
default:
return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
if (!ihost->netdev)
len = sprintf(buf, "%s\n", "default");
else
len = sprintf(buf, "%s\n", ihost->netdev);
len = sprintf(buf, "%s\n", ihost->netdev);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
if (!ihost->hwaddress)
len = sprintf(buf, "%s\n", "default");
else
len = sprintf(buf, "%s\n", ihost->hwaddress);
len = sprintf(buf, "%s\n", ihost->hwaddress);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
if (!ihost->initiatorname)
len = sprintf(buf, "%s\n", "unknown");
else
len = sprintf(buf, "%s\n", ihost->initiatorname);
len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
if (!strlen(ihost->local_address))
len = sprintf(buf, "%s\n", "unknown");
else
len = sprintf(buf, "%s\n",
ihost->local_address);
len = sprintf(buf, "%s\n", ihost->local_address);
break;
default:
return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
if (!ihost->netdev)
ihost->netdev = kstrdup(buf, GFP_KERNEL);
break;
return iscsi_switch_str_param(&ihost->netdev, buf);
case ISCSI_HOST_PARAM_HWADDRESS:
if (!ihost->hwaddress)
ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
break;
return iscsi_switch_str_param(&ihost->hwaddress, buf);
case ISCSI_HOST_PARAM_INITIATOR_NAME:
if (!ihost->initiatorname)
ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
break;
return iscsi_switch_str_param(&ihost->initiatorname, buf);
default:
return -ENOSYS;
}


@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t;
/* nothing to do for mgmt or pending tasks */
if (!task->sc || task->state == ISCSI_TASK_PENDING)
/* nothing to do for mgmt */
if (!task->sc)
return;
/* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
int datasn = be32_to_cpu(rhdr->datasn);
unsigned total_in_length = scsi_in(task->sc)->length;
iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
/*
* lib iscsi will update this in the completion handling if there
* is status.
*/
if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
if (tcp_conn->in.datalen == 0)
return 0;
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
int rc = 0;
ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
/*
* Update for each skb instead of pdu, because over slow networks a
* data_in's data could take a while to read in. We also want to
* account for r2ts.
*/
conn->last_recv = jiffies;
if (unlikely(conn->suspend_rx)) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");


@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
struct lpfc_sli2_slim;
#define LPFC_PCI_DEV_LP 0x1
#define LPFC_PCI_DEV_OC 0x2
#define LPFC_SLI_REV2 2
#define LPFC_SLI_REV3 3
#define LPFC_SLI_REV4 4
#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
};
struct hbq_dmabuf {
struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
uint32_t size;
uint32_t tag;
struct lpfc_rcqe rcqe;
};
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2 :24; /* Reserved */
uint32_t rsvd3 :19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t rsvd2 :24; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 :19; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
};
struct lpfc_vport {
struct list_head listentry;
struct lpfc_hba *phba;
struct list_head listentry;
uint8_t port_type;
#define LPFC_PHYSICAL_PORT 1
#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
enum discovery_state port_state;
uint16_t vpi;
uint16_t vfi;
uint8_t vfi_state;
#define LPFC_VFI_REGISTERED 0x1
uint32_t fc_flag; /* FC flags */
/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
#endif
uint8_t stat_data_enabled;
uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
uint32_t vport_flag;
#define STATIC_VPORT 1
};
struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
};
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
(struct lpfc_vport *, int);
struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
(struct lpfc_hba *);
int (*lpfc_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_scsi_buf *);
void (*lpfc_scsi_unprep_dma_buf)
(struct lpfc_hba *, struct lpfc_scsi_buf *);
void (*lpfc_release_scsi_buf)
(struct lpfc_hba *, struct lpfc_scsi_buf *);
void (*lpfc_rampdown_queue_depth)
(struct lpfc_hba *);
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
struct lpfc_nodelist *);
int (*lpfc_scsi_prep_task_mgmt_cmd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
unsigned int, uint8_t);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
IOCB_t * (*lpfc_get_iocb_from_iocbq)
(struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
/* MBOX interface function jump table entries */
int (*lpfc_sli_issue_mbox)
(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
/* Slow-path IOCB process function jump table entries */
void (*lpfc_sli_handle_slow_ring_event)
(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t mask);
/* INIT device interface function jump table entries */
int (*lpfc_sli_hbq_to_firmware)
(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
int (*lpfc_sli_brdrestart)
(struct lpfc_hba *);
int (*lpfc_sli_brdready)
(struct lpfc_hba *, uint32_t);
void (*lpfc_handle_eratt)
(struct lpfc_hba *);
void (*lpfc_stop_port)
(struct lpfc_hba *);
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
#define LPFC_SLI3_HBQ_ENABLED 0x01
#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_INB_ENABLED 0x10
#define LPFC_SLI3_BG_ENABLED 0x20
#define LPFC_SLI3_DSS_ENABLED 0x40
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -442,8 +522,13 @@ struct lpfc_hba {
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
#define DEFER_ERATT 0x4 /* Deferred error attention in progress */
#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
#define FCP_XRI_ABORT_EVENT 0x20
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_wq_count;
uint32_t cfg_fcp_eq_count;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
uint32_t cfg_enable_fip;
uint32_t cfg_log_verbose;
lpfc_vpd_t vpd; /* vital product data */
@@ -526,11 +616,12 @@ struct lpfc_hba {
unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
struct list_head rb_pend_list; /* Received buffers to be processed */
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
void __iomem *slim_memmap_p; /* Kernel memory mapped address for
PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
/* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hbq_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
uint16_t max_vports; /*
* For IOV HBAs max_vpi can change
* after a reset. max_vports is max
* number of vports present. This can
* be greater than max_vpi.
*/
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
/* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
atomic_t fast_event_count;
struct lpfc_fcf fcf;
uint8_t fc_map[3];
uint8_t valid_vlan;
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
};
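The jump table at the top of struct lpfc_hba is how the driver serves SLI-3 and SLI-4 parts from one code base: probe selects an implementation per entry point once, and hot paths then call through the pointers without re-testing sli_rev. A sketch of that setup step; the _s3/_s4 function names here are assumptions for illustration, not taken from this diff:

static void my_setup_ops(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_stop_port = lpfc_stop_port_s4;	  /* assumed name */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; /* assumed name */
	} else {
		phba->lpfc_stop_port = lpfc_stop_port_s3;	  /* assumed name */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; /* assumed name */
	}
}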
static inline struct Scsi_Host *


@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -30,8 +30,10 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return -ENOMEM;
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->mb.mbxOwner = OWN_HOST;
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
if ((mbxstatus == MBX_SUCCESS) &&
(pmboxq->u.mb.mbxStatus == 0 ||
pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
lpfc_init_link(phba, pmboxq, phba->cfg_topology,
phba->cfg_link_speed);
@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
(phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return 0;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 0;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb = &pmboxq->mb;
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_CONFIG;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 0;
}
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
if (arpi)
*arpi = pmb->un.varRdConfig.avail_rpi;
if (mxri)
*mxri = pmb->un.varRdConfig.max_xri;
if (axri)
*axri = pmb->un.varRdConfig.avail_xri;
if (mvpi)
*mvpi = pmb->un.varRdConfig.max_vpi;
if (avpi)
*avpi = pmb->un.varRdConfig.avail_vpi;
if (phba->sli_rev == LPFC_SLI_REV4) {
rd_config = &pmboxq->u.mqe.un.rd_config;
if (mrpi)
*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
if (arpi)
*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
phba->sli4_hba.max_cfg_param.rpi_used;
if (mxri)
*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
if (axri)
*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
phba->sli4_hba.max_cfg_param.xri_used;
if (mvpi)
*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
if (avpi)
*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
phba->sli4_hba.max_cfg_param.vpi_used;
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
if (arpi)
*arpi = pmb->un.varRdConfig.avail_rpi;
if (mxri)
*mxri = pmb->un.varRdConfig.max_xri;
if (axri)
*axri = pmb->un.varRdConfig.avail_xri;
if (mvpi)
*mvpi = pmb->un.varRdConfig.max_vpi;
if (avpi)
*avpi = pmb->un.varRdConfig.avail_vpi;
}
mempool_free(pmboxq, phba->mbox_mem_pool);
return 1;
@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
#
# LOG_ELS 0x1 ELS events
# LOG_DISCOVERY 0x2 Link discovery events
# LOG_MBOX 0x4 Mailbox events
# LOG_INIT 0x8 Initialization events
# LOG_LINK_EVENT 0x10 Link events
# LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events
# LOG_BG 0x200 BlockGuard events
# LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events
# LOG_FCP_ERROR 0x1000 Only log FCP errors
# LOG_LIBDFC 0x2000 LIBDFC events
# LOG_ALL_MSG 0xffff LOG all messages
# See lpfc_logmsg.h for definitions.
*/
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
"Verbose logging bit-mask");
/*
@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
/**
* lpfc_static_vport_show - Read callback function for
* lpfc_static_vport sysfs file.
* @dev: Pointer to class device object.
* @attr: device attribute structure.
* @buf: Data buffer.
*
* This function is the read callback function for the
* lpfc_static_vport sysfs file. The lpfc_static_vport
* sysfs file reports the manageability of the vport.
**/
static ssize_t
lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
if (vport->vport_flag & STATIC_VPORT)
sprintf(buf, "1\n");
else
sprintf(buf, "0\n");
return strlen(buf);
}
/*
* Sysfs attribute to report whether the vport is a static vport.
*/
static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
lpfc_static_vport_show, NULL);
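
A side note on the show callback above: the sprintf()-then-strlen() pair is safe here, but the conventional sysfs pattern bounds the write and returns the formatted length directly. A minimal equivalent sketch:

static ssize_t
lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

	/* snprintf() already returns the number of bytes formatted,
	 * so no second pass over the buffer is needed.
	 */
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(vport->vport_flag & STATIC_VPORT) ? 1 : 0);
}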
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
if (vports == NULL)
return -ENOMEM;
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(v_shost->host_lock);
/* Block and reset data collection */
@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
phba->bucket_base = base;
phba->bucket_step = step;
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
/* Unblock data collection */
@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
if (vports == NULL)
return -ENOMEM;
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->stat_data_blocked = 1;
@ -2844,14 +2885,38 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled
# 0 = MSI disabled (default)
# 1 = MSI enabled
# 2 = MSI-X enabled (default)
# Value range is [0,2]. Default value is 2.
# 2 = MSI-X enabled
# Value range is [0,2]. Default value is 0.
*/
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
#
# Value range is [636,651042]. Default value is 10000.
*/
LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
"Set the maximum number of fast-path FCP interrupts per second");
/*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
#
# Value range is [1,31]. Default value is 4.
*/
LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
"Set the number of fast-path FCP work queues, if possible");
/*
# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
#
# Value range is [1,7]. Default value is 1.
*/
LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
"Set the number of fast-path FCP event queues, if possible");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
# lpfc_enable_fip: When set, FIP is required to start discovery. If not
# set, the driver will add an FCF record manually if the port has no
# FCF records available and start discovery.
# Value range is [0,1]. Default value is 0 (disabled)
*/
LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
/*
# lpfc_prot_mask: i
@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_enable_fip,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_ack0,
@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_lun_queue_depth,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_enable_fip,
&dev_attr_lpfc_hba_queue_depth,
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_restrict_login,
@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_enable_da_id,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
NULL,
};
@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
}
}
memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
buf, count);
phba->sysfs_mbox.offset = off + count;
@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int rc;
MAILBOX_t *pmb;
if (off > MAILBOX_CMD_SIZE)
return -ERANGE;
@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
pmb = &phba->sysfs_mbox.mbox->u.mb;
switch (pmb->mbxCommand) {
/* Offline only */
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
"is illegal in on-line state\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_CONFIG_PORT:
case MBX_RUN_BIU_DIAG:
printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
default:
printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
* or RESTART mailbox commands until the HBA is restarted.
*/
if (phba->pport->stopped &&
phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
pmb->mbxCommand != MBX_DUMP_MEMORY &&
pmb->mbxCommand != MBX_RESTART &&
pmb->mbxCommand != MBX_WRITE_VPARMS &&
pmb->mbxCommand != MBX_WRITE_WWN)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"1259 mbox: Issued mailbox cmd "
"0x%x while in stopped state.\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
pmb->mbxCommand);
phba->sysfs_mbox.mbox->vport = vport;
@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
}
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox (phba,
@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
lpfc_mbox_tmo_val(phba,
phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
spin_lock_irq(&phba->hbalock);
}
@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
return -EAGAIN;
}
memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
memcpy(buf, (uint8_t *) pmb + off, count);
phba->sysfs_mbox.offset = off + count;
@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LA_8GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
case LA_10GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
(phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return NULL;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
return NULL;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb = &pmboxq->mb;
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
return;
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->mb;
pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}
/**
* lpfc_hba_log_verbose_init - Set hba's log verbose level
* @phba: Pointer to lpfc_hba struct.
*
* This function is called by the lpfc_get_cfgparam() routine to copy the
* module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
* for use with log messages according to the module's lpfc_log_verbose
* setting, before any hba port or vport is created.
**/
static void
lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
{
phba->cfg_log_verbose = verbose;
}
struct fc_function_template lpfc_transport_functions = {
/* fixed attributes the driver supports */
.show_host_node_name = 1,
@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);
@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
if (phba->cfg_enable_bg) {
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
phba->cfg_sg_dma_buf_size +=
phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
}
/* Also reinitialize the host templates with new values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_enable_fip_init(phba, lpfc_enable_fip);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
return;
}
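
For the sg_dma_buf_size formula removed above, a worked example helps. Of the sizes involved, only sizeof(struct ulp_bde64) = 3 x u32 = 12 bytes follows from a definition in this series; the FCP command/response sizes below are illustrative assumptions:

/*
 * sg_dma_buf_size = sizeof(fcp_cmnd) + sizeof(fcp_rsp)
 *                 + (cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)
 *
 * With an assumed 32-byte FCP_CMND, an assumed 32-byte FCP_RSP and a
 * 64-segment scatter list:  32 + 32 + (64 + 2) * 12 = 856 bytes per
 * buffer; the two extra BDEs carry the command and response payloads.
 */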


@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
LPFC_MBOXQ_t *, uint32_t);
int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
LPFC_MBOXQ_t *, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_linkdown_port(struct lpfc_vport *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irqreturn_t lpfc_intr_handler(int, void *);
irqreturn_t lpfc_sp_intr_handler(int, void *);
irqreturn_t lpfc_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli_intr_handler(int, void *);
irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_dev_check(struct lpfc_hba *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
uint32_t);
void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
int lpfc_sli_check_eratt(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, struct lpfc_iocbq *,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
void lpfc_create_static_vport(struct lpfc_hba *);
void lpfc_stop_hba_timers(struct lpfc_hba *);
void lpfc_stop_port(struct lpfc_hba *);
void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -32,8 +32,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, geniocb);
@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
case LA_8GHZ_LINK:
ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
break;
case LA_10GHZ_LINK:
ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
break;
default:
ae->un.PortSpeed =
HBA_PORTSPEED_UNKNOWN;
@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint8_t *fwname;
if (vp->rev.rBit) {
if (psli->sli_flag & LPFC_SLI2_ACTIVE)
if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;
else
rev = vp->rev.sli1FwRev;
@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
}
b4 = (rev & 0x0000000f);
if (psli->sli_flag & LPFC_SLI2_ACTIVE)
if (psli->sli_flag & LPFC_SLI_ACTIVE)
fwname = vp->rev.sli2FwName;
else
fwname = vp->rev.sli1FwName;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2007-2008 Emulex. All rights reserved. *
* Copyright (C) 2007-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -33,8 +33,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
if (phba->sli_rev != 3)
return 0;
cnt = LPFC_HBQINFO_SIZE;
spin_lock_irq(&phba->hbalock);
@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
pring->next_cmdidx, pring->local_getidx,
pring->flag, pgpp->rspPutInx, pring->numRiocb);
}
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
word3 = readl(phba->HCregaddr);
len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
word0, word1, word2, word3);
if (phba->sli_rev <= LPFC_SLI_REV3) {
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
word3 = readl(phba->HCregaddr);
len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
"HC:%08x\n", word0, word1, word2, word3);
}
spin_unlock_irq(&phba->hbalock);
return len;
}


@ -135,6 +135,7 @@ struct lpfc_nodelist {
#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
/* ndlp usage management macros */
#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -28,8 +28,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
uint32_t ha_copy;
if (vport->port_state >= LPFC_VPORT_READY ||
phba->link_state == LPFC_LINK_DOWN)
phba->link_state == LPFC_LINK_DOWN ||
phba->sli_rev > LPFC_SLI_REV3)
return 0;
/* Read the HBA Host Attention Register */
@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
icmd->ulpContext = vport->vpi;
icmd->ulpContext = vport->vpi + phba->vpi_base;
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@ -305,7 +308,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
* 0 - successfully issued fabric registration login for @vport
* -ENXIO -- failed to issue fabric registration login for @vport
**/
static int
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
err = 4;
goto fail;
}
rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
0);
rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
if (rc) {
err = 5;
goto fail_free_mbox;
@ -385,6 +387,75 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
return -ENXIO;
}
/**
* lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
* @vport: pointer to a host virtual N_Port data structure.
*
* This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
* the @vport. This mailbox command is necessary for FCoE only.
*
* Return code
* 0 - successfully issued REG_VFI for @vport
* A failure code otherwise.
**/
static int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mboxq;
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
struct lpfc_dmabuf *dmabuf;
int rc = 0;
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
rc = -ENODEV;
goto fail;
}
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf) {
rc = -ENOMEM;
goto fail;
}
dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
if (!dmabuf->virt) {
rc = -ENOMEM;
goto fail_free_dmabuf;
}
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
goto fail_free_coherent;
}
vport->port_state = LPFC_FABRIC_CFG_LINK;
memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
mboxq->context1 = dmabuf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
goto fail_free_mbox;
}
return 0;
fail_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
kfree(dmabuf);
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0289 Issue Register VFI failed: Err %d\n", rc);
return rc;
}
/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
lpfc_register_new_vport(phba, vport, ndlp);
return 0;
if (phba->sli_rev < LPFC_SLI_REV4) {
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_issue_fabric_reglogin(vport);
} else {
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->vfi_state & LPFC_VFI_REGISTERED) {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
} else
lpfc_issue_reg_vfi(vport);
}
lpfc_issue_fabric_reglogin(vport);
return 0;
}
/**
* lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
* @vport: pointer to a host virtual N_Port data structure.
@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
if (phba->sli_rev == LPFC_SLI_REV4) {
elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
/* FLOGI needs to be 3 for WQE FCFI */
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
if (!ndlp)
return 0;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
IOCB_t *icmd;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int ret;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
ndlp = lpfc_findnode_did(vport, did);
if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
PRLI *npr;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_PRLI);
@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
spin_unlock_irq(shost->host_lock);
@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
* and continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE)) {
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
lpfc_issue_reg_vpi(phba, vport);
return;
}
@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ADISC *ap;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
uint16_t cmdsize;
@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_ADISC_SND;
spin_unlock_irq(shost->host_lock);
@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_LOGO_SND) {
spin_unlock_irq(shost->host_lock);
@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = lpfc_findnode_did(vport, nportid);
@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
FARP *fp;
uint8_t *pcmd;
@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_nodelist *ndlp;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = lpfc_findnode_did(vport, nportid);
@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
/*
* This routine is used to register and unregister in previous SLI
* modes.
*/
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
(phba->sli_rev == LPFC_SLI_REV4))
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
pmb->context1 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
lpfc_nlp_not_used(ndlp);
}
return;
}
@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ELS_PKT *els_pkt_ptr;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
oldcmd = &oldiocb->iocb;
switch (flag) {
@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
}
phba->fc_stat.elsXmitACC++;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LS_RJT);
@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
ADISC *ap;
IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
RNID *rn;
IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
if (format)
@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
* it could be freed */
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
case RSCN_ADDRESS_FORMAT_PORT:
if (ns_did.un.word == rscn_did.un.word)
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area)
&& (ns_did.un.b.id == rscn_did.un.b.id))
goto return_did_out;
break;
case RSCN_ADDRESS_FORMAT_AREA:
@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_init_link(phba, mbox,
phba->cfg_topology,
phba->cfg_link_speed);
mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
MAILBOX_t *mb;
IOCB_t *icmd;
RPS_RSP *rps_rsp;
@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t xri, status;
uint32_t cmdsize;
mb = &pmb->mb;
mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
xri = (uint16_t) ((unsigned long)(pmb->context1));
@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
lpfc_els_free_iocb(phba, elsiocb);
return;
}
@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
} else {
/* FAN verified - skip FLOGI */
vport->fc_myDID = vport->fc_prevDID;
lpfc_issue_fabric_reglogin(vport);
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
else
lpfc_issue_reg_vfi(vport);
}
}
return 0;
@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"(%d):0111 Dropping received ELS cmd "
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0111 Dropping received ELS cmd "
"Data: x%x x%x x%x\n",
vport->vpi, icmd->ulpStatus,
icmd->un.ulpWord[4], icmd->ulpTimeout);
icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
phba->fc_stat.elsRcvDrop++;
}
@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
if (icmd->unsli3.rcvsli3.vpi == 0xffff)
vport = phba->pport;
else {
uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
vport = lpfc_find_vport_by_vpid(phba, vpi);
}
else
vport = lpfc_find_vport_by_vpid(phba,
icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
if (vport == phba->pport)
lpfc_issue_fabric_reglogin(vport);
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
else
lpfc_issue_reg_vfi(vport);
else
lpfc_do_scr_ns_plogi(phba, vport);
}
@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *cmd;
repeat:
@ -6248,7 +6319,7 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
"Fabric sched1: ste:x%x",
iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@ -6394,7 +6465,6 @@ static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
unsigned long iflags;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
int ready;
int ret;
@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
"Fabric sched2: ste:x%x",
iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
/**
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the els xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 slow-path
* ELS aborted xri.
**/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
list_del(&sglq_entry->list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock,
iflag);
spin_lock_irqsave(&phba->hbalock, iflag);
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_sgl_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
}
spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
}

File diff suppressed because it is too large


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -470,6 +470,35 @@ struct serv_parm { /* Structure is in Big Endian format */
uint8_t vendorVersion[16];
};
/*
* Virtual Fabric Tagging Header
*/
struct fc_vft_header {
uint32_t word0;
#define fc_vft_hdr_r_ctl_SHIFT 24
#define fc_vft_hdr_r_ctl_MASK 0xFF
#define fc_vft_hdr_r_ctl_WORD word0
#define fc_vft_hdr_ver_SHIFT 22
#define fc_vft_hdr_ver_MASK 0x3
#define fc_vft_hdr_ver_WORD word0
#define fc_vft_hdr_type_SHIFT 18
#define fc_vft_hdr_type_MASK 0xF
#define fc_vft_hdr_type_WORD word0
#define fc_vft_hdr_e_SHIFT 16
#define fc_vft_hdr_e_MASK 0x1
#define fc_vft_hdr_e_WORD word0
#define fc_vft_hdr_priority_SHIFT 13
#define fc_vft_hdr_priority_MASK 0x7
#define fc_vft_hdr_priority_WORD word0
#define fc_vft_hdr_vf_id_SHIFT 1
#define fc_vft_hdr_vf_id_MASK 0xFFF
#define fc_vft_hdr_vf_id_WORD word0
uint32_t word1;
#define fc_vft_hdr_hopct_SHIFT 24
#define fc_vft_hdr_hopct_MASK 0xFF
#define fc_vft_hdr_hopct_WORD word1
};
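
The _SHIFT/_MASK/_WORD triplets above feed token-pasting field accessors. lpfc_hw4.h (introduced by this series) defines bf_get()/bf_set() essentially along these lines:

/* Extract, or read-modify-write, a named field within its backing word. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Usage: pull the VF ID out of a received VF tagging header. */
vf_id = bf_get(fc_vft_hdr_vf_id, &vft_hdr);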
/*
* Extended Link Service LS_COMMAND codes (Payload Word 0)
*/
@ -1152,6 +1181,9 @@ typedef struct {
#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
#define PCI_DEVICE_ID_TIGERSHARK 0x0704
#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
#define MBX_READ_LA64 0x95
#define MBX_REG_VPI 0x96
#define MBX_UNREG_VPI 0x97
#define MBX_REG_VNPID 0x96
#define MBX_UNREG_VNPID 0x97
#define MBX_WRITE_WWN 0x98
#define MBX_SET_DEBUG 0x99
#define MBX_LOAD_EXP_ROM 0x9C
#define MBX_MAX_CMDS 0x9D
#define MBX_SLI4_CONFIG 0x9B
#define MBX_SLI4_REQ_FTRS 0x9D
#define MBX_MAX_CMDS 0x9E
#define MBX_RESUME_RPI 0x9E
#define MBX_SLI2_CMD_MASK 0x80
#define MBX_REG_VFI 0x9F
#define MBX_REG_FCFI 0xA0
#define MBX_UNREG_VFI 0xA1
#define MBX_UNREG_FCFI 0xA2
#define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4
/* IOCB Commands */
@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
#define CMD_IOCB_LOGENTRY_CN 0x94
#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
/* Unhandled Data Security SLI Commands */
#define DSSCMD_IWRITE64_CR 0xD8
#define DSSCMD_IWRITE64_CX 0xD9
#define DSSCMD_IREAD64_CR 0xDA
#define DSSCMD_IREAD64_CX 0xDB
#define DSSCMD_INVALIDATE_DEK 0xDC
#define DSSCMD_SET_KEK 0xDD
#define DSSCMD_GET_KEK_ID 0xDE
#define DSSCMD_GEN_XFER 0xDF
#define CMD_MAX_IOCB_CMD 0xE6
#define CMD_IOCB_MASK 0xff
@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
#define MBXERR_BAD_RCV_LENGTH 14
#define MBXERR_DMA_ERROR 15
#define MBXERR_ERROR 16
#define MBXERR_LINK_DOWN 0x33
#define MBX_NOT_FINISHED 255
#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@ -1504,32 +1553,6 @@ struct ulp_bde {
#endif
};
struct ulp_bde64 { /* SLI-2 */
union ULP_BDE_TUS {
uint32_t w;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
VALUE !! */
uint32_t bdeSize:24; /* Size of buffer (in bytes) */
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t bdeSize:24; /* Size of buffer (in bytes) */
uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
VALUE !! */
#endif
#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
} f;
} tus;
uint32_t addrLow;
uint32_t addrHigh;
};
typedef struct ULP_BDL { /* SLI-2 */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t bdeFlags:8; /* BDL Flags */
@ -2287,7 +2310,7 @@ typedef struct {
uint32_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
uint16_t rsvd6;
uint16_t vfi;
uint16_t vpi;
#else /* __LITTLE_ENDIAN */
uint32_t rsvd1;
@ -2297,7 +2320,7 @@ typedef struct {
uint32_t rsvd4;
uint32_t rsvd5;
uint16_t vpi;
uint16_t rsvd6;
uint16_t vfi;
#endif
} REG_VPI_VAR;
@ -2457,7 +2480,7 @@ typedef struct {
uint32_t entry_index:16;
#endif
uint32_t rsvd1;
uint32_t sli4_length;
uint32_t word_cnt;
uint32_t resp_offset;
} DUMP_VAR;
@ -2470,9 +2493,32 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
#define DMP_REGION_VPORT 0x16 /* VPort info region */
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
#define DMP_FCOEPARAM_RGN_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
struct vport_rec {
uint8_t wwpn[8];
uint8_t wwnn[8];
};
#define VPORT_INFO_SIG 0x32324752
#define VPORT_INFO_REV_MASK 0xff
#define VPORT_INFO_REV 0x1
#define MAX_STATIC_VPORT_COUNT 16
struct static_vport_info {
uint32_t signature;
uint32_t rev;
struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
uint32_t resvd[66];
};
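/*
 * A minimal sketch of validating a buffer read back from the VPort info
 * region before walking the static vport entries (buf is a hypothetical
 * pointer to the dump data; byte-order conversion is omitted here):
 *
 *	struct static_vport_info *info = (struct static_vport_info *)buf;
 *	int i;
 *
 *	if (info->signature == VPORT_INFO_SIG &&
 *	    (info->rev & VPORT_INFO_REV_MASK) == VPORT_INFO_REV)
 *		for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++)
 *			use_wwns(info->vport_list[i].wwpn,
 *				 info->vport_list[i].wwnn);
 *
 * where use_wwns() stands in for whatever consumes the names.
 */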
/* Option rom version structure */
struct prog_id {
#ifdef __BIG_ENDIAN_BITFIELD
@ -2697,7 +2743,9 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1 : 23; /* Reserved */
uint32_t rsvd1 : 19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
@ -2717,10 +2765,14 @@ typedef struct {
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd1 : 23; /* Reserved */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2 : 23; /* Reserved */
uint32_t rsvd3 : 19; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd4 : 3; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
@ -2740,7 +2792,9 @@ typedef struct {
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd2 : 23; /* Reserved */
uint32_t rsvd4 : 3; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@ -2753,20 +2807,20 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
#else /* __LITTLE_ENDIAN */
uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
#endif
uint32_t rsvd4; /* Reserved */
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd5 : 16; /* Reserved */
uint32_t rsvd7 : 16; /* Reserved */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
uint32_t rsvd5 : 16; /* Reserved */
uint32_t rsvd7 : 16; /* Reserved */
#endif
} CONFIG_PORT_VAR;
@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
#define MENLO_TIMEOUT 30
#define SETVAR_MLOMNT 0x103107
#define SETVAR_MLORST 0x103007
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */

2141 drivers/scsi/lpfc/lpfc_hw4.h Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -18,33 +18,39 @@
* included with this package. *
*******************************************************************/
#define LOG_ELS 0x1 /* ELS events */
#define LOG_DISCOVERY 0x2 /* Link discovery events */
#define LOG_MBOX 0x4 /* Mailbox events */
#define LOG_INIT 0x8 /* Initialization events */
#define LOG_LINK_EVENT 0x10 /* Link events */
#define LOG_IP 0x20 /* IP traffic history */
#define LOG_FCP 0x40 /* FCP traffic history */
#define LOG_NODE 0x80 /* Node table events */
#define LOG_TEMP 0x100 /* Temperature sensor events */
#define LOG_BG 0x200 /* BlockGuard events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */
#define LOG_VPORT 0x4000 /* NPIV events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define LOG_ELS 0x00000001 /* ELS events */
#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
#define LOG_MBOX 0x00000004 /* Mailbox events */
#define LOG_INIT 0x00000008 /* Initialization events */
#define LOG_LINK_EVENT 0x00000010 /* Link events */
#define LOG_IP 0x00000020 /* IP traffic history */
#define LOG_FCP 0x00000040 /* FCP traffic history */
#define LOG_NODE 0x00000080 /* Node table events */
#define LOG_TEMP 0x00000100 /* Temperature sensor events */
#define LOG_BG 0x00000200 /* BlockGuard events */
#define LOG_MISC 0x00000400 /* Miscellaneous events */
#define LOG_SLI 0x00000800 /* SLI events */
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
#define LOF_SECURITY 0x00008000 /* Security events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
} while (0)
} while (0)
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
do { \
{ if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
do { \
{ uint32_t log_verbose = (phba)->pport ? \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
if (((mask) & log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); } \
} while (0)
fmt, phba->brd_no, ##arg); \
} \
} while (0)
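/*
 * A typical use of the widened verbose-log masks above; the message
 * number and text are illustrative only. The vport form gates on the
 * vport's cfg_log_verbose, the phba form on the physical port's (or,
 * with the new macro, on phba->cfg_log_verbose before a pport exists):
 *
 *	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_SLI,
 *			"0000 example: sli_rev %d\n", phba->sli_rev);
 *	lpfc_printf_vlog(vport, KERN_WARNING, LOG_VPORT,
 *			 "0000 example: vpi %d going offline\n", vport->vpi);
 */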

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -28,8 +28,10 @@
#include <scsi/scsi.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -38,6 +40,44 @@
#include "lpfc_crtn.h"
#include "lpfc_compat.h"
/**
* lpfc_dump_static_vport - Dump HBA's static vport information.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset for dumping vport info.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
* This routine prepares the mailbox command for dumping list of static
* vports to be created.
**/
void
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
uint16_t offset)
{
MAILBOX_t *mb;
void *ctx;
mb = &pmb->u.mb;
ctx = pmb->context2;
/* Setup to dump vport info region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = DMP_REGION_VPORT;
mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
pmb->context2 = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
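/*
 * A minimal sketch of driving the routine above through the mailbox
 * path; example_dump_static_vport_cmpl is a hypothetical completion
 * handler, while mempool_alloc() and lpfc_sli_issue_mbox() are the
 * driver's standard mailbox facilities:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (pmb) {
 *		lpfc_dump_static_vport(phba, pmb, 0);
 *		pmb->vport = phba->pport;
 *		pmb->mbox_cmpl = example_dump_static_vport_cmpl;
 *		if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */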
/**
* lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
* @phba: pointer to lpfc hba data structure.
@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
MAILBOX_t *mb;
void *ctx;
mb = &pmb->mb;
mb = &pmb->u.mb;
ctx = pmb->context2;
/* Setup to dump VPD region */
@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb;
void *ctx;
mb = &pmb->mb;
mb = &pmb->u.mb;
/* Save context so that we can restore after memset */
ctx = pmb->context2;
@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_NV;
mb->mbxOwner = OWN_HOST;
@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
mb->un.varCfgAsyncEvent.ring = ring;
@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_HEARTBEAT;
mb->mbxOwner = OWN_HOST;
@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
struct lpfc_sli *psli;
psli = &phba->sli;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
INIT_LIST_HEAD(&mp->list);
@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varClearLA.eventTag = phba->fc_eventTag;
@ -275,7 +315,7 @@ void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
struct lpfc_vport *vport = phba->pport;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
/* NEW_FEATURE
@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t attentionConditions[2];
/* Sanity check */
@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
struct lpfc_sli *psli;
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
psli = &phba->sli;
@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
struct lpfc_sli *psli;
psli = &phba->sli;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxOwner = OWN_HOST;
@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
mb->un.varRdSparm.vpi = vpi;
mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
/* save address for completion */
pmb->context1 = mp;
@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
if (vpi != 0xffff)
vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_CONFIG;
@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_LNK_STAT;
@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
}
/**
* lpfc_reg_login - Prepare a mailbox command for registering remote login
* lpfc_reg_rpi - Prepare a mailbox command for registering remote login
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* 1 - DMA memory allocation failed
**/
int
lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
mb->un.varRegLogin.vpi = vpi;
if (phba->sli_rev == LPFC_SLI_REV4) {
mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
return 1;
}
mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
{
MAILBOX_t *mb;
mb = &pmb->mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
mb->un.varUnregLogin.vpi = vpi;
mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
return;
}
@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
* This routine prepares the mailbox command for registering a virtual N_Port.
**/
void
lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
LPFC_MBOXQ_t *pmb)
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegVpi.vpi = vpi;
mb->un.varRegVpi.sid = sid;
mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
mb->un.varRegVpi.sid = vport->fc_myDID;
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
mb->mbxCommand = MBX_REG_VPI;
mb->mbxOwner = OWN_HOST;
@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregVpi.vpi = vpi;
mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRdRev.cv = 1;
mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
int i;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@ -1020,7 +1069,7 @@ void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
int i;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
@ -1075,7 +1124,7 @@ void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
dma_addr_t pdma_addr;
uint32_t bar_low, bar_high;
size_t offset;
@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* If HBA supports SLI=3 ask for it */
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
mb->un.varCfgPort.cmv = 1;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
phba->sli_rev = 2;
phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_KILL_BOARD;
@ -1304,29 +1354,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
/**
* __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine puts the completed mailbox command into the mailbox command
* complete list. This is the unlocked version of the routine. The mailbox
* complete list is used by the driver worker thread to process mailbox
* complete callback functions outside the driver interrupt handler.
**/
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
/**
* lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine puts the completed mailbox command into the mailbox command
* complete list. This routine is called from driver interrupt handler
* context. The mailbox complete list is used by the driver worker thread
* to process mailbox complete callback functions outside the driver interrupt
* handler.
* complete list. This is the locked version of the routine. The mailbox
* complete list is used by the driver worker thread to process mailbox
* complete callback functions outside the driver interrupt handler.
**/
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
unsigned long iflag;
/* This function expects to be called from interrupt context */
spin_lock_irqsave(&phba->hbalock, iflag);
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
__lpfc_mbox_cmpl_put(phba, mbq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
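/*
 * The split above follows the driver's usual locked/unlocked pairing; a
 * short sketch of the intended call patterns (assumed, not lifted from a
 * specific call site):
 *
 *	lpfc_mbox_cmpl_put(phba, mbq);	when hbalock is not held
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	__lpfc_mbox_cmpl_put(phba, mbq);	when hbalock is already held
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */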
/**
* lpfc_mbox_cmd_check - Check the validity of a mailbox command
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to the driver internal queue element for mailbox command.
*
* This routine checks whether a mailbox command is valid to be issued.
* This check is performed by the mailbox issue API before a client's
* mailbox command is posted to the mailbox transport.
*
* Return 0 - pass the check, -ENODEV - fail the check
**/
int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
/* Mailbox commands that have a completion handler must also have a
* vport specified.
*/
if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if (!mboxq->vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1814 Mbox x%x failed, no vport\n",
mboxq->u.mb.mbxCommand);
dump_stack();
return -ENODEV;
}
}
return 0;
}
/**
* lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
* @phba: pointer to lpfc hba data structure.
*
* This routine checks whether the HBA device is ready for posting a
* mailbox command. It is used by the mailbox transport API at the time
* it is about to post a mailbox command to the device.
*
* Return 0 - pass the check, -ENODEV - fail the check
**/
int
lpfc_mbox_dev_check(struct lpfc_hba *phba)
{
/* If the PCI channel is in offline state, do not issue mbox */
if (unlikely(pci_channel_offline(phba->pcidev)))
return -ENODEV;
/* If the HBA is in error state, do not issue mbox */
if (phba->link_state == LPFC_HBA_ERROR)
return -ENODEV;
return 0;
}
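/*
 * A minimal sketch of how the two checks above gate a mailbox post;
 * mboxq is assumed to be a fully prepared command:
 *
 *	int rc;
 *
 *	rc = lpfc_mbox_dev_check(phba);
 *	if (!rc)
 *		rc = lpfc_mbox_cmd_check(phba, mboxq);
 *	if (rc)
 *		return rc;	both checks return -ENODEV on failure
 */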
/**
* lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
* @phba: pointer to lpfc hba data structure.
@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
return LPFC_MBOX_TMO_FLASH_CMD;
case MBX_SLI4_CONFIG: /* 0x9b */
return LPFC_MBOX_SLI4_CONFIG_TMO;
}
return LPFC_MBOX_TMO;
}
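/*
 * A sketch of consuming the per-command timeout above when waiting for
 * completion; the wait mechanics here are illustrative, not the driver's
 * actual polling loop:
 *
 *	unsigned long timeout = jiffies +
 *		msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba,
 *					pmb->u.mb.mbxCommand));
 *
 *	while (!mbox_done(phba) && time_before(jiffies, timeout))
 *		msleep(2);
 *
 * where mbox_done() stands in for the real completion test.
 */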
/**
* lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
* @mbox: pointer to lpfc mbox command.
* @sgentry: sge entry index.
* @phyaddr: physical address for the sge
* @length: Length of the sge.
*
* This routine sets up an entry in the non-embedded mailbox command at the sge
* index location.
**/
void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
dma_addr_t phyaddr, uint32_t length)
{
struct lpfc_mbx_nembed_cmd *nembed_sge;
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&mbox->u.mqe.un.nembed_cmd;
nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
nembed_sge->sge[sgentry].length = length;
}
/**
* lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
* @mbox: pointer to lpfc mbox command.
* @sgentry: sge entry index.
*
* This routine gets an entry from the non-embedded mailbox command at the sge
* index location.
**/
void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
struct lpfc_mbx_sge *sge)
{
struct lpfc_mbx_nembed_cmd *nembed_sge;
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&mbox->u.mqe.un.nembed_cmd;
sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
sge->length = nembed_sge->sge[sgentry].length;
}
/**
* lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
* This routine frees an SLI4 specific mailbox command used for sending an
* IOCTL command.
**/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_sli4_config *sli4_cfg;
struct lpfc_mbx_sge sge;
dma_addr_t phyaddr;
uint32_t sgecount, sgentry;
sli4_cfg = &mbox->u.mqe.un.sli4_config;
/* For embedded mbox command, just free the mbox command */
if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
mempool_free(mbox, phba->mbox_mem_pool);
return;
}
/* For non-embedded mbox command, we need to free the pages first */
sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
/* There is nothing we can do if there is no sge address array */
if (unlikely(!mbox->sge_array)) {
mempool_free(mbox, phba->mbox_mem_pool);
return;
}
/* Each non-embedded DMA memory was allocated in the length of a page */
for (sgentry = 0; sgentry < sgecount; sgentry++) {
lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
/* Free the sge address array memory */
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
}
/**
* lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
* @length: Length of the sli4 config mailbox command.
*
* This routine sets up the header fields of an SLI4 specific mailbox
* command for sending an IOCTL command.
*
* Return: the actual length of the mbox command allocated (mostly useful
* for non-embedded mailbox commands).
**/
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
struct lpfc_mbx_sli4_config *sli4_config;
union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
uint32_t alloc_len;
uint32_t resid_len;
uint32_t pagen, pcount;
void *viraddr;
dma_addr_t phyaddr;
/* Set up SLI4 mailbox command header fields */
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
/* Set up SLI4 ioctl command header fields */
sli4_config = &mbox->u.mqe.un.sli4_config;
/* Setup for the embedded mbox command */
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
sli4_config->header.cfg_mhdr.payload_length =
LPFC_MBX_CMD_HDR_LENGTH + length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
sli4_config->header.cfg_shdr.request.request_length = length;
return length;
}
/* Setup for the non-embedded mbox command */
pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array)
return 0;
for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
/* The DMA memory is always allocated in the length of a
* page even though the last SGE might not fill up to a
* page; this is used as the a priori size of PAGE_SIZE for
* the later DMA memory free.
*/
viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
&phyaddr, GFP_KERNEL);
/* If the allocation fails, proceed with whatever we have */
if (!viraddr)
break;
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
resid_len = length - alloc_len;
if (resid_len > PAGE_SIZE) {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
PAGE_SIZE);
alloc_len += PAGE_SIZE;
} else {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
resid_len);
alloc_len = length;
}
}
/* Set up main header fields in mailbox command */
sli4_config->header.cfg_mhdr.payload_length = alloc_len;
bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
/* Set up sub-header fields into the first page */
if (pagen > 0) {
bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
cfg_shdr->request.request_length =
alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
}
/* The sub-header is in DMA memory, which needs endian conversion */
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
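/*
 * A minimal sketch of building a non-embedded SLI4_CONFIG command with
 * the routine above; subsystem, opcode and req_len are placeholders. The
 * shortfall check mirrors how callers are expected to verify that enough
 * DMA pages were obtained:
 *
 *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	uint32_t alloc_len;
 *
 *	if (mboxq) {
 *		alloc_len = lpfc_sli4_config(phba, mboxq, subsystem,
 *					     opcode, req_len, false);
 *		if (alloc_len < req_len)
 *			lpfc_sli4_mbox_cmd_free(phba, mboxq);
 *	}
 */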
/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
* This routine gets the opcode from an SLI4 specific mailbox command used
* for sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
* (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
* returned.
**/
uint8_t
lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_sli4_config *sli4_cfg;
union lpfc_sli4_cfg_shdr *cfg_shdr;
if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
return 0;
sli4_cfg = &mbox->u.mqe.un.sli4_config;
/* For embedded mbox command, get opcode from embedded sub-header*/
if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
/* For non-embedded mbox command, get opcode from first dma page */
if (unlikely(!mbox->sge_array))
return 0;
cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
/**
* lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox command
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to lpfc mbox command.
*
* This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
* mailbox command.
**/
void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
/* Set up SLI4 mailbox command header fields */
memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
/* Virtual fabrics and FIPs are not supported yet. */
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable NPIV only if configured to do so. */
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
return;
}
/**
* lpfc_init_vfi - Initialize the INIT_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vport: Vport associated with the VF.
*
* This routine initializes @mbox to all zeros and then fills in the mailbox
* fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
* in the context of an FCF. The driver issues this command to set up a VFI
* before issuing a FLOGI to log in to the VSAN. The driver should also issue a
* REG_VFI after a successful VSAN login.
**/
void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
struct lpfc_mbx_init_vfi *init_vfi;
memset(mbox, 0, sizeof(*mbox));
init_vfi = &mbox->u.mqe.un.init_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
}
/**
* lpfc_reg_vfi - Initialize the REG_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vport: vport associated with the VF.
* @phys: BDE DMA bus address used to send the service parameters to the HBA.
*
* This routine initializes @mbox to all zeros and then fills in the mailbox
* fields from @vport, and uses the DMA buffer at @phys to send the vport's
* fc service parameters to the HBA for this VFI. REG_VFI configures virtual
* fabrics identified by VFI in the context of an FCF.
**/
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
struct lpfc_mbx_reg_vfi *reg_vfi;
memset(mbox, 0, sizeof(*mbox));
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
}
/**
* lpfc_init_vpi - Initialize the INIT_VPI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vpi: VPI to be initialized.
*
* The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
* command to activate a virtual N_Port. The HBA assigns a MAC address to use
* with the virtual N_Port. The SLI Host issues this command before issuing an
* FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
* successful virtual N_Port login.
**/
void
lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
}
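/*
 * A brief sketch of preparing INIT_VPI ahead of an FDISC; the completion
 * handler name is hypothetical, and the vpi is assumed to be adjusted
 * with vpi_base as done elsewhere in this file:
 *
 *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (mboxq) {
 *		lpfc_init_vpi(mboxq, vport->vpi + phba->vpi_base);
 *		mboxq->vport = vport;
 *		mboxq->mbox_cmpl = example_init_vpi_cmpl;
 *		if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */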
/**
* lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vfi: VFI to be unregistered.
*
* The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
* (logical NPort) into the inactive state. The SLI Host must have logged out
* and unregistered all remote N_Ports to abort any activity on the virtual
* fabric. The SLI Port posts the mailbox response after marking the virtual
* fabric inactive.
**/
void
lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
}
/**
* lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command to initialize.
*
* This function creates a SLI4 dump mailbox command to dump FCoE
* parameters stored in region 23.
**/
int
lpfc_dump_fcoe_param(struct lpfc_hba *phba,
struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
memset(mbox, 0, sizeof(*mbox));
mb = &mbox->u.mb;
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
/* dump_fcoe_param failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"2569 lpfc_dump_fcoe_param: memory"
" allocation failed\n");
return 1;
}
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
mbox->context1 = (uint8_t *) mp;
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
}
/**
* lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
* @phba: pointer to the hba structure containing the FCF index and RQ ID.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
* SLI Host uses the command to activate an FCF after it has acquired FCF
* information via a READ_FCF mailbox command. This mailbox command also is used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
* the the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_reg_fcfi *reg_fcfi;
memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
}
}
/**
* lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @fcfi: FCFI to be unregistered.
*
* The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
* The SLI Host uses the command to inactivate an FCFI.
**/
void
lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
}
/**
* lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @ndlp: The nodelist structure that describes the RPI to resume.
*
* The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
* link event.
**/
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
bf_set(lpfc_resume_rpi_vpi, resume_rpi,
ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
bf_set(lpfc_resume_rpi_vfi, resume_rpi,
ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
}

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -28,8 +28,10 @@
#include <scsi/scsi.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -45,7 +47,7 @@
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
* lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@ -56,19 +58,30 @@
* -ENOMEM on failure (if any memory allocations fail)
**/
int
lpfc_mem_alloc(struct lpfc_hba * phba)
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int longs;
int i;
phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev,
phba->cfg_sg_dma_buf_size,
phba->cfg_sg_dma_buf_size,
0);
else
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev, phba->cfg_sg_dma_buf_size,
align, 0);
if (!phba->lpfc_scsi_dma_buf_pool)
goto fail;
phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
LPFC_BPL_SIZE, 8,0);
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
goto fail_free_dma_buf_pool;
@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
sizeof(struct lpfc_nodelist));
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
LPFC_BPL_SIZE, 8, 0);
if (!phba->lpfc_hbq_pool)
phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
phba->pcidev,
LPFC_HDR_BUF_SIZE, align, 0);
if (!phba->lpfc_hrb_pool)
goto fail_free_nlp_mem_pool;
phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
phba->pcidev,
LPFC_DATA_BUF_SIZE, align, 0);
if (!phba->lpfc_drb_pool)
goto fail_free_hbq_pool;
/* vpi zero is reserved for the physical port so add 1 to max */
longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
if (!phba->vpi_bmask)
goto fail_free_hbq_pool;
goto fail_free_dbq_pool;
return 0;
fail_free_dbq_pool:
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
fail_free_hbq_pool:
lpfc_sli_hbqbuf_free_all(phba);
pci_pool_destroy(phba->lpfc_hbq_pool);
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
fail_free_nlp_mem_pool:
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
}
/**
* lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
* lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for
*
* Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
* lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
* lpfc_nodelist. Also frees the VPI bitmask
* Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
* routine is the counterpart of lpfc_mem_alloc.
*
* Returns: None
**/
void
lpfc_mem_free(struct lpfc_hba * phba)
lpfc_mem_free(struct lpfc_hba *phba)
{
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
/* Free VPI bitmask memory */
kfree(phba->vpi_bmask);
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
/* Free mbox memory pool */
mempool_destroy(phba->mbox_mem_pool);
phba->mbox_mem_pool = NULL;
/* Free MBUF memory pool */
for (i = 0; i < pool->current_count; i++)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
pci_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
/* Free DMA buffer memory pool */
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
return;
}
/**
* lpfc_mem_free_all - Frees all PCI and driver memory
* @phba: HBA to free memory for
*
* Description: Frees memory from the PCI and driver memory pools, including
* lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, and lpfc_hrb_pool. Frees
* kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
* the VPI bitmask.
*
* Returns: None
**/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
LPFC_MBOXQ_t *mbox, *next_mbox;
struct lpfc_dmabuf *mp;
int i;
kfree(phba->vpi_bmask);
lpfc_sli_hbqbuf_free_all(phba);
/* Free memory used in mailbox queue back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
/* Free memory used in mailbox cmpl list back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
/* Free the active mailbox command back to the mailbox memory pool */
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
if (psli->mbox_active) {
mbox = psli->mbox_active;
mp = (struct lpfc_dmabuf *) (mbox->context1);
@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
psli->mbox_active = NULL;
}
for (i = 0; i < pool->current_count; i++)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
pci_pool_destroy(phba->lpfc_hbq_pool);
mempool_destroy(phba->nlp_mem_pool);
mempool_destroy(phba->mbox_mem_pool);
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
pci_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_hbq_pool = NULL;
phba->nlp_mem_pool = NULL;
phba->mbox_mem_pool = NULL;
phba->lpfc_scsi_dma_buf_pool = NULL;
phba->lpfc_mbuf_pool = NULL;
/* Free and destroy all the allocated memory pools */
lpfc_mem_free(phba);
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
return;
}
/**
@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
* lpfc_els_hbq_alloc - Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
* Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
* Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
* pool along with a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
if (!hbqbp)
return NULL;
hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
}
/**
* lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* @phba: HBA buffer was allocated for
* @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
*
@ -348,11 +405,72 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}
/**
* lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
* @phba: HBA to allocate a receive buffer for
*
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool and
* lpfc_drb_pool PCI pools, along with a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
*
* Returns:
* pointer to HBQ on success
* NULL on failure
**/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *dma_buf;
dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&dma_buf->hbuf.phys);
if (!dma_buf->hbuf.virt) {
kfree(dma_buf);
return NULL;
}
dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
&dma_buf->dbuf.phys);
if (!dma_buf->dbuf.virt) {
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
return NULL;
}
dma_buf->size = LPFC_BPL_SIZE;
return dma_buf;
}
/**
* lpfc_sli4_rb_free - Frees a receive buffer
* @phba: HBA buffer was allocated for
* @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
*
* Description: Frees both the container and the DMA-mapped buffers returned by
* lpfc_sli4_rb_alloc.
*
* Notes: Can be called with or without locks held.
*
* Returns: None
**/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab);
return;
}
/**
* lpfc_in_buf_free - Free a DMA buffer
* @phba: HBA buffer is associated with

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -28,8 +28,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!mbox)
goto out;
rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *) sp, mbox, 0);
if (rc) {
mempool_free(mbox, phba->mbox_mem_pool);
@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_type & NLP_FABRIC) &&
vport->port_type == LPFC_NPIV_PORT) {
lpfc_linkdown_port(vport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (!ndlp->nlp_rpi) {
if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
return 0;
}
@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
lpfc_unreg_rpi(vport, ndlp);
if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
ADISC *ap;
int rc;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_sli4_resume_rpi(ndlp);
if (rc) {
/* Stay in state and retry. */
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
return ndlp->nlp_state;
}
}
if (ndlp->nlp_type & NLP_FCP_TARGET) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t did = mb->un.varWords[1];
if (mb->mbxStatus) {
@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
}
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_VALID;
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
void *arg, uint32_t evt)
{
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->mb;
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus)
if (!mb->mbxStatus) {
ndlp->nlp_rpi = mb->un.varWords[0];
else {
ndlp->nlp_flag |= NLP_RPI_VALID;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;

File diff suppressed because it is too large Load diff

View file

@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
struct fcp_rsp *fcp_rsp;
struct ulp_bde64 *fcp_bpl;
dma_addr_t dma_phys_bpl;
/* cur_iocbq has phys of the dma-able buffer.
* Iotag is in here
*/

File diff suppressed because it is too large Load diff

View file

@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
/* This structure is used to carry the needed response IOCB states */
struct lpfc_sli4_rspiocb_info {
uint8_t hw_status;
uint8_t bfield;
#define LPFC_XB 0x1
#define LPFC_PV 0x2
uint8_t priority;
uint8_t reserved;
};
/* This structure is used to handle IOCB requests / responses */
struct lpfc_iocbq {
/* lpfc_iocbqs are used in double linked lists */
struct list_head list;
struct list_head clist;
uint16_t iotag; /* pre-assigned IO tag */
uint16_t rsvd1;
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
@ -65,7 +75,7 @@ struct lpfc_iocbq {
struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
struct lpfc_sli4_rspiocb_info sli4_info;
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@ -81,14 +91,18 @@ struct lpfc_iocbq {
typedef struct lpfcMboxq {
/* MBOXQs are used in single linked lists */
struct list_head list; /* ptr to next mailbox command */
MAILBOX_t mb; /* Mailbox cmd */
struct lpfc_vport *vport;/* virutal port pointer */
union {
MAILBOX_t mb; /* Mailbox cmd */
struct lpfc_mqe mqe;
} u;
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
struct lpfc_mcqe mcqe;
struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;
#define MBX_POLL 1 /* poll mailbox till command done, then
@ -230,10 +244,11 @@ struct lpfc_sli {
/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT 0x1000 /* needed for Menlo fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
@ -261,6 +276,8 @@ struct lpfc_sli {
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
command */
#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
* or erase cmds. This is especially
* long because of the potential of

View file

@ -0,0 +1,467 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for fast-path FCP work queues */
#define LPFC_FN_EQN_MAX 8
#define LPFC_SP_EQN_DEF 1
#define LPFC_FP_EQN_DEF 1
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
#define LPFC_FN_WQN_MAX 32
#define LPFC_SP_WQN_DEF 1
#define LPFC_FP_WQN_DEF 4
#define LPFC_FP_WQN_MIN 1
#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
* FCF Record attributes.
*/
#define LPFC_FCOE_FCF_DEF_INDEX 0
#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
/* First 3 bytes of default FCF MAC is specified by FC_MAP */
#define LPFC_FCOE_FCF_MAC3 0xFF
#define LPFC_FCOE_FCF_MAC4 0xFF
#define LPFC_FCOE_FCF_MAC5 0xFE
#define LPFC_FCOE_FCF_MAP0 0x0E
#define LPFC_FCOE_FCF_MAP1 0xFC
#define LPFC_FCOE_FCF_MAP2 0x00
#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
#define LPFC_FCOE_FKA_ADV_PER 0
#define LPFC_FCOE_FIP_PRIORITY 0x80
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
LPFC_MCQ,
LPFC_WCQ,
LPFC_RCQ,
LPFC_MQ,
LPFC_WQ,
LPFC_HRQ,
LPFC_DRQ
};
/* The queue sub-type defines the functional purpose of the queue */
enum lpfc_sli4_queue_subtype {
LPFC_NONE,
LPFC_MBOX,
LPFC_FCP,
LPFC_ELS,
LPFC_USOL
};
union sli4_qe {
void *address;
struct lpfc_eqe *eqe;
struct lpfc_cqe *cqe;
struct lpfc_mcqe *mcqe;
struct lpfc_wcqe_complete *wcqe_complete;
struct lpfc_wcqe_release *wcqe_release;
struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
struct lpfc_rcqe_complete *rcqe_complete;
struct lpfc_mqe *mqe;
union lpfc_wqe *wqe;
struct lpfc_rqe *rqe;
};
struct lpfc_queue {
struct list_head list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
struct lpfc_hba *phba;
struct list_head child_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t queue_id; /* Queue ID assigned by the hardware */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
union sli4_qe qe[1]; /* array to index entries (must be last) */
};
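/*
 * Illustration (not part of the driver): because qe[] is a trailing
 * array, a queue object with entry_count entries would typically be
 * carved out of a single allocation, along the lines of:
 *
 *	q = kzalloc(sizeof(struct lpfc_queue) +
 *		    entry_count * sizeof(union sli4_qe), GFP_KERNEL);
 *
 * so that qe[0]..qe[entry_count - 1] index directly into the block;
 * this is why the array must stay the last member.
 */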
struct lpfc_cq_event {
struct list_head list;
union {
struct lpfc_mcqe mcqe_cmpl;
struct lpfc_acqe_link acqe_link;
struct lpfc_acqe_fcoe acqe_fcoe;
struct lpfc_acqe_dcbx acqe_dcbx;
struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
} cqe;
};
struct lpfc_sli4_link {
uint8_t speed;
uint8_t duplex;
uint8_t status;
uint8_t physical;
uint8_t fault;
};
struct lpfc_fcf {
uint8_t fabric_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
uint16_t fcfi;
uint32_t fcf_flag;
#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
#define FCF_REGISTERED 0x02 /* FCF registered with FW */
#define FCF_DISCOVERED 0x04 /* FCF discovery started */
#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
#define FCF_IN_USE 0x10 /* At least one discovery completed */
#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
uint32_t priority;
uint32_t addr_mode;
uint16_t vlan_id;
};
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
#define LPFC_REGION23_LAST_REC 0xff
struct lpfc_fip_param_hdr {
uint8_t type;
#define FCOE_PARAM_TYPE 0xA0
uint8_t length;
#define FCOE_PARAM_LENGTH 2
uint8_t parm_version;
#define FIPP_VERSION 0x01
uint8_t parm_flags;
#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
#define FIPP_MODE_ON 0x2
#define FIPP_MODE_OFF 0x0
#define FIPP_VLAN_VALID 0x1
};
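/*
 * Note on the _SHIFT/_MASK/_WORD triples above: lpfc accesses such
 * bit-fields through generic helper macros; a sketch of the usual
 * pattern (assumed here, see lpfc_hw4.h for the real definitions) is:
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so bf_get(lpfc_fip_param_hdr_fipp_mode, hdr) extracts bits 7:6 of
 * parm_flags and can be compared against FIPP_MODE_ON/FIPP_MODE_OFF.
 */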
struct lpfc_fcoe_params {
uint8_t fc_map[3];
uint8_t reserved1;
uint16_t vlan_tag;
uint8_t reserved[2];
};
struct lpfc_fcf_conn_hdr {
uint8_t type;
#define FCOE_CONN_TBL_TYPE 0xA1
uint8_t length; /* words */
uint8_t reserved[2];
};
struct lpfc_fcf_conn_rec {
uint16_t flags;
#define FCFCNCT_VALID 0x0001
#define FCFCNCT_BOOT 0x0002
#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
#define FCFCNCT_FBNM_VALID 0x0008
#define FCFCNCT_SWNM_VALID 0x0010
#define FCFCNCT_VLAN_VALID 0x0020
#define FCFCNCT_AM_VALID 0x0040
#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
uint16_t vlan_tag;
uint8_t fabric_name[8];
uint8_t switch_name[8];
};
struct lpfc_fcf_conn_entry {
struct list_head list;
struct lpfc_fcf_conn_rec conn_rec;
};
/*
* Define the host's bootstrap mailbox. This structure contains
* the member attributes needed to create, use, and destroy the
* bootstrap mailbox region.
*
* The macro definitions for the bmbx data structure are defined
* in lpfc_hw4.h with the register definition.
*/
struct lpfc_bmbx {
struct lpfc_dmabuf *dmabuf;
struct dma_address dma_address;
void *avirt;
dma_addr_t aphys;
uint32_t bmbx_size;
};
#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
#define LPFC_EQE_SIZE_4B 4
#define LPFC_EQE_SIZE_16B 16
#define LPFC_CQE_SIZE 16
#define LPFC_WQE_SIZE 64
#define LPFC_MQE_SIZE 256
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 256
#define LPFC_WQE_DEF_COUNT 64
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
#define LPFC_QUEUE_NOARM false
#define LPFC_QUEUE_REARM true
/*
* SLI4 CT field defines
*/
#define SLI4_CT_RPI 0
#define SLI4_CT_VPI 1
#define SLI4_CT_VFI 2
#define SLI4_CT_FCFI 3
#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
/*
* SLI4 specific data structures
*/
struct lpfc_max_cfg_param {
uint16_t max_xri;
uint16_t xri_base;
uint16_t xri_used;
uint16_t max_rpi;
uint16_t rpi_base;
uint16_t rpi_used;
uint16_t max_vpi;
uint16_t vpi_base;
uint16_t vpi_used;
uint16_t max_vfi;
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
uint16_t max_cq;
uint16_t max_wq;
};
struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
struct lpfc_fcp_eq_hdl {
uint32_t idx;
struct lpfc_hba *phba;
};
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
PCI BAR0, config space registers */
void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
PCI BAR1, control registers */
void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
PCI BAR2, doorbell registers */
/* BAR0 PCI config space register memory map */
void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
#define LPFC_ONLINE_NERR 0xFFFFFFFF
void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
/* BAR1 FCoE function CSR register memory map */
void __iomem *STAregaddr; /* Address to HST_STATE register */
void __iomem *ISRregaddr; /* Address to HST_ISR register */
void __iomem *IMRregaddr; /* Address to HST_IMR register */
void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
/* BAR2 VF-0 doorbell register memory map */
void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
struct msix_entry *msix_entries;
uint32_t cfg_eqn;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue **fp_eq; /* Fast-path event queue */
struct lpfc_queue *sp_eq; /* Slow-path event queue */
struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
int cq_esize;
int cq_ecount;
int wq_esize;
int wq_ecount;
int mq_esize;
int mq_ecount;
int rq_esize;
int rq_ecount;
#define LPFC_SP_EQ_MAX_INTR_SEC 10000
#define LPFC_FP_EQ_MAX_INTR_SEC 10000
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
struct list_head lpfc_abts_els_sgl_list;
struct lpfc_scsi_buf **lpfc_scsi_psb_array;
struct list_head lpfc_abts_scsi_buf_list;
uint32_t total_sglq_bufs;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
uint16_t rpi_count;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_rspiocb_work_queue;
struct list_head sp_cqe_event_pool;
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
enum lpfc_sge_type {
GEN_BUFF_TYPE,
SCSI_BUFF_TYPE
};
struct lpfc_sglq {
/* lpfc_sglqs are used in double linked lists */
struct list_head list;
struct list_head clist;
enum lpfc_sge_type buff_type; /* is this a scsi sgl */
uint16_t iotag; /* pre-assigned IO tag */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
dma_addr_t phys; /* physical address */
};
struct lpfc_rpi_hdr {
struct list_head list;
uint32_t len;
struct lpfc_dmabuf *dmabuf;
uint32_t page_count;
uint32_t start_rpi;
};
/*
* SLI4 specific function prototypes
*/
int lpfc_pci_function_reset(struct lpfc_hba *);
int lpfc_sli4_hba_setup(struct lpfc_hba *);
int lpfc_sli4_hba_down(struct lpfc_hba *);
int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
uint8_t, uint32_t, bool);
void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
struct lpfc_mbx_sge *);
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *);
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);

View file

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.1"
#define LPFC_DRIVER_VERSION "8.3.2"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"

View file

@ -32,8 +32,10 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
vpi = 0;
else
set_bit(vpi, phba->vpi_bmask);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->sli4_hba.max_cfg_param.vpi_used++;
spin_unlock_irq(&phba->hbalock);
return vpi;
}
@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
if (vpi == 0)
return;
spin_lock_irq(&phba->hbalock);
clear_bit(vpi, phba->vpi_bmask);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->sli4_hba.max_cfg_param.vpi_used--;
spin_unlock_irq(&phba->hbalock);
}
@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
if (!pmb) {
return -ENOMEM;
}
mb = &pmb->mb;
mb = &pmb->u.mb;
lpfc_read_sparam(phba, pmb, vport->vpi);
/*
@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
(vport->fc_flag & wait_flags) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
"1833 Vport discovery quiesce Wait:"
" vpi x%x state x%x fc_flags x%x"
" state x%x fc_flags x%x"
" num_nodes x%x, waiting 1000 msecs"
" total wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag, vport->num_disc_nodes,
vport->port_state, vport->fc_flag,
vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
msleep(1000);
} else {
/* Base case. Wait variants satisfied. Break out */
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
"1834 Vport discovery quiesced:"
" vpi x%x state x%x fc_flags x%x"
" state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
break;
@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
}
if (time_after(jiffies, wait_time_max))
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1835 Vport discovery quiesce failed:"
" vpi x%x state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
" state x%x fc_flags x%x wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_sli4_init_vpi(phba, vpi);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1838 Failed to INIT_VPI on vpi %d "
"status %d\n", vpi, rc);
rc = VPORT_NORESOURCES;
lpfc_free_vpi(phba, vpi);
goto error_out;
}
}
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"physical host\n");
return VPORT_ERROR;
}
/* If the vport is a static vport fail the deletion. */
if ((vport->vport_flag & STATIC_VPORT) &&
!(phba->pport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1837 vport_delete failed: Cannot delete "
"static vport.\n");
return VPORT_ERROR;
}
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
int index = 0;
vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
GFP_KERNEL);
if (vports == NULL)
return NULL;
@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
int i;
if (vports == NULL)
return;
for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}

View file

@ -61,6 +61,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include "mpt2sas_debug.h"
@ -68,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
#define MPT2SAS_DRIVER_VERSION "01.100.02.00"
#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
#define MPT2SAS_MAJOR_VERSION 01
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 02
#define MPT2SAS_BUILD_VERSION 03
#define MPT2SAS_RELEASE_VERSION 00
/*

View file

@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
}
/**
* _ctl_do_task_abort - assign an active smid to the abort_task
* _ctl_set_task_mid - assign an active smid to tm request
* @ioc: per adapter object
* @karg - (struct mpt2_ioctl_command)
* @tm_request - pointer to mf from user space
@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
* during failure, the reply frame is filled.
*/
static int
_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
char *desc = NULL;
if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
desc = "abort_task";
else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
desc = "query_task";
else
return 0;
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
"DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
tm_request->DevHandle, lun));
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
desc, tm_request->DevHandle, lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
tm_reply->TaskType = tm_request->TaskType;
tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
tm_reply->VP_ID = tm_request->VP_ID;
tm_reply->VF_ID = tm_request->VF_ID;
@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
return 1;
}
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
"DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name,
tm_request->DevHandle, lun, tm_request->TaskMID));
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
desc, tm_request->DevHandle, lun, tm_request->TaskMID));
return 0;
}
@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
if (_ctl_do_task_abort(ioc, &karg, tm_request)) {
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
mpt2sas_base_free_smid(ioc, smid);
goto out;
}

View file

@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
MODULE_DEVICE_TABLE(pci, scsih_pci_table);
/**
* scsih_set_debug_level - global setting of ioc->logging_level.
* _scsih_set_debug_level - global setting of ioc->logging_level.
*
* Note: The logging levels are defined in mpt2sas_debug.h.
*/
static int
scsih_set_debug_level(const char *val, struct kernel_param *kp)
_scsih_set_debug_level(const char *val, struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT2SAS_ADAPTER *ioc;
@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
ioc->logging_level = logging_level;
return 0;
}
module_param_call(logging_level, scsih_set_debug_level, param_get_int,
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
&logging_level, 0644);
/**
@ -883,6 +883,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
return found;
}
/**
* _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
* @ioc: per adapter object
* @id: target id
* @lun: lun number
* @channel: channel
* Context: This function will acquire ioc->scsi_lookup_lock.
*
* This will search for a matching channel:id:lun in the scsi_lookup array,
* returning 1 if found.
*/
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
unsigned int lun, int channel)
{
u8 found;
unsigned long flags;
int i;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
found = 0;
for (i = 0 ; i < ioc->request_depth; i++) {
if (ioc->scsi_lookup[i].scmd &&
(ioc->scsi_lookup[i].scmd->device->id == id &&
ioc->scsi_lookup[i].scmd->device->channel == channel &&
ioc->scsi_lookup[i].scmd->device->lun == lun)) {
found = 1;
goto out;
}
}
out:
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return found;
}
/**
* _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
* @ioc: per adapter object
@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
}
/**
* scsih_change_queue_depth - setting device queue depth
* _scsih_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
* Returns queue depth.
*/
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
struct Scsi_Host *shost = sdev->host;
int max_depth;
@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
}
/**
* scsih_change_queue_depth - changing device queue tag type
* _scsih_change_queue_depth - changing device queue tag type
* @sdev: scsi device struct
* @tag_type: requested tag type
*
* Returns queue tag type.
*/
static int
scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
}
/**
* scsih_target_alloc - target add routine
* _scsih_target_alloc - target add routine
* @starget: scsi target struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
scsih_target_alloc(struct scsi_target *starget)
_scsih_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
}
/**
* scsih_target_destroy - target destroy routine
* _scsih_target_destroy - target destroy routine
* @starget: scsi target struct
*
* Returns nothing.
*/
static void
scsih_target_destroy(struct scsi_target *starget)
_scsih_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
}
/**
* scsih_slave_alloc - device add routine
* _scsih_slave_alloc - device add routine
* @sdev: scsi device struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
scsih_slave_alloc(struct scsi_device *sdev)
_scsih_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT2SAS_ADAPTER *ioc;
@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
}
/**
* scsih_slave_destroy - device destroy routine
* _scsih_slave_destroy - device destroy routine
* @sdev: scsi device struct
*
* Returns nothing.
*/
static void
scsih_slave_destroy(struct scsi_device *sdev)
_scsih_slave_destroy(struct scsi_device *sdev)
{
struct MPT2SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
}
/**
* scsih_display_sata_capabilities - sata capabilities
* _scsih_display_sata_capabilities - sata capabilities
* @ioc: per adapter object
* @sas_device: the sas_device object
* @sdev: scsi device struct
*/
static void
scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
struct _sas_device *sas_device, struct scsi_device *sdev)
{
Mpi2ConfigReply_t mpi_reply;
@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
/**
* scsih_slave_configure - device configure routine.
* _scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
scsih_slave_configure(struct scsi_device *sdev)
_scsih_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
r_level, raid_device->handle,
(unsigned long long)raid_device->wwid,
raid_device->num_pds, ds);
scsih_change_queue_depth(sdev, qdepth);
_scsih_change_queue_depth(sdev, qdepth);
return 0;
}
@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device->slot);
if (!ssp_target)
scsih_display_sata_capabilities(ioc, sas_device, sdev);
_scsih_display_sata_capabilities(ioc, sas_device, sdev);
}
scsih_change_queue_depth(sdev, qdepth);
_scsih_change_queue_depth(sdev, qdepth);
if (ssp_target)
sas_read_port_mode_page(sdev);
@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
}
/**
* scsih_bios_param - fetch head, sector, cylinder info for a disk
* _scsih_bios_param - fetch head, sector, cylinder info for a disk
* @sdev: scsi device struct
* @bdev: pointer to block device context
* @capacity: device size (in 512 byte sectors)
@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
* Return nothing.
*/
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int params[])
{
int heads;
@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
}
/**
* scsih_tm_done - tm completion routine
* _scsih_tm_done - tm completion routine
* @ioc: per adapter object
* @smid: system request message index
* @VF_ID: virtual function id
@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
* Return nothing.
*/
static void
scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
MPI2DefaultReply_t *mpi_reply;
@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
}
/**
* scsih_abort - eh threads main abort routine
* _scsih_abort - eh threads main abort routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
scsih_abort(struct scsi_cmnd *scmd)
_scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
return r;
}
/**
* scsih_dev_reset - eh threads main device reset routine
* _scsih_dev_reset - eh threads main device reset routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
_scsih_dev_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct _sas_device *sas_device;
unsigned long flags;
u16 handle;
int r;
printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
ioc->name, scmd);
scsi_print_command(scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
ioc->name, scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
goto out;
}
/* for hidden raid components obtain the volume_handle */
handle = 0;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc,
sas_device_priv_data->sas_target->handle);
if (sas_device)
handle = sas_device->volume_handle;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
} else
handle = sas_device_priv_data->sas_target->handle;
if (!handle) {
scmd->result = DID_RESET << 16;
r = FAILED;
goto out;
}
mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_issue_tm(ioc, handle, 0,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
30);
/*
* sanity check: see whether all commands to this device have
* completed
*/
if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
scmd->device->lun, scmd->device->channel))
r = FAILED;
else
r = SUCCESS;
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
mutex_unlock(&ioc->tm_cmds.mutex);
out:
printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return r;
}
/**
* _scsih_target_reset - eh threads main target reset routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
_scsih_target_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
ioc->name, scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
}
/**
* scsih_abort - eh threads main host reset routine
* _scsih_host_reset - eh threads main host reset routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
scsih_host_reset(struct scsi_cmnd *scmd)
_scsih_host_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
}
/**
* scsih_qcmd - main scsi request entry point
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @scmd: pointer to scsi command object
* @mpi_request: pointer to the SCSI_IO reqest message frame
*
* Supporting protection 1 and 3.
*
* Returns nothing
*/
static void
_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
unsigned char prot_op = scsi_get_prot_op(scmd);
unsigned char prot_type = scsi_get_prot_type(scmd);
if (prot_type == SCSI_PROT_DIF_TYPE0 ||
prot_type == SCSI_PROT_DIF_TYPE2 ||
prot_op == SCSI_PROT_NORMAL)
return;
if (prot_op == SCSI_PROT_READ_STRIP)
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
else if (prot_op == SCSI_PROT_WRITE_INSERT)
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
else
return;
mpi_request->EEDPBlockSize = scmd->device->sector_size;
switch (prot_type) {
case SCSI_PROT_DIF_TYPE1:
/*
* enable ref/guard checking
* auto increment ref tag
*/
mpi_request->EEDPFlags = eedp_flags |
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
cpu_to_be32(scsi_get_lba(scmd));
break;
case SCSI_PROT_DIF_TYPE3:
/*
* enable guard checking
*/
mpi_request->EEDPFlags = eedp_flags |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
break;
}
}
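/*
 * Background note (standard T10 DIF semantics, not driver-specific):
 * Type 1 protection carries a reference tag that must match the lower
 * 32 bits of the LBA and increment per block, hence the INC_PRI_REFTAG
 * and CHECK_REFTAG flags plus the PrimaryReferenceTag seeded from
 * scsi_get_lba(); Type 3 treats the reference tag as opaque, so only
 * the guard (CRC) can be checked.
 */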
/**
* _scsih_eedp_error_handling - return sense code for EEDP errors
* @scmd: pointer to scsi command object
* @ioc_status: ioc status
*
* Returns nothing
*/
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
u8 ascq;
u8 sk;
u8 host_byte;
switch (ioc_status) {
case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
ascq = 0x01;
break;
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
ascq = 0x02;
break;
case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
ascq = 0x03;
break;
default:
ascq = 0x00;
break;
}
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
sk = ILLEGAL_REQUEST;
host_byte = DID_ABORT;
} else {
sk = ABORTED_COMMAND;
host_byte = DID_OK;
}
scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
SAM_STAT_CHECK_CONDITION;
}
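/*
 * Background note: ASC 0x10 with ASCQ 0x01/0x02/0x03 corresponds to the
 * standard SCSI additional sense codes "logical block guard check
 * failed", "logical block application tag check failed" and "logical
 * block reference tag check failed", matching the ioc_status cases
 * above.
 */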
/**
* _scsih_qcmd - main scsi request entry point
* @scmd: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
_scsih_setup_eedp(scmd, mpi_request);
mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
desc_ioc_state = "scsi ext terminated";
break;
case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
desc_ioc_state = "eedp guard error";
break;
case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
desc_ioc_state = "eedp ref tag error";
break;
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
desc_ioc_state = "eedp app tag error";
break;
default:
desc_ioc_state = "unknown";
break;
@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
/**
* scsih_io_done - scsi request callback
* _scsih_io_done - scsi request callback
* @ioc: per adapter object
* @smid: system request message index
* @VF_ID: virtual function id
@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* Return nothing.
*/
static void
scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
Mpi2SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
scmd->result = DID_RESET << 16;
break;
case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
_scsih_eedp_error_handling(scmd, ioc_status);
break;
case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
case MPI2_IOCSTATUS_INVALID_FUNCTION:
case MPI2_IOCSTATUS_INVALID_SGL:
@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
.module = THIS_MODULE,
.name = "Fusion MPT SAS Host",
.proc_name = MPT2SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
.slave_alloc = scsih_slave_alloc,
.slave_configure = scsih_slave_configure,
.target_destroy = scsih_target_destroy,
.slave_destroy = scsih_slave_destroy,
.change_queue_depth = scsih_change_queue_depth,
.change_queue_type = scsih_change_queue_type,
.eh_abort_handler = scsih_abort,
.eh_device_reset_handler = scsih_dev_reset,
.eh_host_reset_handler = scsih_host_reset,
.bios_param = scsih_bios_param,
.queuecommand = _scsih_qcmd,
.target_alloc = _scsih_target_alloc,
.slave_alloc = _scsih_slave_alloc,
.slave_configure = _scsih_slave_configure,
.target_destroy = _scsih_target_destroy,
.slave_destroy = _scsih_slave_destroy,
.change_queue_depth = _scsih_change_queue_depth,
.change_queue_type = _scsih_change_queue_type,
.eh_abort_handler = _scsih_abort,
.eh_device_reset_handler = _scsih_dev_reset,
.eh_target_reset_handler = _scsih_target_reset,
.eh_host_reset_handler = _scsih_host_reset,
.bios_param = _scsih_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = MPT2SAS_SG_DEPTH,
@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
}
/**
* scsih_remove - detach and remove add host
* _scsih_remove - detach and remove add host
* @pdev: PCI device struct
*
* Return nothing.
*/
static void __devexit
scsih_remove(struct pci_dev *pdev)
_scsih_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
}
/**
* scsih_probe - attach and add scsi host
* _scsih_probe - attach and add scsi host
* @pdev: PCI device struct
* @id: pci device id
*
* Returns 0 success, anything else error.
*/
static int
scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT2SAS_ADAPTER *ioc;
struct Scsi_Host *shost;
@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_add_shost_fail;
}
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION);
/* event thread */
snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
"fw_event%d", ioc->id);
@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#ifdef CONFIG_PM
/**
* scsih_suspend - power management suspend main entry point
* _scsih_suspend - power management suspend main entry point
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
* Returns 0 success, anything else error.
*/
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
}
/**
* scsih_resume - power management resume main entry point
* _scsih_resume - power management resume main entry point
* @pdev: PCI device struct
*
* Returns 0 success, anything else error.
*/
static int
scsih_resume(struct pci_dev *pdev)
_scsih_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
.id_table = scsih_pci_table,
.probe = scsih_probe,
.remove = __devexit_p(scsih_remove),
.probe = _scsih_probe,
.remove = __devexit_p(_scsih_remove),
#ifdef CONFIG_PM
.suspend = scsih_suspend,
.resume = scsih_resume,
.suspend = _scsih_suspend,
.resume = _scsih_resume,
#endif
};
/**
* scsih_init - main entry point for this driver.
* _scsih_init - main entry point for this driver.
*
* Returns 0 success, anything else error.
*/
static int __init
scsih_init(void)
_scsih_init(void)
{
int error;
@ -5630,10 +5855,10 @@ scsih_init(void)
mpt2sas_base_initialize_callback_handler();
/* queuecommand callback handler */
scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done);
scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
/* task management callback handler */
tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done);
tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@ -5659,12 +5884,12 @@ scsih_init(void)
}
/**
* scsih_exit - exit point for this driver (when it is a module).
* _scsih_exit - exit point for this driver (when it is a module).
*
* Returns 0 success, anything else error.
*/
static void __exit
scsih_exit(void)
_scsih_exit(void)
{
printk(KERN_INFO "mpt2sas version %s unloading\n",
MPT2SAS_DRIVER_VERSION);
@ -5682,5 +5907,5 @@ scsih_exit(void)
mpt2sas_ctl_exit();
}
module_init(scsih_init);
module_exit(scsih_exit);
module_init(_scsih_init);
module_exit(_scsih_exit);

View file

@ -264,7 +264,7 @@ struct rep_manu_reply{
};
/**
* transport_expander_report_manufacture - obtain SMP report_manufacture
* _transport_expander_report_manufacture - obtain SMP report_manufacture
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
@ -274,7 +274,7 @@ struct rep_manu_reply{
* Returns 0 for success, non-zero for failure.
*/
static int
transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address, struct sas_expander_device *edev)
{
Mpi2SmpPassthroughRequest_t *mpi_request;
@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
mpt2sas_port->remote_identify.device_type ==
MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
transport_expander_report_manufacture(ioc,
_transport_expander_report_manufacture(ioc,
mpt2sas_port->remote_identify.sas_address,
rphy_to_expander_device(rphy));
@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
}
/**
* transport_get_linkerrors -
* _transport_get_linkerrors -
* @phy: The sas phy object
*
* Only support sas_host direct attached phys.
@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
*
*/
static int
transport_get_linkerrors(struct sas_phy *phy)
_transport_get_linkerrors(struct sas_phy *phy)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
}
/**
* transport_get_enclosure_identifier -
* _transport_get_enclosure_identifier -
* @phy: The sas phy object
*
* Obtain the enclosure logical id for an expander.
* Returns 0 for success, non-zero for failure.
*/
static int
transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_node *sas_expander;
@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
}
/**
* transport_get_bay_identifier -
* _transport_get_bay_identifier -
* @phy: The sas phy object
*
* Returns the slot id for a device that resides inside an enclosure.
*/
static int
transport_get_bay_identifier(struct sas_rphy *rphy)
_transport_get_bay_identifier(struct sas_rphy *rphy)
{
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_device *sas_device;
@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
}
/**
* transport_phy_reset -
* _transport_phy_reset -
* @phy: The sas phy object
* @hard_reset:
*
@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
* Returns 0 for success, non-zero for failure.
*/
static int
transport_phy_reset(struct sas_phy *phy, int hard_reset)
_transport_phy_reset(struct sas_phy *phy, int hard_reset)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
}
/**
* transport_smp_handler - transport portal for smp passthru
* _transport_smp_handler - transport portal for smp passthru
* @shost: shost object
* @rphy: sas transport rphy object
* @req:
@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
* smp_rep_general /sys/class/bsg/expander-5:0
*/
static int
transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
struct sas_function_template mpt2sas_transport_functions = {
.get_linkerrors = transport_get_linkerrors,
.get_enclosure_identifier = transport_get_enclosure_identifier,
.get_bay_identifier = transport_get_bay_identifier,
.phy_reset = transport_phy_reset,
.smp_handler = transport_smp_handler,
.get_linkerrors = _transport_get_linkerrors,
.get_enclosure_identifier = _transport_get_enclosure_identifier,
.get_bay_identifier = _transport_get_bay_identifier,
.phy_reset = _transport_phy_reset,
.smp_handler = _transport_smp_handler,
};
struct scsi_transport_template *mpt2sas_transport_template;
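/*
 * Illustration (assumed from the SAS transport class API): the template
 * above is typically obtained once at module init via
 *
 *	mpt2sas_transport_template =
 *		sas_attach_transport(&mpt2sas_transport_functions);
 *
 * which is what exposes the _transport_* callbacks through sysfs/bsg.
 */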

File diff suppressed because it is too large

View file

@ -0,0 +1,42 @@
#
# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
#
# Copyright 2007 Red Hat, Inc.
# Copyright 2008 Marvell. <kewei@marvell.com>
#
# This file is licensed under GPLv2.
#
# This file is part of the 88SE64XX/88SE94XX driver.
#
# The 88SE64XX/88SE94XX driver is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 of the
# License.
#
# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
config SCSI_MVSAS
tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
depends on PCI
select SCSI_SAS_LIBSAS
select FW_LOADER
help
This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
PCI-E 88SE94XX chip based host adapters.
config SCSI_MVSAS_DEBUG
bool "Compile in debug mode"
default y
depends on SCSI_MVSAS
help
Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
the driver prints some messages to the console.

View file

@ -0,0 +1,32 @@
#
# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
#
# Copyright 2007 Red Hat, Inc.
# Copyright 2008 Marvell. <kewei@marvell.com>
#
# This file is licensed under GPLv2.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
EXTRA_CFLAGS += -DMV_DEBUG
endif
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
mvsas-y += mv_init.o \
mv_sas.o \
mv_64xx.o \
mv_94xx.o
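# Illustrative note: with CONFIG_SCSI_MVSAS=m and CONFIG_SCSI_MVSAS_DEBUG=y
# in the kernel .config, the four objects above are linked into mvsas.ko
# and compiled with -DMV_DEBUG added via EXTRA_CFLAGS.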

View file

@ -0,0 +1,793 @@
/*
* Marvell 88SE64xx hardware specific
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include "mv_sas.h"
#include "mv_64xx.h"
#include "mv_chips.h"
static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
{
void __iomem *regs = mvi->regs;
u32 reg;
struct mvs_phy *phy = &mvi->phy[i];
/* TODO check & save device type */
reg = mr32(MVS_GBL_PORT_TYPE);
phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
if (reg & MODE_SAS_SATA & (1 << i))
phy->phy_type |= PORT_TYPE_SAS;
else
phy->phy_type |= PORT_TYPE_SATA;
}
static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
tmp = mr32(MVS_PCS);
if (mvi->chip->n_phy <= 4)
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
else
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
mw32(MVS_PCS, tmp);
}
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
mvs_phy_hacks(mvi);
if (!(mvi->flags & MVF_FLAG_SOC)) {
/* TEST - for phy decoding error, adjust voltage levels */
mw32(MVS_P0_VSR_ADDR + 0, 0x8);
mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
mw32(MVS_P0_VSR_ADDR + 8, 0x8);
mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
mw32(MVS_P0_VSR_ADDR + 16, 0x8);
mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
mw32(MVS_P0_VSR_ADDR + 24, 0x8);
mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
} else {
int i;
/* disable auto port detection */
mw32(MVS_GBL_PORT_TYPE, 0);
for (i = 0; i < mvi->chip->n_phy; i++) {
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
mvs_write_port_vsr_data(mvi, i, 0x90000000);
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
mvs_write_port_vsr_data(mvi, i, 0x50f2);
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
mvs_write_port_vsr_data(mvi, i, 0x0e);
}
}
}
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
void __iomem *regs = mvi->regs;
u32 reg, tmp;
if (!(mvi->flags & MVF_FLAG_SOC)) {
if (phy_id < 4)
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
else
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
} else
reg = mr32(MVS_PHY_CTL);
tmp = reg;
if (phy_id < 4)
tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
else
tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
if (!(mvi->flags & MVF_FLAG_SOC)) {
if (phy_id < 4) {
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
mdelay(10);
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
} else {
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
mdelay(10);
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
}
} else {
mw32(MVS_PHY_CTL, tmp);
mdelay(10);
mw32(MVS_PHY_CTL, reg);
}
}
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
u32 tmp;
tmp = mvs_read_port_irq_stat(mvi, phy_id);
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
if (hard)
tmp |= PHY_RST_HARD;
else
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
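/* poll until the chip clears PHY_RST_HARD; note that no
 * timeout is applied here */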
do {
tmp = mvs_read_phy_ctl(mvi, phy_id);
} while (tmp & PHY_RST_HARD);
}
}
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp;
int i;
/* make sure interrupts are masked immediately (paranoia) */
mw32(MVS_GBL_CTL, 0);
tmp = mr32(MVS_GBL_CTL);
/* Reset Controller */
if (!(tmp & HBA_RST)) {
if (mvi->flags & MVF_PHY_PWR_FIX) {
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_PHY_DSBL;
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_PHY_DSBL;
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
}
}
/* make sure interrupts are masked immediately (paranoia) */
mw32(MVS_GBL_CTL, 0);
tmp = mr32(MVS_GBL_CTL);
/* Reset Controller */
if (!(tmp & HBA_RST)) {
/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
mw32_f(MVS_GBL_CTL, HBA_RST);
}
/* wait for reset to finish; timeout is just a guess */
i = 1000;
while (i-- > 0) {
msleep(10);
if (!(mr32(MVS_GBL_CTL) & HBA_RST))
break;
}
if (mr32(MVS_GBL_CTL) & HBA_RST) {
dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
return -EBUSY;
}
return 0;
}
static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
if (!(mvi->flags & MVF_FLAG_SOC)) {
u32 offs;
if (phy_id < 4)
offs = PCR_PHY_CTL;
else {
offs = PCR_PHY_CTL2;
phy_id -= 4;
}
pci_read_config_dword(mvi->pdev, offs, &tmp);
tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
pci_write_config_dword(mvi->pdev, offs, tmp);
} else {
tmp = mr32(MVS_PHY_CTL);
tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
mw32(MVS_PHY_CTL, tmp);
}
}
static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
if (!(mvi->flags & MVF_FLAG_SOC)) {
u32 offs;
if (phy_id < 4)
offs = PCR_PHY_CTL;
else {
offs = PCR_PHY_CTL2;
phy_id -= 4;
}
pci_read_config_dword(mvi->pdev, offs, &tmp);
tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
pci_write_config_dword(mvi->pdev, offs, tmp);
} else {
tmp = mr32(MVS_PHY_CTL);
tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
mw32(MVS_PHY_CTL, tmp);
}
}
static int __devinit mvs_64xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
u32 tmp, cctl;
if (mvi->pdev && mvi->pdev->revision == 0)
mvi->flags |= MVF_PHY_PWR_FIX;
if (!(mvi->flags & MVF_FLAG_SOC)) {
mvs_show_pcie_usage(mvi);
tmp = mvs_64xx_chip_reset(mvi);
if (tmp)
return tmp;
} else {
tmp = mr32(MVS_PHY_CTL);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_PHY_DSBL;
mw32(MVS_PHY_CTL, tmp);
}
/* Init Chip */
/* make sure RST is set; HBA_RST /should/ have done that for us */
cctl = mr32(MVS_CTL) & 0xFFFF;
if (cctl & CCTL_RST)
cctl &= ~CCTL_RST;
else
mw32_f(MVS_CTL, cctl | CCTL_RST);
if (!(mvi->flags & MVF_FLAG_SOC)) {
/* write to device control _AND_ device status register */
pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
tmp &= ~PRD_REQ_MASK;
tmp |= PRD_REQ_SIZE;
pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
tmp &= ~PCTL_PWR_OFF;
tmp &= ~PCTL_PHY_DSBL;
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
tmp &= PCTL_PWR_OFF;
tmp &= ~PCTL_PHY_DSBL;
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
} else {
tmp = mr32(MVS_PHY_CTL);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_COM_ON;
tmp &= ~PCTL_PHY_DSBL;
tmp |= PCTL_LINK_RST;
mw32(MVS_PHY_CTL, tmp);
msleep(100);
tmp &= ~PCTL_LINK_RST;
mw32(MVS_PHY_CTL, tmp);
msleep(100);
}
/* reset control */
mw32(MVS_PCS, 0); /* MVS_PCS */
/* init phys */
mvs_64xx_phy_hacks(mvi);
/* enable auto port detection */
mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
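/* the double 16-bit shift writes the upper 32 bits of the DMA address
 * without shifting by 32, which would be undefined when dma_addr_t is
 * only 32 bits wide */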
mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
mw32(MVS_TX_LO, mvi->tx_dma);
mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
mw32(MVS_RX_LO, mvi->rx_dma);
mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
for (i = 0; i < mvi->chip->n_phy; i++) {
/* set phy local SAS address */
/* the 64xx chip expects the SAS address in little-endian order */
mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
cpu_to_be64(mvi->phy[i].dev_sas_addr));
mvs_64xx_enable_xmt(mvi, i);
mvs_64xx_phy_reset(mvi, i, 1);
msleep(500);
mvs_64xx_detect_porttype(mvi, i);
}
if (mvi->flags & MVF_FLAG_SOC) {
/* set select registers */
writel(0x0E008000, regs + 0x000);
writel(0x59000008, regs + 0x004);
writel(0x20, regs + 0x008);
writel(0x20, regs + 0x00c);
writel(0x20, regs + 0x010);
writel(0x20, regs + 0x014);
writel(0x20, regs + 0x018);
writel(0x20, regs + 0x01c);
}
for (i = 0; i < mvi->chip->n_phy; i++) {
/* clear phy int status */
tmp = mvs_read_port_irq_stat(mvi, i);
tmp &= ~PHYEV_SIG_FIS;
mvs_write_port_irq_stat(mvi, i, tmp);
/* set phy int mask */
tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
PHYEV_DEC_ERR;
mvs_write_port_irq_mask(mvi, i, tmp);
msleep(100);
mvs_update_phyinfo(mvi, i, 1);
}
/* FIXME: update wide port bitmaps */
/* little endian for open address and command table, etc. */
/*
* it seems (from the spec) that turning on big-endian mode won't
* do us any good on big-endian machines; needs further confirmation
*/
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
cctl |= CCTL_ENDIAN_DATA;
cctl &= ~CCTL_ENDIAN_OPEN;
cctl |= CCTL_ENDIAN_RSP;
mw32_f(MVS_CTL, cctl);
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
mw32(MVS_PCS, tmp);
/* interrupt coalescing may cause a missed HW interrupt in some cases,
* and the max coalescing count is 0x1ff while our max slot count is
* 0x200, so set the count to 0 (coalescing disabled).
*/
tmp = 0;
mw32(MVS_INT_COAL, tmp);
tmp = 0x100;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
mw32(MVS_TX_CFG, 0);
mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
/* enable CMD/CMPL_Q/RESP mode */
mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
PCS_CMD_EN | PCS_CMD_STOP_ERR);
/* enable completion queue interrupt */
tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
CINT_DMA_PCIE);
mw32(MVS_INT_MASK, tmp);
/* Enable SRS interrupt */
mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
return 0;
}
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
if (!mvs_ioremap(mvi, 4, 2))
return 0;
return -1;
}
static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
mvs_iounmap(mvi->regs);
mvs_iounmap(mvi->regs_ex);
}
static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp;
tmp = mr32(MVS_GBL_CTL);
mw32(MVS_GBL_CTL, tmp | INT_EN);
}
static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp;
tmp = mr32(MVS_GBL_CTL);
mw32(MVS_GBL_CTL, tmp & ~INT_EN);
}
static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
{
void __iomem *regs = mvi->regs;
u32 stat;
if (!(mvi->flags & MVF_FLAG_SOC)) {
stat = mr32(MVS_GBL_INT_STAT);
if (stat == 0 || stat == 0xffffffff)
return 0;
} else
stat = 1;
return stat;
}
static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
void __iomem *regs = mvi->regs;
/* clear CMD_CMPLT ASAP */
mw32_f(MVS_INT_STAT, CINT_DONE);
#ifndef MVS_USE_TASKLET
spin_lock(&mvi->lock);
#endif
mvs_int_full(mvi);
#ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
#endif
return IRQ_HANDLED;
}
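/*
 * Wait for a slot to go inactive: write the slot's bit into the two
 * command-active banks via the indirect command register port
 * (offsets 0x00 and 0x40), then poll until the hardware clears it.
 */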
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
do {
tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
} while (tmp & 1 << (slot_idx % 32));
do {
tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
} while (tmp & 1 << (slot_idx % 32));
}
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs)
{
void __iomem *regs = mvi->regs;
u32 tmp;
if (type == PORT_TYPE_SATA) {
tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
mw32(MVS_INT_STAT_SRS_0, tmp);
}
mw32(MVS_INT_STAT, CINT_CI_STOP);
tmp = mr32(MVS_PCS) | 0xFF00;
mw32(MVS_PCS, tmp);
}
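/*
 * SATA register-set management: sets 0-15 are enabled through
 * MVS_PCS and sets 16 and up through MVS_CTL. Freeing a set clears
 * its enable bit and any pending SRS interrupt status for it.
 */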
static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
void __iomem *regs = mvi->regs;
u32 tmp, offs;
if (*tfs == MVS_ID_NOT_MAPPED)
return;
offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
if (*tfs < 16) {
tmp = mr32(MVS_PCS);
mw32(MVS_PCS, tmp & ~offs);
} else {
tmp = mr32(MVS_CTL);
mw32(MVS_CTL, tmp & ~offs);
}
tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
if (tmp)
mw32(MVS_INT_STAT_SRS_0, tmp);
*tfs = MVS_ID_NOT_MAPPED;
return;
}
static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
int i;
u32 tmp, offs;
void __iomem *regs = mvi->regs;
if (*tfs != MVS_ID_NOT_MAPPED)
return 0;
tmp = mr32(MVS_PCS);
for (i = 0; i < mvi->chip->srs_sz; i++) {
if (i == 16)
tmp = mr32(MVS_CTL);
offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
if (!(tmp & offs)) {
*tfs = i;
if (i < 16)
mw32(MVS_PCS, tmp | offs);
else
mw32(MVS_CTL, tmp | offs);
tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
if (tmp)
mw32(MVS_INT_STAT_SRS_0, tmp);
return 0;
}
}
return MVS_ID_NOT_MAPPED;
}
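/* build the PRD (physical region descriptor) table from a scatterlist */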
void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
int i;
struct scatterlist *sg;
struct mvs_prd *buf_prd = prd;
for_each_sg(scatter, sg, nr, i) {
buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
buf_prd->len = cpu_to_le32(sg_dma_len(sg));
buf_prd++;
}
}
static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
{
u32 phy_st;
mvs_write_port_cfg_addr(mvi, i,
PHYR_PHY_STAT);
phy_st = mvs_read_port_cfg_data(mvi, i);
if (phy_st & PHY_OOB_DTCTD)
return 1;
return 0;
}
static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
struct sas_identify_frame *id)
{
struct mvs_phy *phy = &mvi->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
sas_phy->linkrate =
(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
phy->minimum_linkrate =
(phy->phy_status &
PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
phy->maximum_linkrate =
(phy->phy_status &
PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
phy->dev_info = mvs_read_port_cfg_data(mvi, i);
mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
phy->att_dev_sas_addr =
(u64) mvs_read_port_cfg_data(mvi, i) << 32;
mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
}
static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
u32 tmp;
struct mvs_phy *phy = &mvi->phy[i];
/* workaround for HW phy decoding error on 1.5g disk drive */
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
tmp = mvs_read_port_vsr_data(mvi, i);
if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
SAS_LINK_RATE_1_5_GBPS)
tmp &= ~PHY_MODE6_LATECLK;
else
tmp |= PHY_MODE6_LATECLK;
mvs_write_port_vsr_data(mvi, i, tmp);
}
void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates)
{
u32 lrmin = 0, lrmax = 0;
u32 tmp;
tmp = mvs_read_phy_ctl(mvi, phy_id);
lrmin = (rates->minimum_linkrate << 8);
lrmax = (rates->maximum_linkrate << 12);
if (lrmin) {
tmp &= ~(0xf << 8);
tmp |= lrmin;
}
if (lrmax) {
tmp &= ~(0xf << 12);
tmp |= lrmax;
}
mvs_write_phy_ctl(mvi, phy_id, tmp);
mvs_64xx_phy_reset(mvi, phy_id, 1);
}
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
{
u32 tmp;
void __iomem *regs = mvi->regs;
tmp = mr32(MVS_PCS);
mw32(MVS_PCS, tmp & 0xFFFF);
mw32(MVS_PCS, tmp);
tmp = mr32(MVS_CTL);
mw32(MVS_CTL, tmp & 0xFFFF);
mw32(MVS_CTL, tmp);
}
u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex;
return ior32(SPI_DATA_REG_64XX);
}
void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex;
iow32(SPI_DATA_REG_64XX, data);
}
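/*
 * 64xx SPI command word layout: opcode in bits 31:24, read flag in
 * bit 23, address-valid flag in bit 22, transfer length starting at
 * bit 19, and an 18-bit flash address in the low bits.
 */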
int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
u32 *dwCmd,
u8 cmd,
u8 read,
u8 length,
u32 addr
)
{
u32 dwTmp;
dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
if (read)
dwTmp |= 1U<<23;
if (addr != MV_MAX_U32) {
dwTmp |= 1U<<22;
dwTmp |= (addr & 0x0003FFFF);
}
*dwCmd = dwTmp;
return 0;
}
int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
void __iomem *regs = mvi->regs_ex;
int retry;
for (retry = 0; retry < 1; retry++) {
iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
iow32(SPI_CMD_REG_64XX, cmd);
iow32(SPI_CTRL_REG_64XX,
SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
}
return 0;
}
int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
void __iomem *regs = mvi->regs_ex;
u32 i, dwTmp;
for (i = 0; i < timeout; i++) {
dwTmp = ior32(SPI_CTRL_REG_64XX);
if (!(dwTmp & SPI_CTRL_SPISTART))
return 0;
msleep(10);
}
return -1;
}
#ifndef DISABLE_HOTPLUG_DMA_FIX
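/*
 * Hotplug DMA workaround: point every PRD entry after the real
 * scatterlist at the supplied fallback buffer so the DMA engine
 * never fetches an uninitialized descriptor.
 */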
void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
buf_prd += from;
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
buf_prd->addr = cpu_to_le64(buf_dma);
buf_prd->len = cpu_to_le32(buf_len);
++buf_prd;
}
}
#endif
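/*
 * Per-chip dispatch table; the core driver invokes these handlers
 * through MVS_CHIP_DISP. Entries are positional and must match
 * struct mvs_dispatch.
 */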
const struct mvs_dispatch mvs_64xx_dispatch = {
"mv64xx",
mvs_64xx_init,
NULL,
mvs_64xx_ioremap,
mvs_64xx_iounmap,
mvs_64xx_isr,
mvs_64xx_isr_status,
mvs_64xx_interrupt_enable,
mvs_64xx_interrupt_disable,
mvs_read_phy_ctl,
mvs_write_phy_ctl,
mvs_read_port_cfg_data,
mvs_write_port_cfg_data,
mvs_write_port_cfg_addr,
mvs_read_port_vsr_data,
mvs_write_port_vsr_data,
mvs_write_port_vsr_addr,
mvs_read_port_irq_stat,
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_64xx_command_active,
mvs_64xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
mvs_int_full,
mvs_64xx_assign_reg_set,
mvs_64xx_free_reg_set,
mvs_get_prd_size,
mvs_get_prd_count,
mvs_64xx_make_prd,
mvs_64xx_detect_porttype,
mvs_64xx_oob_done,
mvs_64xx_fix_phy_info,
mvs_64xx_phy_work_around,
mvs_64xx_phy_set_link_rate,
mvs_hw_max_link_rate,
mvs_64xx_phy_disable,
mvs_64xx_phy_enable,
mvs_64xx_phy_reset,
mvs_64xx_stp_reset,
mvs_64xx_clear_active_cmds,
mvs_64xx_spi_read_data,
mvs_64xx_spi_write_data,
mvs_64xx_spi_buildcmd,
mvs_64xx_spi_issuecmd,
mvs_64xx_spi_waitdataready,
#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_64xx_fix_dma,
#endif
};

View file

@@ -0,0 +1,151 @@
/*
* Marvell 88SE64xx hardware-specific header file
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#ifndef _MVS64XX_REG_H_
#define _MVS64XX_REG_H_
#include <linux/types.h>
#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
/* enhanced mode registers (BAR4) */
enum hw_registers {
MVS_GBL_CTL = 0x04, /* global control */
MVS_GBL_INT_STAT = 0x08, /* global irq status */
MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
MVS_PHY_CTL = 0x40, /* SOC PHY Control */
MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
MVS_GBL_PORT_TYPE = 0xa0, /* port type */
MVS_CTL = 0x100, /* SAS/SATA port configuration */
MVS_PCS = 0x104, /* SAS/SATA port control/status */
MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
MVS_CMD_LIST_HI = 0x10C,
MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
MVS_RX_FIS_HI = 0x114,
MVS_TX_CFG = 0x120, /* TX configuration */
MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
MVS_TX_HI = 0x128,
MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
MVS_RX_CFG = 0x134, /* RX configuration */
MVS_RX_LO = 0x138, /* RX (completion) ring addr */
MVS_RX_HI = 0x13C,
MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
MVS_INT_COAL = 0x148, /* Int coalescing config */
MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
MVS_INT_STAT = 0x150, /* Central int status */
MVS_INT_MASK = 0x154, /* Central int enable */
MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
MVS_INT_MASK_SRS_0 = 0x15C,
/* ports 1-3 follow after this */
MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
/* ports 5-7 follow after this */
MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
/* ports 1-3 follow after this */
MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
/* ports 5-7 follow after this */
MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
/* ports 1-3 follow after this */
MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
/* ports 5-7 follow after this */
MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
/* ports 1-3 follow after this */
MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
/* ports 5-7 follow after this */
MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
};
enum pci_cfg_registers {
PCR_PHY_CTL = 0x40,
PCR_PHY_CTL2 = 0x90,
PCR_DEV_CTRL = 0xE8,
PCR_LINK_STAT = 0xF2,
};
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
VSR_PHY_STAT = 0x00, /* Phy Status */
VSR_PHY_MODE1 = 0x01, /* phy tx */
VSR_PHY_MODE2 = 0x02, /* tx scc */
VSR_PHY_MODE3 = 0x03, /* pll */
VSR_PHY_MODE4 = 0x04, /* VCO */
VSR_PHY_MODE5 = 0x05, /* Rx */
VSR_PHY_MODE6 = 0x06, /* CDR */
VSR_PHY_MODE7 = 0x07, /* Impedance */
VSR_PHY_MODE8 = 0x08, /* Voltage */
VSR_PHY_MODE9 = 0x09, /* Test */
VSR_PHY_MODE10 = 0x0A, /* Power */
VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
};
enum chip_register_bits {
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
};
#define MAX_SG_ENTRY 64
struct mvs_prd {
__le64 addr; /* 64-bit buffer address */
__le32 reserved;
__le32 len; /* 16-bit length */
};
#define SPI_CTRL_REG 0xc0
#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
#define SPI_CTRL_SPIRDY (1U<<22)
#define SPI_CTRL_SPISTART (1U<<20)
#define SPI_CMD_REG 0xc4
#define SPI_DATA_REG 0xc8
#define SPI_CTRL_REG_64XX 0x10
#define SPI_CMD_REG_64XX 0x14
#define SPI_DATA_REG_64XX 0x18
#endif

View file

@@ -0,0 +1,672 @@
/*
* Marvell 88SE94xx hardware-specific support
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include "mv_sas.h"
#include "mv_94xx.h"
#include "mv_chips.h"
static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
{
u32 reg;
struct mvs_phy *phy = &mvi->phy[i];
u32 phy_status;
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
reg = mvs_read_port_vsr_data(mvi, i);
phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
switch (phy_status) {
case 0x10:
phy->phy_type |= PORT_TYPE_SAS;
break;
case 0x1d:
default:
phy->phy_type |= PORT_TYPE_SATA;
break;
}
}
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
tmp = mr32(MVS_PCS);
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
mw32(MVS_PCS, tmp);
}
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
u32 tmp;
tmp = mvs_read_port_irq_stat(mvi, phy_id);
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
if (hard) {
tmp = mvs_read_phy_ctl(mvi, phy_id);
tmp |= PHY_RST_HARD;
mvs_write_phy_ctl(mvi, phy_id, tmp);
do {
tmp = mvs_read_phy_ctl(mvi, phy_id);
} while (tmp & PHY_RST_HARD);
} else {
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
tmp = mvs_read_port_vsr_data(mvi, phy_id);
tmp |= PHY_RST;
mvs_write_port_vsr_data(mvi, phy_id, tmp);
}
}
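/* disable the phy by setting bit 23 in its VSR PHY_MODE2 register */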
static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
u32 tmp;
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
tmp = mvs_read_port_vsr_data(mvi, phy_id);
mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
}
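/* phy bring-up: raw VSR writes (apparently vendor-tuned), then PHY_MODE2 */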
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
}
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
u32 tmp, cctl;
mvs_show_pcie_usage(mvi);
if (mvi->flags & MVF_FLAG_SOC) {
tmp = mr32(MVS_PHY_CTL);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_PHY_DSBL;
mw32(MVS_PHY_CTL, tmp);
}
/* Init Chip */
/* make sure RST is set; HBA_RST /should/ have done that for us */
cctl = mr32(MVS_CTL) & 0xFFFF;
if (cctl & CCTL_RST)
cctl &= ~CCTL_RST;
else
mw32_f(MVS_CTL, cctl | CCTL_RST);
if (mvi->flags & MVF_FLAG_SOC) {
tmp = mr32(MVS_PHY_CTL);
tmp &= ~PCTL_PWR_OFF;
tmp |= PCTL_COM_ON;
tmp &= ~PCTL_PHY_DSBL;
tmp |= PCTL_LINK_RST;
mw32(MVS_PHY_CTL, tmp);
msleep(100);
tmp &= ~PCTL_LINK_RST;
mw32(MVS_PHY_CTL, tmp);
msleep(100);
}
/* reset control */
mw32(MVS_PCS, 0); /* MVS_PCS */
mw32(MVS_STP_REG_SET_0, 0);
mw32(MVS_STP_REG_SET_1, 0);
/* init phys */
mvs_phy_hacks(mvi);
/* disable Multiplexing, enable phy implemented */
mw32(MVS_PORTS_IMP, 0xFF);
mw32(MVS_PA_VSR_ADDR, 0x00000104);
mw32(MVS_PA_VSR_PORT, 0x00018080);
mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
mw32(MVS_PA_VSR_PORT, 0x0084ffff);
/* set LED to blink during I/O */
mw32(MVS_PA_VSR_ADDR, 0x00000030);
tmp = mr32(MVS_PA_VSR_PORT);
tmp &= 0xFFFF00FF;
tmp |= 0x00003300;
mw32(MVS_PA_VSR_PORT, tmp);
mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
mw32(MVS_TX_LO, mvi->tx_dma);
mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
mw32(MVS_RX_LO, mvi->rx_dma);
mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
for (i = 0; i < mvi->chip->n_phy; i++) {
mvs_94xx_phy_disable(mvi, i);
/* set phy local SAS address */
mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
(mvi->phy[i].dev_sas_addr));
mvs_94xx_enable_xmt(mvi, i);
mvs_94xx_phy_enable(mvi, i);
mvs_94xx_phy_reset(mvi, i, 1);
msleep(500);
mvs_94xx_detect_porttype(mvi, i);
}
if (mvi->flags & MVF_FLAG_SOC) {
/* set select registers */
writel(0x0E008000, regs + 0x000);
writel(0x59000008, regs + 0x004);
writel(0x20, regs + 0x008);
writel(0x20, regs + 0x00c);
writel(0x20, regs + 0x010);
writel(0x20, regs + 0x014);
writel(0x20, regs + 0x018);
writel(0x20, regs + 0x01c);
}
for (i = 0; i < mvi->chip->n_phy; i++) {
/* clear phy int status */
tmp = mvs_read_port_irq_stat(mvi, i);
tmp &= ~PHYEV_SIG_FIS;
mvs_write_port_irq_stat(mvi, i, tmp);
/* set phy int mask */
tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
mvs_write_port_irq_mask(mvi, i, tmp);
msleep(100);
mvs_update_phyinfo(mvi, i, 1);
}
/* FIXME: update wide port bitmaps */
/* little endian for open address and command table, etc. */
/*
* it seems (from the spec) that turning on big-endian mode won't
* do us any good on big-endian machines; needs further confirmation
*/
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
cctl |= CCTL_ENDIAN_DATA;
cctl &= ~CCTL_ENDIAN_OPEN;
cctl |= CCTL_ENDIAN_RSP;
mw32_f(MVS_CTL, cctl);
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
mw32(MVS_PCS, tmp);
/* interrupt coalescing may cause a missed HW interrupt in some cases,
* and the max coalescing count is 0x1ff while our max slot count is
* 0x200, so set the count to 0 (coalescing disabled).
*/
tmp = 0;
mw32(MVS_INT_COAL, tmp);
tmp = 0x100;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
mw32(MVS_TX_CFG, 0);
mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
/* enable CMD/CMPL_Q/RESP mode */
mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
PCS_CMD_EN | PCS_CMD_STOP_ERR);
/* enable completion queue interrupt */
tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
CINT_DMA_PCIE);
tmp |= CINT_PHY_MASK;
mw32(MVS_INT_MASK, tmp);
/* Enable SRS interrupt */
mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
return 0;
}
static int mvs_94xx_ioremap(struct mvs_info *mvi)
{
if (!mvs_ioremap(mvi, 2, -1)) {
mvi->regs_ex = mvi->regs + 0x10200;
mvi->regs += 0x20000;
if (mvi->id == 1)
mvi->regs += 0x4000;
return 0;
}
return -1;
}
static void mvs_94xx_iounmap(struct mvs_info *mvi)
{
if (mvi->regs) {
mvi->regs -= 0x20000;
if (mvi->id == 1)
mvi->regs -= 0x4000;
mvs_iounmap(mvi->regs);
}
}
static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex;
u32 tmp;
tmp = mr32(MVS_GBL_CTL);
tmp |= (IRQ_SAS_A | IRQ_SAS_B);
mw32(MVS_GBL_INT_STAT, tmp);
writel(tmp, regs + 0x0C);
writel(tmp, regs + 0x10);
writel(tmp, regs + 0x14);
writel(tmp, regs + 0x18);
mw32(MVS_GBL_CTL, tmp);
}
static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex;
u32 tmp;
tmp = mr32(MVS_GBL_CTL);
tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
mw32(MVS_GBL_INT_STAT, tmp);
writel(tmp, regs + 0x0C);
writel(tmp, regs + 0x10);
writel(tmp, regs + 0x14);
writel(tmp, regs + 0x18);
mw32(MVS_GBL_CTL, tmp);
}
static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
{
void __iomem *regs = mvi->regs_ex;
u32 stat = 0;
if (!(mvi->flags & MVF_FLAG_SOC)) {
stat = mr32(MVS_GBL_INT_STAT);
if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
return 0;
}
return stat;
}
static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
void __iomem *regs = mvi->regs;
if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
((stat & IRQ_SAS_B) && mvi->id == 1)) {
mw32_f(MVS_INT_STAT, CINT_DONE);
#ifndef MVS_USE_TASKLET
spin_lock(&mvi->lock);
#endif
mvs_int_full(mvi);
#ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
#endif
}
return IRQ_HANDLED;
}
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
do {
tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
} while (tmp & 1 << (slot_idx % 32));
}
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs)
{
void __iomem *regs = mvi->regs;
u32 tmp;
if (type == PORT_TYPE_SATA) {
tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
mw32(MVS_INT_STAT_SRS_0, tmp);
}
mw32(MVS_INT_STAT, CINT_CI_STOP);
tmp = mr32(MVS_PCS) | 0xFF00;
mw32(MVS_PCS, tmp);
}
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
void __iomem *regs = mvi->regs;
u32 tmp;
u8 reg_set = *tfs;
if (*tfs == MVS_ID_NOT_MAPPED)
return;
mvi->sata_reg_set &= ~bit(reg_set);
if (reg_set < 32) {
w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
if (tmp)
mw32(MVS_INT_STAT_SRS_0, tmp);
} else {
w_reg_set_enable(reg_set, mvi->sata_reg_set);
tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
if (tmp)
mw32(MVS_INT_STAT_SRS_1, tmp);
}
*tfs = MVS_ID_NOT_MAPPED;
return;
}
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
int i;
void __iomem *regs = mvi->regs;
if (*tfs != MVS_ID_NOT_MAPPED)
return 0;
i = mv_ffc64(mvi->sata_reg_set);
if (i >= 32) {
mvi->sata_reg_set |= bit(i);
w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
*tfs = i;
return 0;
} else if (i >= 0) {
mvi->sata_reg_set |= bit(i);
w_reg_set_enable(i, (u32)mvi->sata_reg_set);
*tfs = i;
return 0;
}
return MVS_ID_NOT_MAPPED;
}
static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
int i;
struct scatterlist *sg;
struct mvs_prd *buf_prd = prd;
for_each_sg(scatter, sg, nr, i) {
buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
buf_prd++;
}
}
static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
u32 phy_st;
phy_st = mvs_read_phy_ctl(mvi, i);
if (phy_st & PHY_READY_MASK) /* phy ready */
return 1;
return 0;
}
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
struct sas_identify_frame *id)
{
int i;
u32 id_frame[7];
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ID_FRAME0 + i * 4);
id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
}
memcpy(id, id_frame, 28);
}
static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
struct sas_identify_frame *id)
{
int i;
u32 id_frame[7];
/* mvs_hexdump(28, (u8 *)id_frame, 0); */
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ATT_ID_FRAME0 + i * 4);
id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
mv_dprintk("94xx phy %d atta frame %d %x.\n",
port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
}
/* mvs_hexdump(28, (u8 *)id_frame, 0); */
memcpy(id, id_frame, 28);
}
static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
{
u32 att_dev_info = 0;
att_dev_info |= id->dev_type;
if (id->stp_iport)
att_dev_info |= PORT_DEV_STP_INIT;
if (id->smp_iport)
att_dev_info |= PORT_DEV_SMP_INIT;
if (id->ssp_iport)
att_dev_info |= PORT_DEV_SSP_INIT;
if (id->stp_tport)
att_dev_info |= PORT_DEV_STP_TRGT;
if (id->smp_tport)
att_dev_info |= PORT_DEV_SMP_TRGT;
if (id->ssp_tport)
att_dev_info |= PORT_DEV_SSP_TRGT;
att_dev_info |= (u32)id->phy_id<<24;
return att_dev_info;
}
static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
{
return mvs_94xx_make_dev_info(id);
}
static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
struct sas_identify_frame *id)
{
struct mvs_phy *phy = &mvi->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
sas_phy->linkrate =
(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
sas_phy->linkrate += 0x8;
mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
mvs_94xx_get_dev_identify_frame(mvi, i, id);
phy->dev_info = mvs_94xx_make_dev_info(id);
if (phy->phy_type & PORT_TYPE_SAS) {
mvs_94xx_get_att_identify_frame(mvi, i, id);
phy->att_dev_info = mvs_94xx_make_att_info(id);
phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
} else {
phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
}
}
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates)
{
/* TODO */
}
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
{
u32 tmp;
void __iomem *regs = mvi->regs;
tmp = mr32(MVS_STP_REG_SET_0);
mw32(MVS_STP_REG_SET_0, 0);
mw32(MVS_STP_REG_SET_0, tmp);
tmp = mr32(MVS_STP_REG_SET_1);
mw32(MVS_STP_REG_SET_1, 0);
mw32(MVS_STP_REG_SET_1, tmp);
}
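/*
 * The 94xx SPI registers sit below the enhanced-mode window:
 * mvs_94xx_ioremap set regs_ex to regs + 0x10200, so subtracting
 * 0x10200 here recovers the BAR base for the 0xc8xx SPI block.
 */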
u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
return mr32(SPI_RD_DATA_REG_94XX);
}
void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
mw32(SPI_WR_DATA_REG_94XX, data); /* write-data, not read-data, register */
}
int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
u32 *dwCmd,
u8 cmd,
u8 read,
u8 length,
u32 addr
)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
u32 dwTmp;
dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
if (read)
dwTmp |= SPI_CTRL_READ_94XX;
if (addr != MV_MAX_U32) {
mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
dwTmp |= SPI_ADDR_VLD_94XX;
}
*dwCmd = dwTmp;
return 0;
}
int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
return 0;
}
int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
u32 i, dwTmp;
for (i = 0; i < timeout; i++) {
dwTmp = mr32(SPI_CTRL_REG_94XX);
if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
return 0;
msleep(10);
}
return -1;
}
#ifndef DISABLE_HOTPLUG_DMA_FIX
void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
buf_prd += from;
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
buf_prd->addr = cpu_to_le64(buf_dma);
buf_prd->im_len.len = cpu_to_le32(buf_len);
++buf_prd;
}
}
#endif
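/*
 * 94xx dispatch table (positional, matching struct mvs_dispatch);
 * the NULL entries mark hooks the 94xx does not implement, such as
 * the 64xx phy workaround and STP reset.
 */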
const struct mvs_dispatch mvs_94xx_dispatch = {
"mv94xx",
mvs_94xx_init,
NULL,
mvs_94xx_ioremap,
mvs_94xx_iounmap,
mvs_94xx_isr,
mvs_94xx_isr_status,
mvs_94xx_interrupt_enable,
mvs_94xx_interrupt_disable,
mvs_read_phy_ctl,
mvs_write_phy_ctl,
mvs_read_port_cfg_data,
mvs_write_port_cfg_data,
mvs_write_port_cfg_addr,
mvs_read_port_vsr_data,
mvs_write_port_vsr_data,
mvs_write_port_vsr_addr,
mvs_read_port_irq_stat,
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_94xx_command_active,
mvs_94xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
mvs_int_full,
mvs_94xx_assign_reg_set,
mvs_94xx_free_reg_set,
mvs_get_prd_size,
mvs_get_prd_count,
mvs_94xx_make_prd,
mvs_94xx_detect_porttype,
mvs_94xx_oob_done,
mvs_94xx_fix_phy_info,
NULL,
mvs_94xx_phy_set_link_rate,
mvs_hw_max_link_rate,
mvs_94xx_phy_disable,
mvs_94xx_phy_enable,
mvs_94xx_phy_reset,
NULL,
mvs_94xx_clear_active_cmds,
mvs_94xx_spi_read_data,
mvs_94xx_spi_write_data,
mvs_94xx_spi_buildcmd,
mvs_94xx_spi_issuecmd,
mvs_94xx_spi_waitdataready,
#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_94xx_fix_dma,
#endif
};

View file

@@ -0,0 +1,222 @@
/*
* Marvell 88SE94xx hardware-specific header file
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#ifndef _MVS94XX_REG_H_
#define _MVS94XX_REG_H_
#include <linux/types.h>
#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
enum hw_registers {
MVS_GBL_CTL = 0x04, /* global control */
MVS_GBL_INT_STAT = 0x00, /* global irq status */
MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
MVS_PHY_CTL = 0x40, /* SOC PHY Control */
MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
MVS_GBL_PORT_TYPE = 0xa0, /* port type */
MVS_CTL = 0x100, /* SAS/SATA port configuration */
MVS_PCS = 0x104, /* SAS/SATA port control/status */
MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
MVS_CMD_LIST_HI = 0x10C,
MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
MVS_RX_FIS_HI = 0x114,
MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
MVS_STP_REG_SET_1 = 0x11C,
MVS_TX_CFG = 0x120, /* TX configuration */
MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
MVS_TX_HI = 0x128,
MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
MVS_RX_CFG = 0x134, /* RX configuration */
MVS_RX_LO = 0x138, /* RX (completion) ring addr */
MVS_RX_HI = 0x13C,
MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
MVS_INT_COAL = 0x148, /* Int coalescing config */
MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
MVS_INT_STAT = 0x150, /* Central int status */
MVS_INT_MASK = 0x154, /* Central int enable */
MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
MVS_INT_MASK_SRS_0 = 0x15C,
MVS_INT_STAT_SRS_1 = 0x160,
MVS_INT_MASK_SRS_1 = 0x164,
MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
MVS_NON_NCQ_ERR_1 = 0x16C,
MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
MVS_CMD_DATA = 0x174, /* Command register port (data) */
MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
/* ports 1-3 follow after this */
MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
/* ports 5-7 follow after this */
MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
/* ports 1-3 follow after this */
MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
/* ports 5-7 follow after this */
MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
/* ports 1-3 follow after this */
MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
/* ports 5-7 follow after this */
MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
/* phys 1-3 follow after this */
MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
/* phys 1-3 follow after this */
/* multiplexing */
MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
};
enum pci_cfg_registers {
PCR_PHY_CTL = 0x40,
PCR_PHY_CTL2 = 0x90,
PCR_DEV_CTRL = 0x78,
PCR_LINK_STAT = 0x82,
};
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
VSR_PHY_MODE3 = 0x03 * 4, /* pll */
VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
VSR_PHY_MODE9 = 0x09 * 4, /* Test */
VSR_PHY_MODE10 = 0x0A * 4, /* Power */
VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
VSR_PHY_VS0 = 0x0C * 4, /* Vendor Specific 0 */
VSR_PHY_VS1 = 0x0D * 4, /* Vendor Specific 1 */
};
enum chip_register_bits {
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
};
enum pci_interrupt_cause {
/* MAIN_IRQ_CAUSE (R10200) Bits*/
IRQ_COM_IN_I2O_IOP0 = (1 << 0),
IRQ_COM_IN_I2O_IOP1 = (1 << 1),
IRQ_COM_IN_I2O_IOP2 = (1 << 2),
IRQ_COM_IN_I2O_IOP3 = (1 << 3),
IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
IRQ_PCIF_DRBL0 = (1 << 12),
IRQ_PCIF_DRBL1 = (1 << 13),
IRQ_PCIF_DRBL2 = (1 << 14),
IRQ_PCIF_DRBL3 = (1 << 15),
IRQ_XOR_A = (1 << 16),
IRQ_XOR_B = (1 << 17),
IRQ_SAS_A = (1 << 18),
IRQ_SAS_B = (1 << 19),
IRQ_CPU_CNTRL = (1 << 20),
IRQ_GPIO = (1 << 21),
IRQ_UART = (1 << 22),
IRQ_SPI = (1 << 23),
IRQ_I2C = (1 << 24),
IRQ_SGPIO = (1 << 25),
IRQ_COM_ERR = (1 << 29),
IRQ_I2O_ERR = (1 << 30),
IRQ_PCIE_ERR = (1 << 31),
};
#define MAX_SG_ENTRY 255
struct mvs_prd_imt {
__le32 len:22;
u8 _r_a:2;
u8 misc_ctl:4;
u8 inter_sel:4;
};
struct mvs_prd {
/* 64-bit buffer address */
__le64 addr;
/* 22-bit length */
struct mvs_prd_imt im_len;
} __attribute__ ((packed));
#define SPI_CTRL_REG_94XX 0xc800
#define SPI_ADDR_REG_94XX 0xc804
#define SPI_WR_DATA_REG_94XX 0xc808
#define SPI_RD_DATA_REG_94XX 0xc80c
#define SPI_CTRL_READ_94XX (1U << 2)
#define SPI_ADDR_VLD_94XX (1U << 1)
#define SPI_CTRL_SpiStart_94XX (1U << 0)
#define mv_ffc(x) ffz(x)
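/*
 * Find the first clear bit in a 64-bit register-set bitmap, low word
 * first; assumes mv_ffc() reports "no zero bit" with a negative value.
 */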
static inline int
mv_ffc64(u64 v)
{
int i;
i = mv_ffc((u32)v);
if (i >= 0)
return i;
i = mv_ffc((u32)(v>>32));
if (i >= 0)
return 32 + i;
return -1;
}
#define r_reg_set_enable(i) \
(((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
mr32(MVS_STP_REG_SET_0))
#define w_reg_set_enable(i, tmp) \
(((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
mw32(MVS_STP_REG_SET_0, tmp))
extern const struct mvs_dispatch mvs_94xx_dispatch;
#endif

View file

@@ -0,0 +1,280 @@
/*
* Marvell 88SE64xx/88SE94xx register IO interface
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#ifndef _MV_CHIPS_H_
#define _MV_CHIPS_H_
#define mr32(reg) readl(regs + reg)
#define mw32(reg, val) writel((val), regs + reg)
#define mw32_f(reg, val) do { \
mw32(reg, val); \
mr32(reg); \
} while (0)
#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
#define ior32(reg) inl((unsigned long)(regs + reg))
#define iow16(reg, val) outw(val, (unsigned long)(regs + reg))
#define ior16(reg) inw((unsigned long)(regs + reg))
#define iow8(reg, val) outb(val, (unsigned long)(regs + reg))
#define ior8(reg) inb((unsigned long)(regs + reg))
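/*
 * Indirect "command register port": write the target address to
 * MVS_CMD_ADDR, then access the value through MVS_CMD_DATA.
 */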
static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
{
void __iomem *regs = mvi->regs;
mw32(MVS_CMD_ADDR, addr);
return mr32(MVS_CMD_DATA);
}
static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
{
void __iomem *regs = mvi->regs;
mw32(MVS_CMD_ADDR, addr);
mw32(MVS_CMD_DATA, val);
}
static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
{
void __iomem *regs = mvi->regs;
return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
}
static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
{
void __iomem *regs = mvi->regs;
if (port < 4)
mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
else
mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
}
static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
u32 off2, u32 port)
{
void __iomem *regs = mvi->regs + off;
void __iomem *regs2 = mvi->regs + off2;
return (port < 4) ? readl(regs + port * 8) :
readl(regs2 + (port - 4) * 8);
}
static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
u32 port, u32 val)
{
void __iomem *regs = mvi->regs + off;
void __iomem *regs2 = mvi->regs + off2;
if (port < 4)
writel(val, regs + port * 8);
else
writel(val, regs2 + (port - 4) * 8);
}
static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
return mvs_read_port(mvi, MVS_P0_CFG_DATA,
MVS_P4_CFG_DATA, port);
}
static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
u32 port, u32 val)
{
mvs_write_port(mvi, MVS_P0_CFG_DATA,
MVS_P4_CFG_DATA, port, val);
}
static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
u32 port, u32 addr)
{
mvs_write_port(mvi, MVS_P0_CFG_ADDR,
MVS_P4_CFG_ADDR, port, addr);
mdelay(10);
}
static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
return mvs_read_port(mvi, MVS_P0_VSR_DATA,
MVS_P4_VSR_DATA, port);
}
static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
u32 port, u32 val)
{
mvs_write_port(mvi, MVS_P0_VSR_DATA,
MVS_P4_VSR_DATA, port, val);
}
static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
u32 port, u32 addr)
{
mvs_write_port(mvi, MVS_P0_VSR_ADDR,
MVS_P4_VSR_ADDR, port, addr);
mdelay(10);
}
static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
return mvs_read_port(mvi, MVS_P0_INT_STAT,
MVS_P4_INT_STAT, port);
}
static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
u32 port, u32 val)
{
mvs_write_port(mvi, MVS_P0_INT_STAT,
MVS_P4_INT_STAT, port, val);
}
static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
return mvs_read_port(mvi, MVS_P0_INT_MASK,
MVS_P4_INT_MASK, port);
}
static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
u32 port, u32 val)
{
mvs_write_port(mvi, MVS_P0_INT_MASK,
MVS_P4_INT_MASK, port, val);
}
static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
u32 tmp;
/* workaround for SATA R-ERR, to ignore phy glitch */
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
tmp &= ~(1 << 9);
tmp |= (1 << 10);
mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
/* enable retry 127 times */
mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
/* extend open frame timeout to max */
tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
tmp &= ~0xffff;
tmp |= 0x3fff;
mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
/* workaround for WDTIMEOUT , set to 550 ms */
mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
/* do not halt on another port's operation during a wide-port link change */
mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
/* workaround for Seagate disk not-found OOB sequence, recv
* COMINIT before sending out COMWAKE */
tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
tmp &= 0x0000ffff;
tmp |= 0x00fa0000;
mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
tmp &= 0x1fffffff;
tmp |= (2U << 29); /* 8 ms retry */
mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
}
static inline void mvs_int_sata(struct mvs_info *mvi)
{
u32 tmp;
void __iomem *regs = mvi->regs;
tmp = mr32(MVS_INT_STAT_SRS_0);
if (tmp)
mw32(MVS_INT_STAT_SRS_0, tmp);
MVS_CHIP_DISP->clear_active_cmds(mvi);
}
static inline void mvs_int_full(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp, stat;
int i;
stat = mr32(MVS_INT_STAT);
mvs_int_rx(mvi, false);
for (i = 0; i < mvi->chip->n_phy; i++) {
tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
if (tmp)
mvs_int_port(mvi, i, tmp);
}
if (stat & CINT_SRS)
mvs_int_sata(mvi);
mw32(MVS_INT_STAT, stat);
}
static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
{
void __iomem *regs = mvi->regs;
mw32(MVS_TX_PROD_IDX, tx);
}
static inline u32 mvs_rx_update(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
return mr32(MVS_RX_CONS_IDX);
}
static inline u32 mvs_get_prd_size(void)
{
return sizeof(struct mvs_prd);
}
static inline u32 mvs_get_prd_count(void)
{
return MAX_SG_ENTRY;
}
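/* report the negotiated PCIe link width and speed (primary host only) */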
static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
{
u16 link_stat, link_spd;
const char *spd[] = {
"UnKnown",
"2.5",
"5.0",
};
if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
return;
pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
if (link_spd >= 3)
link_spd = 0;
dev_printk(KERN_INFO, mvi->dev,
"mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
(link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
spd[link_spd]);
}
static inline u32 mvs_hw_max_link_rate(void)
{
return MAX_LINK_RATE;
}
#endif /* _MV_CHIPS_H_ */

View file

@@ -0,0 +1,502 @@
/*
* Marvell 88SE64xx/88SE94xx constants header file
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#ifndef _MV_DEFS_H_
#define _MV_DEFS_H_
enum chip_flavors {
chip_6320,
chip_6440,
chip_6485,
chip_9480,
chip_9180,
};
/* driver compile-time configuration */
enum driver_configuration {
MVS_SLOTS = 512, /* command slots */
MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
/* software requires power-of-2
ring size */
MVS_SOC_SLOTS = 64,
MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
MVS_OAF_SZ = 64, /* Open address frame buffer size */
MVS_QUEUE_SIZE = 32, /* Support Queue depth */
MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
};
/* unchangeable hardware details */
enum hardware_details {
MVS_MAX_PHYS = 8, /* max. possible phys */
MVS_MAX_PORTS = 8, /* max. possible ports */
MVS_SOC_PHYS = 4, /* soc phys */
MVS_SOC_PORTS = 4, /* soc ports */
MVS_MAX_DEVICES = 1024, /* max supported devices */
};
/* peripheral registers (BAR2) */
enum peripheral_registers {
SPI_CTL = 0x10, /* EEPROM control */
SPI_CMD = 0x14, /* EEPROM command */
SPI_DATA = 0x18, /* EEPROM data */
};
enum peripheral_register_bits {
TWSI_RDY = (1U << 7), /* EEPROM interface ready */
TWSI_RD = (1U << 4), /* EEPROM read access */
SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
};
enum hw_register_bits {
/* MVS_GBL_CTL */
INT_EN = (1U << 1), /* Global int enable */
HBA_RST = (1U << 0), /* HBA reset */
/* MVS_GBL_INT_STAT */
INT_XOR = (1U << 4), /* XOR engine event */
INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
/* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
SATA_TARGET = (1U << 16), /* port0 SATA target enable */
MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
MODE_AUTO_DET_PORT6 = (1U << 14),
MODE_AUTO_DET_PORT5 = (1U << 13),
MODE_AUTO_DET_PORT4 = (1U << 12),
MODE_AUTO_DET_PORT3 = (1U << 11),
MODE_AUTO_DET_PORT2 = (1U << 10),
MODE_AUTO_DET_PORT1 = (1U << 9),
MODE_AUTO_DET_PORT0 = (1U << 8),
MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
MODE_SAS_PORT6_MASK = (1U << 6),
MODE_SAS_PORT5_MASK = (1U << 5),
MODE_SAS_PORT4_MASK = (1U << 4),
MODE_SAS_PORT3_MASK = (1U << 3),
MODE_SAS_PORT2_MASK = (1U << 2),
MODE_SAS_PORT1_MASK = (1U << 1),
MODE_SAS_PORT0_MASK = (1U << 0),
MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
/* SAS_MODE value may be
* dictated (in hw) by values
* of SATA_TARGET & AUTO_DET
*/
/* MVS_TX_CFG */
TX_EN = (1U << 16), /* Enable TX */
TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
/* MVS_RX_CFG */
RX_EN = (1U << 16), /* Enable RX */
RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
/* MVS_INT_COAL */
COAL_EN = (1U << 16), /* Enable int coalescing */
/* MVS_INT_STAT, MVS_INT_MASK */
CINT_I2C = (1U << 31), /* I2C event */
CINT_SW0 = (1U << 30), /* software event 0 */
CINT_SW1 = (1U << 29), /* software event 1 */
CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
CINT_MEM = (1U << 26), /* int mem parity err */
CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
CINT_SRS = (1U << 3), /* SRS event */
CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
CINT_DONE = (1U << 0), /* cmd completion */
/* shl for ports 1-3 */
CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
CINT_PORT = (1U << 8), /* port0 event */
CINT_PORT_MASK_OFFSET = 8,
CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
CINT_PHY_MASK_OFFSET = 4,
CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
/* TX (delivery) ring bits */
TXQ_CMD_SHIFT = 29,
TXQ_CMD_SSP = 1, /* SSP protocol */
TXQ_CMD_SMP = 2, /* SMP protocol */
TXQ_CMD_STP = 3, /* STP/SATA protocol */
TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
TXQ_MODE_TARGET = 0,
TXQ_MODE_INITIATOR = 1,
TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
TXQ_PRI_NORMAL = 0,
TXQ_PRI_HIGH = 1,
TXQ_SRS_SHIFT = 20, /* SATA register set */
TXQ_SRS_MASK = 0x7f,
TXQ_PHY_SHIFT = 12, /* PHY bitmap */
TXQ_PHY_MASK = 0xff,
TXQ_SLOT_MASK = 0xfff, /* slot number */
/* RX (completion) ring bits */
RXQ_GOOD = (1U << 23), /* Response good */
RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
RXQ_CMD_RX = (1U << 20), /* target cmd received */
RXQ_ATTN = (1U << 19), /* attention */
RXQ_RSP = (1U << 18), /* response frame xfer'd */
RXQ_ERR = (1U << 17), /* err info rec xfer'd */
RXQ_DONE = (1U << 16), /* cmd complete */
RXQ_SLOT_MASK = 0xfff, /* slot number */
/* mvs_cmd_hdr bits */
MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
/* SSP initiator only */
MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
/* SSP initiator or target */
MCH_SSP_FR_TASK = 0x1, /* TASK frame */
/* SSP target only */
MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
MCH_SSP_MODE_PASSTHRU = 1,
MCH_SSP_MODE_NORMAL = 0,
MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
MCH_FBURST = (1U << 11), /* first burst (SSP) */
MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
CCTL_RST = (1U << 5), /* port logic reset */
/* 0(LSB first), 1(MSB first) */
CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
CCTL_ENDIAN_CMD = (1U << 0), /* command table */
/* MVS_Px_SER_CTLSTAT (per-phy control) */
PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
PHY_RST = (1U << 0), /* phy reset */
PHY_READY_MASK = (1U << 20),
/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
PHYEV_AN = (1U << 18), /* SATA async notification */
PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
PHYEV_IU_BIG = (1U << 11), /* IU too long err */
PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
PHYEV_PORT_SEL = (1U << 6), /* port selector present */
PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
PHYEV_ID_FAIL = (1U << 3), /* identify failed */
PHYEV_ID_DONE = (1U << 2), /* identify done */
PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
/* MVS_PCS */
PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
PCS_CMD_RST = (1U << 1), /* reset cmd issue */
PCS_CMD_EN = (1U << 0), /* enable cmd issue */
/* Port n Attached Device Info */
PORT_DEV_SSP_TRGT = (1U << 19),
PORT_DEV_SMP_TRGT = (1U << 18),
PORT_DEV_STP_TRGT = (1U << 17),
PORT_DEV_SSP_INIT = (1U << 11),
PORT_DEV_SMP_INIT = (1U << 10),
PORT_DEV_STP_INIT = (1U << 9),
PORT_PHY_ID_MASK = (0xFFU << 24),
PORT_SSP_TRGT_MASK = (0x1U << 19),
PORT_SSP_INIT_MASK = (0x1U << 11),
PORT_DEV_TRGT_MASK = (0x7U << 17),
PORT_DEV_INIT_MASK = (0x7U << 9),
PORT_DEV_TYPE_MASK = (0x7U << 0),
/* Port n PHY Status */
PHY_RDY = (1U << 2),
PHY_DW_SYNC = (1U << 1),
PHY_OOB_DTCTD = (1U << 0),
/* VSR */
/* PHYMODE 6 (CDB) */
PHY_MODE6_LATECLK = (1U << 29), /* Late Clock */
PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
};
/* SAS/SATA configuration port registers, aka phy registers */
enum sas_sata_config_port_regs {
PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
PHYR_SATA_CTL = 0x18, /* SATA control */
PHYR_PHY_STAT = 0x1C, /* PHY status */
PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
PHYR_WIDE_PORT = 0x38, /* wide port participating */
PHYR_CURRENT0 = 0x80, /* current connection info 0 */
PHYR_CURRENT1 = 0x84, /* current connection info 1 */
PHYR_CURRENT2 = 0x88, /* current connection info 2 */
CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
};
enum sas_cmd_port_registers {
CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
CMD_OOB_SPACE = 0x110, /* OOB space control register */
CMD_OOB_BURST = 0x114, /* OOB burst control register */
CMD_PHY_TIMER = 0x118, /* PHY timer control register */
CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
CMD_ID_TEST = 0x134, /* ID test register */
CMD_PL_TIMER = 0x138, /* PL timer register */
CMD_WD_TIMER = 0x13c, /* WD timer register */
CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
CMD_RESET_COUNT = 0x188, /* Reset Count */
CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
CMD_PHY_CTL = 0x194, /* PHY Control and Status */
CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
CMD_HOST_CTL = 0x1AC, /* Host Control Status */
CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
};
enum mvs_info_flags {
MVF_MSI = (1U << 0), /* MSI is enabled */
MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
};
enum mvs_event_flags {
PHY_PLUG_EVENT = (3U), /* mask: plug in | plug out */
PHY_PLUG_IN = (1U << 0), /* phy plug in */
PHY_PLUG_OUT = (1U << 1), /* phy plug out */
};
enum mvs_port_type {
PORT_TGT_MASK = (1U << 5),
PORT_INIT_PORT = (1U << 4),
PORT_TGT_PORT = (1U << 3),
PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
PORT_TYPE_SAS = (1U << 1),
PORT_TYPE_SATA = (1U << 0),
};
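
A phy can act as initiator, target, or both, so the bits above compose. A hypothetical test helper, shown only to make that composition concrete:

static inline int example_phy_is_init_and_tgt(u32 port_type)
{
	return (port_type & PORT_INIT_TGT_PORT) == PORT_INIT_TGT_PORT;
}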
/* Command Table Format */
enum ct_format {
/* SSP */
SSP_F_H = 0x00,
SSP_F_IU = 0x18,
SSP_F_MAX = 0x4D,
/* STP */
STP_CMD_FIS = 0x00,
STP_ATAPI_CMD = 0x40,
STP_F_MAX = 0x10,
/* SMP */
SMP_F_T = 0x00,
SMP_F_DEP = 0x01,
SMP_F_MAX = 0x101,
};
enum status_buffer {
SB_EIR_OFF = 0x00, /* Error Information Record */
SB_RFB_OFF = 0x08, /* Response Frame Buffer */
SB_RFB_MAX = 0x400, /* RFB size */
};
enum error_info_rec {
CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
RSP_OVER = (1U << 29), /* rsp buffer overflow */
RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
UNK_FIS = (1U << 27), /* unknown FIS */
DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
R_ERR = (1U << 23), /* SATA returned R_ERR prim */
RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
INTERLOCK = (1U << 15), /* interlock error */
NAK = (1U << 14), /* NAK rx'd */
ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
STP_RES_BSY = (1U << 8), /* STP resources busy */
BREAK = (1U << 7), /* break received */
BAD_DEST = (1U << 6), /* bad destination */
BAD_PROTO = (1U << 5), /* protocol not supported */
BAD_RATE = (1U << 4), /* cxn rate not supported */
WRONG_DEST = (1U << 3), /* wrong destination error */
CREDIT_TO = (1U << 2), /* credit timeout */
WDOG_TO = (1U << 1), /* watchdog timeout */
BUF_PAR = (1U << 0), /* buffer parity error */
};
enum error_info_rec_2 {
SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
APP_CHK_ERR = (1U << 13), /* Application Check error */
REF_CHK_ERR = (1U << 12), /* Reference Check Error */
USR_BLK_NM = (1U << 0), /* User Block Number */
};
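
A completion handler tests these bits against the error information record at SB_EIR_OFF in the status buffer above. A hedged sketch of such a decode; the grouping below is the editor's, not the driver's:

static const char *example_err_to_str(u32 err)
{
	if (err & TFILE_ERR)
		return "SATA taskfile error";
	if (err & (OPEN_TO | PATH_BLOCKED | NO_DEST))
		return "I_T nexus lost";
	if (err & (NAK | ACK_NAK_TO | CXN_CLOSED))
		return "ACK/NAK failure";
	if (err & DATA_OVER_UNDER)
		return "data overflow/underflow";
	return "other error";
}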
enum pci_cfg_register_bits {
PCTL_PWR_OFF = (0xFU << 24),
PCTL_COM_ON = (0xFU << 20),
PCTL_LINK_RST = (0xFU << 16),
PCTL_LINK_OFFS = (16),
PCTL_PHY_DSBL = (0xFU << 12),
PCTL_PHY_DSBL_OFFS = (12),
PRD_REQ_SIZE = (0x4000),
PRD_REQ_MASK = (0x00007000),
PLS_NEG_LINK_WD = (0x3FU << 4),
PLS_NEG_LINK_WD_OFFS = 4,
PLS_LINK_SPD = (0x0FU << 0),
PLS_LINK_SPD_OFFS = 0,
};
enum open_frame_protocol {
PROTOCOL_SMP = 0x0,
PROTOCOL_SSP = 0x1,
PROTOCOL_STP = 0x2,
};
/* define for response frame datapres field */
enum datapres_field {
NO_DATA = 0,
RESPONSE_DATA = 1,
SENSE_DATA = 2,
};
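
A tiny, purely illustrative mapping of the datapres values to names, as a response-frame parser might use:

static const char *example_datapres_name(enum datapres_field dp)
{
	switch (dp) {
	case NO_DATA:		return "no data";
	case RESPONSE_DATA:	return "response data follows";
	case SENSE_DATA:	return "SCSI sense data follows";
	}
	return "reserved";
}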
/* define task management IU */
struct mvs_tmf_task {
u8 tmf;
u16 tag_of_task_to_be_managed;
};
#endif

View file

@ -0,0 +1,703 @@
/*
* Marvell 88SE64xx/88SE94xx pci init
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include "mv_sas.h"
static struct scsi_transport_template *mvs_stt;
static const struct mvs_chip_info mvs_chips[] = {
[chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
[chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
[chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
[chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
[chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
};
#define SOC_SAS_NUM 2
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = mvs_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
.change_queue_depth = sas_change_queue_depth,
.change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
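/* placeholders; raised to the real queue depth in mvs_post_sas_ha_init() */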
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
static struct sas_domain_function_template mvs_transport_ops = {
.lldd_dev_found = mvs_dev_found,
.lldd_dev_gone = mvs_dev_gone,
.lldd_execute_task = mvs_queue_command,
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
.lldd_abort_task_set = mvs_abort_task_set,
.lldd_clear_aca = mvs_clear_aca,
.lldd_clear_task_set = mvs_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
.lldd_port_formed = mvs_port_formed,
.lldd_port_deformed = mvs_port_deformed,
};
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
struct mvs_phy *phy = &mvi->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->mvi = mvi;
init_timer(&phy->timer);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
sas_phy->id = phy_id;
sas_phy->sas_addr = &mvi->sas_addr[0];
sas_phy->frame_rcvd = &phy->frame_rcvd[0];
sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
sas_phy->lldd_phy = phy;
}
static void mvs_free(struct mvs_info *mvi)
{
int i;
struct mvs_wq *mwq;
int slot_nr;
if (!mvi)
return;
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
slot_nr = MVS_SLOTS;
for (i = 0; i < mvi->tags_num; i++) {
struct mvs_slot_info *slot = &mvi->slot_info[i];
if (slot->buf)
dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
slot->buf, slot->buf_dma);
}
if (mvi->tx)
dma_free_coherent(mvi->dev,
sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
mvi->tx, mvi->tx_dma);
if (mvi->rx_fis)
dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
mvi->rx_fis, mvi->rx_fis_dma);
if (mvi->rx)
dma_free_coherent(mvi->dev,
sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
mvi->rx, mvi->rx_dma);
if (mvi->slot)
dma_free_coherent(mvi->dev,
sizeof(*mvi->slot) * slot_nr,
mvi->slot, mvi->slot_dma);
#ifndef DISABLE_HOTPLUG_DMA_FIX
if (mvi->bulk_buffer)
dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
mvi->bulk_buffer, mvi->bulk_buffer_dma);
#endif
MVS_CHIP_DISP->chip_iounmap(mvi);
if (mvi->shost)
scsi_host_put(mvi->shost);
list_for_each_entry(mwq, &mvi->wq_list, entry)
cancel_delayed_work(&mwq->work_q);
kfree(mvi);
}
#ifdef MVS_USE_TASKLET
struct tasklet_struct mv_tasklet;
static void mvs_tasklet(unsigned long opaque)
{
unsigned long flags;
u32 stat;
u16 core_nr, i = 0;
struct mvs_info *mvi;
struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
BUG_ON(!mvi);
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
if (stat)
MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
}
}
#endif
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
u32 core_nr, i = 0;
u32 stat;
struct mvs_info *mvi;
struct sas_ha_struct *sha = opaque;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
if (unlikely(!mvi))
return IRQ_NONE;
stat = MVS_CHIP_DISP->isr_status(mvi, irq);
if (!stat)
return IRQ_NONE;
#ifdef MVS_USE_TASKLET
tasklet_schedule(&mv_tasklet);
#else
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
MVS_CHIP_DISP->isr(mvi, irq, stat);
}
#endif
return IRQ_HANDLED;
}
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
int i, slot_nr;
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
slot_nr = MVS_SLOTS;
spin_lock_init(&mvi->lock);
for (i = 0; i < mvi->chip->n_phy; i++) {
mvs_phy_init(mvi, i);
mvi->port[i].wide_port_phymap = 0;
mvi->port[i].port_attached = 0;
INIT_LIST_HEAD(&mvi->port[i].list);
}
for (i = 0; i < MVS_MAX_DEVICES; i++) {
mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
mvi->devices[i].dev_type = NO_DEVICE;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
}
/*
* alloc and init our DMA areas
*/
mvi->tx = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
&mvi->tx_dma, GFP_KERNEL);
if (!mvi->tx)
goto err_out;
memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
&mvi->rx_fis_dma, GFP_KERNEL);
if (!mvi->rx_fis)
goto err_out;
memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
mvi->rx = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
&mvi->rx_dma, GFP_KERNEL);
if (!mvi->rx)
goto err_out;
memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
mvi->rx[0] = cpu_to_le32(0xfff);
mvi->rx_cons = 0xfff;
mvi->slot = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->slot) * slot_nr,
&mvi->slot_dma, GFP_KERNEL);
if (!mvi->slot)
goto err_out;
memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
#ifndef DISABLE_HOTPLUG_DMA_FIX
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
&mvi->bulk_buffer_dma, GFP_KERNEL);
if (!mvi->bulk_buffer)
goto err_out;
#endif
for (i = 0; i < slot_nr; i++) {
struct mvs_slot_info *slot = &mvi->slot_info[i];
slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
&slot->buf_dma, GFP_KERNEL);
if (!slot->buf) {
printk(KERN_DEBUG"failed to allocate slot->buf.\n");
goto err_out;
}
memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
++mvi->tags_num;
}
/* Initialize tags */
mvs_tag_init(mvi);
return 0;
err_out:
return 1;
}
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
struct pci_dev *pdev = mvi->pdev;
if (bar_ex != -1) {
/*
 * ioremap the peripheral (extended) register window; the main
 * window is mapped below
 */
res_start = pci_resource_start(pdev, bar_ex);
res_len = pci_resource_len(pdev, bar_ex);
if (!res_start || !res_len)
goto err_out;
res_flag_ex = pci_resource_flags(pdev, bar_ex);
if (res_flag_ex & IORESOURCE_MEM) {
if (res_flag_ex & IORESOURCE_CACHEABLE)
mvi->regs_ex = ioremap(res_start, res_len);
else
mvi->regs_ex = ioremap_nocache(res_start,
res_len);
} else
mvi->regs_ex = (void *)res_start;
if (!mvi->regs_ex)
goto err_out;
}
res_start = pci_resource_start(pdev, bar);
res_len = pci_resource_len(pdev, bar);
if (!res_start || !res_len)
goto err_out;
res_flag = pci_resource_flags(pdev, bar);
if (res_flag & IORESOURCE_CACHEABLE)
mvi->regs = ioremap(res_start, res_len);
else
mvi->regs = ioremap_nocache(res_start, res_len);
if (!mvi->regs) {
if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
iounmap(mvi->regs_ex);
mvi->regs_ex = NULL;
goto err_out;
}
return 0;
err_out:
return -1;
}
void mvs_iounmap(void __iomem *regs)
{
iounmap(regs);
}
static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct Scsi_Host *shost, unsigned int id)
{
struct mvs_info *mvi;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
GFP_KERNEL);
if (!mvi)
return NULL;
mvi->pdev = pdev;
mvi->dev = &pdev->dev;
mvi->chip_id = ent->driver_data;
mvi->chip = &mvs_chips[mvi->chip_id];
INIT_LIST_HEAD(&mvi->wq_list);
mvi->irq = pdev->irq;
((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
mvi->id = id;
mvi->sas = sha;
mvi->shost = shost;
#ifdef MVS_USE_TASKLET
tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
#endif
if (MVS_CHIP_DISP->chip_ioremap(mvi))
goto err_out;
if (!mvs_alloc(mvi, shost))
return mvi;
err_out:
mvs_free(mvi);
return NULL;
}
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
int rc;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
}
return rc;
}
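
For comparison only: kernels from v3.13 onward provide dma_set_mask_and_coherent(), which collapses most of this fallback dance. A rough equivalent, not applicable to this 2009-era tree, and note it drops the 64-bit-streaming/32-bit-coherent middle case handled above:

static int example_go_64(struct pci_dev *pdev)
{
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)	/* fall back to 32-bit for both mask types */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}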
static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int phy_nr, port_nr;
unsigned short core_nr;
struct asd_sas_phy **arr_phy;
struct asd_sas_port **arr_port;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
core_nr = chip_info->n_host;
phy_nr = core_nr * chip_info->n_phy;
port_nr = phy_nr;
memset(sha, 0x00, sizeof(struct sas_ha_struct));
arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
if (!arr_phy || !arr_port)
goto exit_free;
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
goto exit_free;
((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
shost->transportt = mvs_stt;
shost->max_id = 128;
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
return 0;
exit_free:
kfree(arr_phy);
kfree(arr_port);
return -1;
}
static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int can_queue, i = 0, j = 0;
struct mvs_info *mvi = NULL;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
for (j = 0; j < nr_core; j++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
for (i = 0; i < chip_info->n_phy; i++) {
sha->sas_phy[j * chip_info->n_phy + i] =
&mvi->phy[i].sas_phy;
sha->sas_port[j * chip_info->n_phy + i] =
&mvi->port[i].sas_port;
}
}
sha->sas_ha_name = DRV_NAME;
sha->dev = mvi->dev;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &mvi->sas_addr[0];
sha->num_phys = nr_core * chip_info->n_phy;
sha->lldd_max_execute_num = 1;
if (mvi->flags & MVF_FLAG_SOC)
can_queue = MVS_SOC_CAN_QUEUE;
else
can_queue = MVS_CAN_QUEUE;
sha->lldd_queue_size = can_queue;
shost->can_queue = can_queue;
mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
sha->core.shost = mvi->shost;
}
static void mvs_init_sas_add(struct mvs_info *mvi)
{
u8 i;
for (i = 0; i < mvi->chip->n_phy; i++) {
mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
mvi->phy[i].dev_sas_addr =
cpu_to_be64(mvi->phy[i].dev_sas_addr);
}
memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}
static int __devinit mvs_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
unsigned int nhost = 0;
struct mvs_info *mvi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
dev_printk(KERN_INFO, &pdev->dev,
"mvsas: driver version %s\n", DRV_VERSION);
rc = pci_enable_device(pdev);
if (rc)
goto err_out_enable;
pci_set_master(pdev);
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_disable;
rc = pci_go_64(pdev);
if (rc)
goto err_out_regions;
shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
if (!shost) {
rc = -ENOMEM;
goto err_out_regions;
}
chip = &mvs_chips[ent->driver_data];
SHOST_TO_SAS_HA(shost) =
kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
if (!SHOST_TO_SAS_HA(shost)) {
scsi_host_put(shost);
rc = -ENOMEM;
goto err_out_regions;
}
rc = mvs_prep_sas_ha_init(shost, chip);
if (rc) {
scsi_host_put(shost);
rc = -ENOMEM;
goto err_out_regions;
}
pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
do {
mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
if (!mvi) {
rc = -ENOMEM;
goto err_out_regions;
}
mvs_init_sas_add(mvi);
mvi->instance = nhost;
rc = MVS_CHIP_DISP->chip_init(mvi);
if (rc) {
mvs_free(mvi);
goto err_out_regions;
}
nhost++;
} while (nhost < chip->n_host);
mvs_post_sas_ha_init(shost, chip);
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_shost;
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc)
goto err_out_shost;
rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
DRV_NAME, SHOST_TO_SAS_HA(shost));
if (rc)
goto err_not_sas;
MVS_CHIP_DISP->interrupt_enable(mvi);
scsi_scan_host(mvi->shost);
return 0;
err_not_sas:
sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
scsi_remove_host(mvi->shost);
err_out_regions:
pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out_enable:
return rc;
}
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
unsigned short core_nr, i = 0;
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct mvs_info *mvi = NULL;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
#ifdef MVS_USE_TASKLET
tasklet_kill(&mv_tasklet);
#endif
pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(mvi->shost);
scsi_remove_host(mvi->shost);
MVS_CHIP_DISP->interrupt_disable(mvi);
free_irq(mvi->irq, sha);
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
mvs_free(mvi);
}
kfree(sha->sas_phy);
kfree(sha->sas_port);
kfree(sha);
pci_release_regions(pdev);
pci_disable_device(pdev);
return;
}
static struct pci_device_id __devinitdata mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
.vendor = PCI_VENDOR_ID_MARVELL,
.device = 0x6440,
.subvendor = PCI_ANY_ID,
.subdevice = 0x6480,
.class = 0,
.class_mask = 0,
.driver_data = chip_6485,
},
{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
{ } /* terminate list */
};
static struct pci_driver mvs_pci_driver = {
.name = DRV_NAME,
.id_table = mvs_pci_table,
.probe = mvs_pci_init,
.remove = __devexit_p(mvs_pci_remove),
};
/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void)
{
int rc;
mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
if (!mvs_stt)
return -ENOMEM;
rc = pci_register_driver(&mvs_pci_driver);
if (rc)
goto err_out;
return 0;
err_out:
sas_release_transport(mvs_stt);
return rc;
}
static void __exit mvs_exit(void)
{
pci_unregister_driver(&mvs_pci_driver);
sas_release_transport(mvs_stt);
}
module_init(mvs_init);
module_exit(mvs_exit);
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
#endif

drivers/scsi/mvsas/mv_sas.c (new file, 2154 lines)

File diff suppressed because it is too large

drivers/scsi/mvsas/mv_sas.h (new file, 406 lines)
View file

@ -0,0 +1,406 @@
/*
* Marvell 88SE64xx/88SE94xx main function head file
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#ifndef _MV_SAS_H_
#define _MV_SAS_H_
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/version.h>
#include "mv_defs.h"
#define DRV_NAME "mvsas"
#define DRV_VERSION "0.8.2"
#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
/* #define DISABLE_HOTPLUG_DMA_FIX */
#define MAX_EXP_RUNNING_REQ 2
#define WIDE_PORT_MAX_PHY 4
#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
#ifdef MV_DEBUG
#define mv_dprintk(format, arg...) \
printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
#else
#define mv_dprintk(format, arg...)
#endif
#define MV_MAX_U32 0xffffffff
extern struct mvs_tgt_initiator mvs_tgt;
extern struct mvs_info *tgt_mvi;
extern const struct mvs_dispatch mvs_64xx_dispatch;
extern const struct mvs_dispatch mvs_94xx_dispatch;
#define DEV_IS_EXPANDER(type) \
((type == EDGE_DEV) || (type == FANOUT_DEV))
#define bit(n) ((u32)1 << (n))
#define for_each_phy(__lseq_mask, __mc, __lseq) \
for ((__mc) = (__lseq_mask), (__lseq) = 0; \
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
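
A hypothetical use of for_each_phy above, to make the mask-walking semantics concrete; note the body must test the low bit of __mc itself:

static void example_walk_phys(u32 lseq_mask)
{
	u32 mc;
	int phy;

	for_each_phy(lseq_mask, mc, phy) {
		if (mc & 1)	/* low bit of the shifted mask = this phy */
			mv_dprintk("phy %d selected\n", phy);
	}
}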
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
#define SATA_RECEIVED_FIS_LIST(reg_set) \
((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
#define SATA_RECEIVED_SDB_FIS(reg_set) \
(SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
#define SATA_RECEIVED_D2H_FIS(reg_set) \
(SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
#define SATA_RECEIVED_PIO_FIS(reg_set) \
(SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
#define SATA_RECEIVED_DMA_FIS(reg_set) \
(SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
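
These offsets slice each register set's 0x100-byte window of the received-FIS area into the standard SATA FIS types. A sketch, assuming an mvi in scope exactly as the macros do and the D2H FIS layout from the SATA spec, of pulling the LBA bytes out of a received device-to-host FIS:

static u32 example_d2h_lba24(struct mvs_info *mvi, u8 reg_set)
{
	u8 *fis = (u8 *)SATA_RECEIVED_D2H_FIS(reg_set);

	/* bytes 4..6 of a D2H register FIS: LBA low/mid/high */
	return fis[4] | (fis[5] << 8) | (fis[6] << 16);
}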
enum dev_status {
MVS_DEV_NORMAL = 0x0,
MVS_DEV_EH = 0x1,
};
struct mvs_info;
struct mvs_dispatch {
char *name;
int (*chip_init)(struct mvs_info *mvi);
int (*spi_init)(struct mvs_info *mvi);
int (*chip_ioremap)(struct mvs_info *mvi);
void (*chip_iounmap)(struct mvs_info *mvi);
irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
u32 (*isr_status)(struct mvs_info *mvi, int irq);
void (*interrupt_enable)(struct mvs_info *mvi);
void (*interrupt_disable)(struct mvs_info *mvi);
u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs);
void (*start_delivery)(struct mvs_info *mvi, u32 tx);
u32 (*rx_update)(struct mvs_info *mvi);
void (*int_full)(struct mvs_info *mvi);
u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
u32 (*prd_size)(void);
u32 (*prd_count)(void);
void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
void (*detect_porttype)(struct mvs_info *mvi, int i);
int (*oob_done)(struct mvs_info *mvi, int i);
void (*fix_phy_info)(struct mvs_info *mvi, int i,
struct sas_identify_frame *id);
void (*phy_work_around)(struct mvs_info *mvi, int i);
void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates);
u32 (*phy_max_link_rate)(void);
void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
void (*clear_active_cmds)(struct mvs_info *mvi);
u32 (*spi_read_data)(struct mvs_info *mvi);
void (*spi_write_data)(struct mvs_info *mvi, u32 data);
int (*spi_buildcmd)(struct mvs_info *mvi,
u32 *dwCmd,
u8 cmd,
u8 read,
u8 length,
u32 addr
);
int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
#ifndef DISABLE_HOTPLUG_DMA_FIX
void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
#endif
};
struct mvs_chip_info {
u32 n_host;
u32 n_phy;
u32 fis_offs;
u32 fis_count;
u32 srs_sz;
u32 slot_width;
const struct mvs_dispatch *dispatch;
};
#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
#define MVS_RX_FISL_SZ \
(mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
#define MVS_CHIP_DISP (mvi->chip->dispatch)
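Plugging in the chip_6440 row of the mvs_chips[] table from the PCI init file above (fis_offs 0x400, fis_count 17, slot_width 9) makes these macros concrete:

/* chip_6440: MVS_CHIP_SLOT_SZ = 1U << 9            = 512 slots    */
/*            MVS_RX_FISL_SZ   = 0x400 + 17 * 0x100 = 0x1500 bytes */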
struct mvs_err_info {
__le32 flags;
__le32 flags2;
};
struct mvs_cmd_hdr {
__le32 flags; /* PRD tbl len; SAS, SATA ctl */
__le32 lens; /* cmd, max resp frame len */
__le32 tags; /* targ port xfer tag; tag */
__le32 data_len; /* data xfer len */
__le64 cmd_tbl; /* command table address */
__le64 open_frame; /* open addr frame address */
__le64 status_buf; /* status buffer address */
__le64 prd_tbl; /* PRD tbl address */
__le32 reserved[4];
};
struct mvs_port {
struct asd_sas_port sas_port;
u8 port_attached;
u8 wide_port_phymap;
struct list_head list;
};
struct mvs_phy {
struct mvs_info *mvi;
struct mvs_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
struct scsi_device *sdev;
struct timer_list timer;
u64 dev_sas_addr;
u64 att_dev_sas_addr;
u32 att_dev_info;
u32 dev_info;
u32 phy_type;
u32 phy_status;
u32 irq_status;
u32 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
u8 phy_mode;
u8 reserved[2];
u32 phy_event;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
};
struct mvs_device {
struct list_head dev_entry;
enum sas_dev_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
u32 attached_phy;
u32 device_id;
u32 runing_req;
u8 taskfileset;
u8 dev_status;
u16 reserved;
};
struct mvs_slot_info {
struct list_head entry;
union {
struct sas_task *task;
void *tdata;
};
u32 n_elem;
u32 tx;
u32 slot_tag;
/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
* and PRD table
*/
void *buf;
dma_addr_t buf_dma;
#if _MV_DUMP
u32 cmd_size;
#endif
void *response;
struct mvs_port *port;
struct mvs_device *device;
void *open_frame;
};
struct mvs_info {
unsigned long flags;
/* host-wide lock */
spinlock_t lock;
/* our device */
struct pci_dev *pdev;
struct device *dev;
/* enhanced mode registers */
void __iomem *regs;
/* peripheral or soc registers */
void __iomem *regs_ex;
u8 sas_addr[SAS_ADDR_SIZE];
/* SCSI/SAS glue */
struct sas_ha_struct *sas;
struct Scsi_Host *shost;
/* TX (delivery) DMA ring */
__le32 *tx;
dma_addr_t tx_dma;
/* cached next-producer idx */
u32 tx_prod;
/* RX (completion) DMA ring */
__le32 *rx;
dma_addr_t rx_dma;
/* RX consumer idx */
u32 rx_cons;
/* RX'd FIS area */
__le32 *rx_fis;
dma_addr_t rx_fis_dma;
/* DMA command header slots */
struct mvs_cmd_hdr *slot;
dma_addr_t slot_dma;
u32 chip_id;
const struct mvs_chip_info *chip;
int tags_num;
DECLARE_BITMAP(tags, MVS_SLOTS);
/* further per-slot information */
struct mvs_phy phy[MVS_MAX_PHYS];
struct mvs_port port[MVS_MAX_PHYS];
u32 irq;
u32 exp_req;
u32 id;
u64 sata_reg_set;
struct list_head *hba_list;
struct list_head soc_entry;
struct list_head wq_list;
unsigned long instance;
u16 flashid;
u32 flashsize;
u32 flashsectSize;
void *addon;
struct mvs_device devices[MVS_MAX_DEVICES];
#ifndef DISABLE_HOTPLUG_DMA_FIX
void *bulk_buffer;
dma_addr_t bulk_buffer_dma;
#define TRASH_BUCKET_SIZE 0x20000
#endif
struct mvs_slot_info slot_info[0];
};
struct mvs_prv_info {
u8 n_host;
u8 n_phy;
u16 reserve;
struct mvs_info *mvi[2];
};
struct mvs_wq {
struct delayed_work work_q;
struct mvs_info *mvi;
void *data;
int handler;
struct list_head entry;
};
struct mvs_task_exec_info {
struct sas_task *task;
struct mvs_cmd_hdr *hdr;
struct mvs_port *port;
u32 tag;
int n_elem;
};
/******************** function prototype *********************/
void mvs_get_sas_addr(void *buf, u32 buflen);
void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
void mvs_tag_free(struct mvs_info *mvi, u32 tag);
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
void mvs_tag_init(struct mvs_info *mvi);
void mvs_iounmap(void __iomem *regs);
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
u32 off_lo, u32 off_hi, u64 sas_addr);
int mvs_slave_alloc(struct scsi_device *scsi_dev);
int mvs_slave_configure(struct scsi_device *sdev);
void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, const int num,
gfp_t gfp_flags);
int mvs_abort_task(struct sas_task *task);
int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
int mvs_clear_aca(struct domain_device *dev, u8 *lun);
int mvs_clear_task_set(struct domain_device *dev, u8 *lun);
void mvs_port_formed(struct asd_sas_phy *sas_phy);
void mvs_port_deformed(struct asd_sas_phy *sas_phy);
int mvs_dev_found(struct domain_device *dev);
void mvs_dev_gone(struct domain_device *dev);
int mvs_lu_reset(struct domain_device *dev, u8 *lun);
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
int mvs_I_T_nexus_reset(struct domain_device *dev);
int mvs_query_task(struct sas_task *task);
void mvs_release_task(struct mvs_info *mvi, int phy_no,
struct domain_device *dev);
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
#endif
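
The tag helpers prototyped above manage the per-slot bitmap in struct mvs_info. A hedged lifecycle sketch; the 0-on-success return convention for mvs_tag_alloc() is an assumption here, since its body lives in the suppressed mv_sas.c diff:

static int example_claim_and_release(struct mvs_info *mvi)
{
	u32 tag;

	if (mvs_tag_alloc(mvi, &tag))	/* assumed: non-zero == no free slot */
		return -EBUSY;
	/* a command would be built in mvi->slot_info[tag] here */
	mvs_tag_free(mvi, tag);		/* normally done at completion */
	return 0;
}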

View file

@ -11,31 +11,6 @@
# it under the terms of the GNU General Public License version 2
#
ifneq ($(OSD_INC),)
# we are built out-of-tree: hard-code the Kconfig options as enabled
CONFIG_SCSI_OSD_INITIATOR=m
ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
CONFIG_SCSI_OSD_ULD=m
ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
# CONFIG_SCSI_OSD_DPRINT_SENSE =
# 0 - no print of errors
# 1 - print errors
# 2 - errors + warnings
ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
# Uncomment to turn debug on
# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
# if we are built out-of-tree and the hosting kernel has OSD headers
# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
# this it will work. This might break in future kernels
LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
endif
# libosd.ko - osd-initiator library
libosd-y := osd_initiator.o
obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o

View file

@ -1,37 +0,0 @@
#
# Makefile for the OSD modules (out of tree)
#
# Copyright (C) 2008 Panasas Inc. All rights reserved.
#
# Authors:
# Boaz Harrosh <bharrosh@panasas.com>
# Benny Halevy <bhalevy@panasas.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
#
# This Makefile is used to call the kernel Makefile in case of an out-of-tree
# build.
# $KSRC should point to a kernel source tree; otherwise the host's default
# is used (e.g. /lib/modules/`uname -r`/build)
# include path for out-of-tree Headers
OSD_INC ?= `pwd`/../../../include
# allow users to override these
# e.g. to compile for a kernel that you aren't currently running
KSRC ?= /lib/modules/$(shell uname -r)/build
KBUILD_OUTPUT ?=
ARCH ?=
V ?= 0
# this is the basic Kbuild out-of-tree invocation, with the M= option
KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
all: libosd
libosd: ;
$(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
clean:
$(KBUILD_BASE) clean

Some files were not shown because too many files have changed in this diff