Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (27 commits)
  [SCSI] mpt fusion: don't oops if NumPhys==0
  [SCSI] iscsi class: regression - fix races with state manipulation and blocking/unblocking
  [SCSI] qla4xxx: regression - add start scan callout
  [SCSI] qla4xxx: fix host reset dpc race
  [SCSI] tgt: fix build errors when dprintk is defined
  [SCSI] tgt: set the data length properly
  [SCSI] tgt: stop zero'ing scsi_cmnd
  [SCSI] ibmvstgt: set up scsi_host properly before __scsi_alloc_queue
  [SCSI] docbook: fix fusion source files
  [SCSI] docbook: fix scsi source file
  [SCSI] qla2xxx: Update version number to 8.02.00-k9.
  [SCSI] qla2xxx: Correct usage of inconsistent timeout values while issuing ELS commands.
  [SCSI] qla2xxx: Correct discrepancies during OVERRUN handling on FWI2-capable cards.
  [SCSI] qla2xxx: Correct needless clean-up resets during shutdown.
  [SCSI] arcmsr: update version and changelog
  [SCSI] ps3rom: disable clustering
  [SCSI] ps3rom: fix wrong resid calculation bug
  [SCSI] mvsas: fix phy sas address
  [SCSI] gdth: fix to internal commands execution
  [SCSI] gdth: bugfix for the at-exit problems
  ...
Linus Torvalds 2008-03-05 17:49:59 -08:00
commit 103926c689
28 changed files with 385 additions and 327 deletions


@ -109,4 +109,10 @@
** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer()
** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool()
** 10.fix the arcmsr_polling_hbb_ccbdone()
** 1.20.00.15 02/27/2008 Erich Chen & Nick Cheng
** 1.arcmsr_iop_message_xfer() is called from atomic context under the
** queuecommand scsi_host_template handler. James Bottomley pointed out
** that the current GFP_KERNEL|GFP_DMA flags are wrong: firstly we are in
** atomic context, secondly this memory is not used for DMA.
** Also removed some unneeded casts. Thanks to Daniel Drake <dsd@gentoo.org>
**************************************************************************
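The changelog entry above describes moving arcmsr_iop_message_xfer()'s bounce-buffer allocation off pci_alloc_consistent() and the GFP_KERNEL|GFP_DMA flags onto a plain atomic-safe kmalloc(), since the function runs under the queuecommand handler and the buffer is never handed to the hardware for DMA. A minimal sketch of that pattern follows; the helper names and the buffer length parameter are illustrative, not the driver's actual identifiers.

#include <linux/slab.h>

/*
 * Illustrative only: allocate a message bounce buffer from atomic
 * context (we are under the queuecommand handler, so we must not
 * sleep) with plain kmalloc(), since the buffer is not used for DMA.
 * GFP_KERNEL|GFP_DMA would be wrong on both counts.
 */
static unsigned char *example_alloc_msg_buffer(size_t len)
{
	return kmalloc(len, GFP_ATOMIC);
}

static void example_free_msg_buffer(unsigned char *buf)
{
	kfree(buf);
}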


@ -1701,6 +1701,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
if (error)
goto out_free_consistent;
if (!buffer->NumPhys) {
error = -ENODEV;
goto out_free_consistent;
}
/* save config data */
port_info->num_phys = buffer->NumPhys;
port_info->phy_info = kcalloc(port_info->num_phys,


@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun);
int asd_clear_aca(struct domain_device *, u8 *lun);
int asd_clear_task_set(struct domain_device *, u8 *lun);
int asd_lu_reset(struct domain_device *, u8 *lun);
int asd_I_T_nexus_reset(struct domain_device *dev);
int asd_query_task(struct sas_task *);
/* ---------- Adapter and Port management ---------- */


@ -140,7 +140,7 @@ struct asd_ascb {
/* internally generated command */
struct timer_list timer;
struct completion completion;
struct completion *completion;
u8 tag_valid:1;
__be16 tag; /* error recovery only */
@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
ascb->timer.function = NULL;
init_timer(&ascb->timer);
ascb->tc_index = -1;
init_completion(&ascb->completion);
}
/* Must be called with the tc_index_lock held!


@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_abort_task_set = asd_abort_task_set,
.lldd_clear_aca = asd_clear_aca,
.lldd_clear_task_set = asd_clear_task_set,
.lldd_I_T_nexus_reset = NULL,
.lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
.lldd_lu_reset = asd_lu_reset,
.lldd_query_task = asd_query_task,


@ -343,11 +343,13 @@ static void asd_task_tasklet_complete(struct asd_ascb *ascb,
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
struct completion *completion = ascb->completion;
spin_unlock_irqrestore(&task->task_state_lock, flags);
ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
"stat 0x%x but aborted by upper layer!\n",
task, opcode, ts->resp, ts->stat);
complete(&ascb->completion);
if (completion)
complete(completion);
} else {
spin_unlock_irqrestore(&task->task_state_lock, flags);
task->lldd_task = NULL;


@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
return res;
}
static inline void asd_timedout_common(unsigned long data)
{
struct asd_ascb *ascb = (void *) data;
struct asd_seq_data *seq = &ascb->ha->seq;
unsigned long flags;
/* ---------- CLEAR NEXUS ---------- */
spin_lock_irqsave(&seq->pend_q_lock, flags);
seq->pending--;
list_del_init(&ascb->list);
spin_unlock_irqrestore(&seq->pend_q_lock, flags);
struct tasklet_completion_status {
int dl_opcode;
int tmf_state;
u8 tag_valid:1;
__be16 tag;
};
#define DECLARE_TCS(tcs) \
struct tasklet_completion_status tcs = { \
.dl_opcode = 0, \
.tmf_state = 0, \
.tag_valid = 0, \
.tag = 0, \
}
/* ---------- CLEAR NEXUS ---------- */
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __FUNCTION__);
if (!del_timer(&ascb->timer)) {
ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
return;
}
ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
ascb->uldd_task = (void *) (unsigned long) dl->opcode;
complete(&ascb->completion);
tcs->dl_opcode = dl->opcode;
complete(ascb->completion);
asd_ascb_free(ascb);
}
static void asd_clear_nexus_timedout(unsigned long data)
{
struct asd_ascb *ascb = (void *)data;
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __FUNCTION__);
asd_timedout_common(data);
ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
complete(&ascb->completion);
tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
complete(ascb->completion);
}
#define CLEAR_NEXUS_PRE \
struct asd_ascb *ascb; \
struct scb *scb; \
int res; \
DECLARE_COMPLETION_ONSTACK(completion); \
DECLARE_TCS(tcs); \
\
ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
res = 1; \
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
if (!ascb) \
return -ENOMEM; \
\
ascb->completion = &completion; \
ascb->uldd_task = &tcs; \
scb = ascb->scb; \
scb->header.opcode = CLEAR_NEXUS
@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data)
if (res) \
goto out_err; \
ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
wait_for_completion(&ascb->completion); \
res = (int) (unsigned long) ascb->uldd_task; \
wait_for_completion(&completion); \
res = tcs.dl_opcode; \
if (res == TC_NO_ERROR) \
res = TMF_RESP_FUNC_COMPLETE; \
return res; \
out_err: \
asd_ascb_free(ascb); \
return res
@ -118,9 +133,6 @@ out_err: \
int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_ADAPTER;
@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
int asd_clear_nexus_port(struct asd_sas_port *port)
{
struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_PORT;
@ -140,29 +149,73 @@ int asd_clear_nexus_port(struct asd_sas_port *port)
CLEAR_NEXUS_POST;
}
#if 0
static int asd_clear_nexus_I_T(struct domain_device *dev)
enum clear_nexus_phase {
NEXUS_PHASE_PRE,
NEXUS_PHASE_POST,
NEXUS_PHASE_RESUME,
};
static int asd_clear_nexus_I_T(struct domain_device *dev,
enum clear_nexus_phase phase)
{
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_I_T;
scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
switch (phase) {
case NEXUS_PHASE_PRE:
scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
break;
case NEXUS_PHASE_POST:
scb->clear_nexus.flags = SEND_Q | NOTINQ;
break;
case NEXUS_PHASE_RESUME:
scb->clear_nexus.flags = RESUME_TX;
}
scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
dev->lldd_dev);
CLEAR_NEXUS_POST;
}
#endif
int asd_I_T_nexus_reset(struct domain_device *dev)
{
int res, tmp_res, i;
struct sas_phy *phy = sas_find_local_phy(dev);
/* Standard mandates link reset for ATA (type 0) and
* hard reset for SSP (type 1) */
int reset_type = (dev->dev_type == SATA_DEV ||
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
/* send a hard reset */
ASD_DPRINTK("sending %s reset to %s\n",
reset_type ? "hard" : "soft", phy->dev.bus_id);
res = sas_phy_reset(phy, reset_type);
if (res == TMF_RESP_FUNC_COMPLETE) {
/* wait for the maximum settle time */
msleep(500);
/* clear all outstanding commands (keep nexus suspended) */
asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
}
for (i = 0 ; i < 3; i++) {
tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
if (tmp_res == TC_RESUME)
return res;
msleep(500);
}
/* This is a bit of a problem: the sequencer is still suspended
* and is refusing to resume. Hope it will resume on a bigger hammer
* or the disk is lost */
dev_printk(KERN_ERR, &phy->dev,
"Failed to resume nexus after reset 0x%x\n", tmp_res);
return TMF_RESP_FUNC_FAILED;
}
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_I_T_L;
@ -177,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task)
{
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
struct asd_ascb *tascb = task->lldd_task;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_TAG;
@ -195,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task)
{
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
struct asd_ascb *tascb = task->lldd_task;
struct asd_ascb *ascb;
struct scb *scb;
int res;
CLEAR_NEXUS_PRE;
scb->clear_nexus.nexus = NEXUS_TRANS_CX;
@ -213,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task)
static void asd_tmf_timedout(unsigned long data)
{
struct asd_ascb *ascb = (void *) data;
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("tmf timed out\n");
asd_timedout_common(data);
ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
complete(&ascb->completion);
tcs->tmf_state = TMF_RESP_FUNC_FAILED;
complete(ascb->completion);
}
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
@ -269,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct tasklet_completion_status *tcs;
if (!del_timer(&ascb->timer))
return;
tcs = ascb->uldd_task;
ASD_DPRINTK("tmf tasklet complete\n");
if (dl->opcode == TC_SSP_RESP)
ascb->uldd_task = (void *) (unsigned long)
asd_get_tmf_resp_tasklet(ascb, dl);
else
ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode;
tcs->dl_opcode = dl->opcode;
complete(&ascb->completion);
if (dl->opcode == TC_SSP_RESP) {
tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
tcs->tag_valid = ascb->tag_valid;
tcs->tag = ascb->tag;
}
complete(ascb->completion);
asd_ascb_free(ascb);
}
static inline int asd_clear_nexus(struct sas_task *task)
@ -288,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task)
int res = TMF_RESP_FUNC_FAILED;
int leftover;
struct asd_ascb *tascb = task->lldd_task;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
tascb->completion = &completion;
ASD_DPRINTK("task not done, clearing nexus\n");
if (tascb->tag_valid)
res = asd_clear_nexus_tag(task);
else
res = asd_clear_nexus_index(task);
leftover = wait_for_completion_timeout(&tascb->completion,
leftover = wait_for_completion_timeout(&completion,
AIC94XX_SCB_TIMEOUT);
tascb->completion = NULL;
ASD_DPRINTK("came back from clear nexus\n");
spin_lock_irqsave(&task->task_state_lock, flags);
if (leftover < 1)
@ -350,6 +407,11 @@ int asd_abort_task(struct sas_task *task)
struct asd_ascb *ascb = NULL;
struct scb *scb;
int leftover;
DECLARE_TCS(tcs);
DECLARE_COMPLETION_ONSTACK(completion);
DECLARE_COMPLETION_ONSTACK(tascb_completion);
tascb->completion = &tascb_completion;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@ -363,8 +425,10 @@ int asd_abort_task(struct sas_task *task)
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
if (!ascb)
return -ENOMEM;
scb = ascb->scb;
ascb->uldd_task = &tcs;
ascb->completion = &completion;
scb = ascb->scb;
scb->header.opcode = SCB_ABORT_TASK;
switch (task->task_proto) {
@ -406,13 +470,12 @@ int asd_abort_task(struct sas_task *task)
res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
asd_tmf_timedout);
if (res)
goto out;
wait_for_completion(&ascb->completion);
goto out_free;
wait_for_completion(&completion);
ASD_DPRINTK("tmf came back\n");
res = (int) (unsigned long) ascb->uldd_task;
tascb->tag = ascb->tag;
tascb->tag_valid = ascb->tag_valid;
tascb->tag = tcs.tag;
tascb->tag_valid = tcs.tag_valid;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@ -423,39 +486,39 @@ int asd_abort_task(struct sas_task *task)
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
switch (res) {
if (tcs.dl_opcode == TC_SSP_RESP) {
/* The task to be aborted has been sent to the device.
* We got a Response IU for the ABORT TASK TMF. */
case TC_NO_ERROR + 0xFF00:
case TMF_RESP_FUNC_COMPLETE:
case TMF_RESP_FUNC_FAILED:
if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
res = asd_clear_nexus(task);
break;
case TMF_RESP_INVALID_FRAME:
case TMF_RESP_OVERLAPPED_TAG:
case TMF_RESP_FUNC_ESUPP:
case TMF_RESP_NO_LUN:
goto out_done; break;
}
else
res = tcs.tmf_state;
} else if (tcs.dl_opcode == TC_NO_ERROR &&
tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
/* timeout */
res = TMF_RESP_FUNC_FAILED;
} else {
/* In the following we assume that the managing layer
* will _never_ make a mistake, when issuing ABORT TASK.
* will _never_ make a mistake, when issuing ABORT
* TASK.
*/
switch (res) {
switch (tcs.dl_opcode) {
default:
res = asd_clear_nexus(task);
/* fallthrough */
case TC_NO_ERROR + 0xFF00:
case TMF_RESP_FUNC_COMPLETE:
case TC_NO_ERROR:
break;
/* The task hasn't been sent to the device xor we never got
* a (sane) Response IU for the ABORT TASK TMF.
/* The task hasn't been sent to the device xor
* we never got a (sane) Response IU for the
* ABORT TASK TMF.
*/
case TF_NAK_RECV + 0xFF00:
case TF_NAK_RECV:
res = TMF_RESP_INVALID_FRAME;
break;
case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
case TF_TMF_TASK_DONE: /* done but not reported yet */
res = TMF_RESP_FUNC_FAILED;
leftover = wait_for_completion_timeout(&tascb->completion,
leftover =
wait_for_completion_timeout(&tascb_completion,
AIC94XX_SCB_TIMEOUT);
spin_lock_irqsave(&task->task_state_lock, flags);
if (leftover < 1)
@ -463,23 +526,28 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE)
res = TMF_RESP_FUNC_COMPLETE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
goto out_done;
case TF_TMF_NO_TAG + 0xFF00:
case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
break;
case TF_TMF_NO_TAG:
case TF_TMF_TAG_FREE: /* the tag is in the free list */
case TF_TMF_NO_CONN_HANDLE: /* no such device */
res = TMF_RESP_FUNC_COMPLETE;
goto out_done;
case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
break;
case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
res = TMF_RESP_FUNC_ESUPP;
goto out;
break;
}
}
out_done:
tascb->completion = NULL;
if (res == TMF_RESP_FUNC_COMPLETE) {
task->lldd_task = NULL;
mb();
asd_ascb_free(tascb);
}
out:
ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
return res;
out_free:
asd_ascb_free(ascb);
ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
return res;
@ -507,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
struct asd_ascb *ascb;
int res = 1;
struct scb *scb;
DECLARE_COMPLETION_ONSTACK(completion);
DECLARE_TCS(tcs);
if (!(dev->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
@ -514,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
if (!ascb)
return -ENOMEM;
ascb->completion = &completion;
ascb->uldd_task = &tcs;
scb = ascb->scb;
if (tmf == TMF_QUERY_TASK)
@ -546,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
asd_tmf_timedout);
if (res)
goto out_err;
wait_for_completion(&ascb->completion);
res = (int) (unsigned long) ascb->uldd_task;
wait_for_completion(&completion);
switch (res) {
case TC_NO_ERROR + 0xFF00:
switch (tcs.dl_opcode) {
case TC_NO_ERROR:
res = TMF_RESP_FUNC_COMPLETE;
break;
case TF_NAK_RECV + 0xFF00:
case TF_NAK_RECV:
res = TMF_RESP_INVALID_FRAME;
break;
case TF_TMF_TASK_DONE + 0xFF00:
case TF_TMF_TASK_DONE:
res = TMF_RESP_FUNC_FAILED;
break;
case TF_TMF_NO_TAG + 0xFF00:
case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
case TF_TMF_NO_TAG:
case TF_TMF_TAG_FREE: /* the tag is in the free list */
case TF_TMF_NO_CONN_HANDLE: /* no such device */
res = TMF_RESP_FUNC_COMPLETE;
break;
case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
res = TMF_RESP_FUNC_ESUPP;
break;
default:
/* Allow TMF response codes to propagate upwards */
res = tcs.dl_opcode;
break;
}
return res;
out_err:
asd_ascb_free(ascb);
return res;


@ -48,7 +48,7 @@ struct class_device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 320
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096


@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
static void gdth_clear_events(void);
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
char *buffer, ushort count, int to_buffer);
char *buffer, ushort count);
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
unsigned int cmd, unsigned long arg);
static void gdth_flush(gdth_ha_str *ha);
static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
struct gdth_cmndinfo *cmndinfo);
@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
#include "gdth_proc.h"
#include "gdth_proc.c"
/* notifier block to get a notify on system shutdown/halt/reboot */
static struct notifier_block gdth_notifier = {
gdth_halt, NULL, 0
};
static int notifier_disabled = 0;
static gdth_ha_str *gdth_find_ha(int hanum)
{
gdth_ha_str *ha;
@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
for (i=0; i<GDTH_MAXCMDS; ++i) {
if (ha->cmndinfo[i].index == 0) {
priv = &ha->cmndinfo[i];
priv->index = i+1;
memset(priv, 0, sizeof(*priv));
priv->index = i+1;
break;
}
}
@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
gdth_ha_str *ha = shost_priv(sdev->host);
Scsi_Cmnd *scp;
struct gdth_cmndinfo cmndinfo;
struct scatterlist one_sg;
DECLARE_COMPLETION_ONSTACK(wait);
int rval;
@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
scp->timeout_per_command = timeout*HZ;
sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd));
gdth_set_sglist(scp, &one_sg);
gdth_set_sg_count(scp, 1);
gdth_set_bufflen(scp, sizeof(*gdtcmd));
scp->cmd_len = 12;
memcpy(scp->cmnd, cmnd, 12);
cmndinfo.priority = IOCTL_PRI;
cmndinfo.internal_cmd_str = gdtcmd;
cmndinfo.internal_command = 1;
TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha)
* buffers, kmap_atomic() as needed.
*/
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
char *buffer, ushort count, int to_buffer)
char *buffer, ushort count)
{
ushort cpcount,i, max_sg = gdth_sg_count(scp);
ushort cpsum,cpnow;
@ -2381,9 +2370,6 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
}
local_irq_save(flags);
address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
if (to_buffer)
memcpy(buffer, address, cpnow);
else
memcpy(address, buffer, cpnow);
flush_dcache_page(sg_page(sl));
kunmap_atomic(address, KM_BIO_SRC_IRQ);
@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
strcpy(inq.vendor,ha->oem_name);
sprintf(inq.product,"Host Drive #%02d",t);
strcpy(inq.revision," ");
gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0);
gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
break;
case REQUEST_SENSE:
@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
sd.key = NO_SENSE;
sd.info = 0;
sd.add_length= 0;
gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0);
gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
break;
case MODE_SENSE:
@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0);
gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
break;
case READ_CAPACITY:
@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
else
rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
rdc.block_length = cpu_to_be32(SECTOR_SIZE);
gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0);
gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
break;
case SERVICE_ACTION_IN:
@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
gdth_copy_internal_data(ha, scp, (char*)&rdc16,
sizeof(gdth_rdcap16_data), 0);
sizeof(gdth_rdcap16_data));
} else {
scp->result = DID_ABORT << 16;
}
@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
int cmd_index;
cmdp= ha->pccb;
@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
if (ha->type==GDT_EISA && ha->cmd_cnt>0)
return 0;
gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1);
*cmdp = *cmndinfo->internal_cmd_str;
cmdp->RequestBuffer = scp;
/* search free command index */
@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data)
gdth_ha_str *ha;
ulong flags;
BUG_ON(list_empty(&gdth_instances));
ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
spin_lock_irqsave(&ha->smp_lock, flags);
@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha)
}
}
/* shutdown routine */
static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
{
gdth_ha_str *ha;
#ifndef __alpha__
gdth_cmd_str gdtcmd;
char cmnd[MAX_COMMAND_SIZE];
#endif
if (notifier_disabled)
return NOTIFY_OK;
TRACE2(("gdth_halt() event %d\n",(int)event));
if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
return NOTIFY_DONE;
notifier_disabled = 1;
printk("GDT-HA: Flushing all host drives .. ");
list_for_each_entry(ha, &gdth_instances, list) {
gdth_flush(ha);
#ifndef __alpha__
/* controller reset */
memset(cmnd, 0xff, MAX_COMMAND_SIZE);
gdtcmd.BoardNode = LOCALBOARD;
gdtcmd.Service = CACHESERVICE;
gdtcmd.OpCode = GDT_RESET;
TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum));
gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL);
#endif
}
printk("Done.\n");
#ifdef GDTH_STATISTICS
del_timer(&gdth_timer);
#endif
return NOTIFY_OK;
}
/* configure lun */
static int gdth_slave_configure(struct scsi_device *sdev)
{
@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha)
scsi_remove_host(shp);
gdth_flush(ha);
if (ha->sdev) {
scsi_free_host_dev(ha->sdev);
ha->sdev = NULL;
}
gdth_flush(ha);
if (shp->irq)
free_irq(shp->irq,ha);
@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha)
scsi_host_put(shp);
}
static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
{
gdth_ha_str *ha;
TRACE2(("gdth_halt() event %d\n", (int)event));
if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
return NOTIFY_DONE;
list_for_each_entry(ha, &gdth_instances, list)
gdth_flush(ha);
return NOTIFY_OK;
}
static struct notifier_block gdth_notifier = {
gdth_halt, NULL, 0
};
static int __init gdth_init(void)
{
if (disable) {
@ -5236,7 +5204,6 @@ static int __init gdth_init(void)
add_timer(&gdth_timer);
#endif
major = register_chrdev(0,"gdth", &gdth_fops);
notifier_disabled = 0;
register_reboot_notifier(&gdth_notifier);
gdth_polling = FALSE;
return 0;
@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void)
{
gdth_ha_str *ha;
list_for_each_entry(ha, &gdth_instances, list)
gdth_remove_one(ha);
#ifdef GDTH_STATISTICS
del_timer(&gdth_timer);
#endif
unregister_chrdev(major, "gdth");
unregister_reboot_notifier(&gdth_notifier);
#ifdef GDTH_STATISTICS
del_timer_sync(&gdth_timer);
#endif
list_for_each_entry(ha, &gdth_instances, list)
gdth_remove_one(ha);
}
module_init(gdth_init);


@ -915,6 +915,7 @@ typedef struct {
struct gdth_cmndinfo { /* per-command private info */
int index;
int internal_command; /* don't call scsi_done */
gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
dma_addr_t sense_paddr; /* sense dma-addr */
unchar priority;
int timeout;


@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
int err = 0;
dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
cmd->usg_sg);
scsi_sg_count(sc));
if (scsi_sg_count(sc))
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (!shost)
goto free_vport;
shost->transportt = ibmvstgt_transport_template;
err = scsi_tgt_alloc_queue(shost);
if (err)
goto put_host;
target = host_to_srp_target(shost);
target->shost = shost;
@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (err)
goto destroy_queue;
err = scsi_tgt_alloc_queue(shost);
if (err)
goto destroy_queue;
return 0;
destroy_queue:
crq_queue_destroy(target);


@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
qdepth = ISCSI_DEF_CMD_PER_LUN;
}
if (!is_power_of_2(cmds_max) ||
cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
cmds_max < 2) {
if (cmds_max != 0)
printk(KERN_ERR "iscsi: invalid can_queue of %d. "
"can_queue must be a power of 2 and between "


@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap)
struct domain_device *dev = ap->private_data;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int res = 0;
int res = TMF_RESP_FUNC_FAILED;
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res)
if (res != TMF_RESP_FUNC_COMPLETE)
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
switch (dev->sata_dev.command_set) {
@ -656,21 +656,6 @@ static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
return res;
}
static void sas_sata_propagate_sas_addr(struct domain_device *dev)
{
unsigned long flags;
struct asd_sas_port *port = dev->port;
struct asd_sas_phy *phy;
BUG_ON(dev->parent);
memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_lock_irqsave(&port->phy_list_lock, flags);
list_for_each_entry(phy, &port->phy_list, port_phy_el)
memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_unlock_irqrestore(&port->phy_list_lock, flags);
}
#define ATA_IDENTIFY_DEV 0xEC
#define ATA_IDENTIFY_PACKET_DEV 0xA1
#define ATA_SET_FEATURES 0xEF
@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev)
goto out_err;
}
cont1:
/* Get WWN */
if (dev->port->oob_mode != SATA_OOB_MODE) {
memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
SAS_ADDR_SIZE);
} else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
(le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
== 0x5000) {
int i;
for (i = 0; i < 4; i++) {
dev->sas_addr[2*i] =
(le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
dev->sas_addr[2*i+1] =
le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
}
}
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
if (!dev->parent)
sas_sata_propagate_sas_addr(dev);
/* XXX Hint: register this SATA device with SATL.
When this returns, dev->sata_dev->lu is alive and
present.


@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (!port->phy)
port->phy = phy->phy;
SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
port->id, port->phy_mask);
if (*(u64 *)port->attached_sas_addr == 0) {
port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
sas_port_add_phy(port->port, phy->phy);
SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
phy->phy->dev.bus_id,port->port->dev.bus_id,
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
if (port->port_dev)
port->port_dev->pathways = port->num_phys;
@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work)
static void sas_init_port(struct asd_sas_port *port,
struct sas_ha_struct *sas_ha, int i)
{
memset(port, 0, sizeof(*port));
port->id = i;
INIT_LIST_HEAD(&port->dev_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->num_phys = 0;
port->phy_mask = 0;
port->ha = sas_ha;
spin_lock_init(&port->dev_list_lock);


@ -434,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev)
}
/* Find the sas_phy that's attached to this device */
static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
struct sas_phy *sas_find_local_phy(struct domain_device *dev)
{
struct domain_device *pdev = dev->parent;
struct ex_phy *exphy = NULL;
@ -456,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
BUG_ON(!exphy);
return exphy->phy;
}
EXPORT_SYMBOL_GPL(sas_find_local_phy);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@ -482,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_phy *phy = find_local_sas_phy(dev);
struct sas_phy *phy = sas_find_local_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
@ -497,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
}
/* Try to reset a device */
static int try_to_reset_cmd_device(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
int res;
struct Scsi_Host *shost = cmd->device->host;
if (!shost->hostt->eh_device_reset_handler)
goto try_bus_reset;
@ -540,6 +541,12 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
__FUNCTION__, task);
goto reset;
}
SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
@ -550,18 +557,15 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
task);
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__FUNCTION__, task);
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("dev %016llx LU %x is "
@ -569,8 +573,6 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
@ -581,15 +583,15 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
struct domain_device *dev = task->dev;
SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_I_T(work_q, task->dev);
sas_scsi_clear_queue_I_T(work_q, dev);
goto Again;
}
/* Hammer time :-) */
try_to_reset_cmd_device(cmd);
if (i->dft->lldd_clear_nexus_port) {
struct asd_sas_port *port = task->dev->port;
SAS_DPRINTK("clearing nexus for port:%d\n",
@ -599,8 +601,6 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
SAS_DPRINTK("clear nexus port:%d "
"succeeded\n", port->id);
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_port(work_q,
port);
goto Again;
@ -613,8 +613,6 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
SAS_DPRINTK("clear nexus ha "
"succeeded\n");
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
goto clear_q;
}
}
@ -628,8 +626,6 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
cmd->device->lun);
sas_eh_finish_cmd(cmd);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
goto clear_q;
}
}


@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "mvsas"
#define DRV_VERSION "0.5"
#define DRV_VERSION "0.5.1"
#define _MV_DUMP 0
#define MVS_DISABLE_NVRAM
#define MVS_DISABLE_MSI
@ -1005,7 +1005,7 @@ static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
return rc;
#else
/* FIXME , For SAS target mode */
memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8);
memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
return 0;
#endif
}
@ -1330,7 +1330,7 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
mvs_hba_cq_dump(mvi);
if (unlikely(rx_desc & RXQ_DONE))
if (likely(rx_desc & RXQ_DONE))
mvs_slot_complete(mvi, rx_desc);
if (rx_desc & RXQ_ATTN) {
attn = true;
@ -2720,9 +2720,8 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
msleep(100);
/* init and reset phys */
for (i = 0; i < mvi->chip->n_phy; i++) {
/* FIXME: is this the correct dword order? */
u32 lo = *((u32 *)&mvi->sas_addr[0]);
u32 hi = *((u32 *)&mvi->sas_addr[4]);
u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
mvs_detect_porttype(mvi, i);


@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
}
req_len += sgpnt->length;
}
scsi_set_resid(cmd, req_len - act_len);
scsi_set_resid(cmd, buflen - act_len);
return 0;
}
@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = {
.cmd_per_lun = 1,
.emulated = 1, /* only sg driver uses this */
.max_sectors = PS3ROM_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.use_clustering = DISABLE_CLUSTERING,
.module = THIS_MODULE,
};


@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = __constant_cpu_to_le16(25);
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
ct_pkt->timeout = __constant_cpu_to_le16(25);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = __constant_cpu_to_le16(59);
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
ct_pkt->timeout = __constant_cpu_to_le16(59);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
ct_pkt->timeout = __constant_cpu_to_le16(59);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);


@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->login_timeout = nv->login_timeout;
icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 200 tenths of a second. */
ha->r_a_tov = 200;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
ha->loop_reset_delay = nv->reset_delay;
@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
ha->login_timeout = le16_to_cpu(nv->login_timeout);
icb->login_timeout = cpu_to_le16(nv->login_timeout);
/* Set minimum RATOV to 200 tenths of a second. */
ha->r_a_tov = 200;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
ha->loop_reset_delay = nv->reset_delay;
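The qla2xxx hunks above replace the fixed 25- and 59-second CT/MS IOCB timeouts with a value derived from the adapter's R_A_TOV, which per the comments here is stored in tenths of a second, and lower its floor from 200 to 100 tenths. A hedged sketch of the conversion the new expression performs; the helper name is illustrative, not part of the driver:

#include <linux/types.h>

/*
 * Illustrative only: ha->r_a_tov is kept in tenths of a second, so
 * r_a_tov / 10 * 2 yields two R_A_TOV periods, presumably in the
 * seconds the IOCB timeout field expects.
 */
static inline u16 example_ct_timeout(u16 r_a_tov_tenths)
{
	return r_a_tov_tenths / 10 * 2;
}

With the new 100-tenth minimum set just above, example_ct_timeout(100) evaluates to 20, replacing the previous hard-coded 25 and 59.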
@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
return;
ret = qla2x00_stop_firmware(ha);
for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) {
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
retries ; retries--) {
qla2x00_reset_chip(ha);
if (qla2x00_chip_diag(ha) != QLA_SUCCESS)
continue;


@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
}
}
/* Check for overrun. */
if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
scsi_status & SS_RESIDUAL_OVER)
comp_status = CS_DATA_OVERRUN;
/*
* Based on Host and scsi status generate status code for Linux
*/


@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport)
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk->p.tsk.timeout = __constant_cpu_to_le16(25);
tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET);
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;


@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.02.00-k8"
#define QLA2XXX_VERSION "8.02.00-k9"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2


@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
ddb_entry->fw_ddb_device_state = state;
/* Device is back online. */
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
atomic_set(&ddb_entry->port_down_timer,
ha->port_down_retry_count);
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
atomic_set(&ddb_entry->relogin_retry_count, 0);
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);


@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static void qla4xxx_scan_start(struct Scsi_Host *shost);
static struct scsi_host_template qla4xxx_driver_template = {
.module = THIS_MODULE,
@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_destroy = qla4xxx_slave_destroy,
.scan_finished = iscsi_scan_finished,
.scan_start = qla4xxx_scan_start,
.this_id = -1,
.cmd_per_lun = 3,
@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
return ddb_entry;
}
static void qla4xxx_scan_start(struct Scsi_Host *shost)
{
struct scsi_qla_host *ha = shost_priv(shost);
struct ddb_entry *ddb_entry, *ddbtemp;
/* finish setup of sessions that were already setup in firmware */
list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
qla4xxx_add_sess(ddb_entry);
}
}
/*
* Timer routines
*/
@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
* qla4xxx_recover_adapter - recovers adapter after a fatal error
* @ha: Pointer to host adapter structure.
* @renew_ddb_list: Indicates what to do with the adapter's ddb list
* after adapter recovery has completed.
* 0=preserve ddb list, 1=destroy and rebuild ddb list
*
* renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild
* ddb list.
**/
static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
uint8_t renew_ddb_list)
@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
/* Stall incoming I/O until we are done */
clear_bit(AF_ONLINE, &ha->flags);
DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
__func__));
@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
int ret = -ENODEV, status;
struct Scsi_Host *host;
struct scsi_qla_host *ha;
struct ddb_entry *ddb_entry, *ddbtemp;
uint8_t init_retry_count = 0;
char buf[34];
@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ret)
goto probe_failed;
/* Update transport device information for all devices. */
list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
if (qla4xxx_add_sess(ddb_entry))
goto remove_host;
}
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
scsi_scan_host(host);
return 0;
remove_host:
qla4xxx_free_ddb_list(ha);
scsi_remove_host(host);
probe_failed:
qla4xxx_free_adapter(ha);
scsi_host_put(ha->host);
@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
return FAILED;
}
if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) {
/* make sure the dpc thread is stopped while we reset the hba */
clear_bit(AF_ONLINE, &ha->flags);
flush_workqueue(ha->dpc_thread);
if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS)
return_status = SUCCESS;
}
dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
return_status == FAILED ? "FAILED" : "SUCCEDED");


@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
if (!cmd)
goto release_rq;
memset(cmd, 0, sizeof(*cmd));
cmd->sc_data_direction = data_dir;
cmd->jiffies_at_alloc = jiffies;
cmd->request = rq;
@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
scsi_release_buffers(cmd);
goto unmap_rq;
}
/*
* we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
* length for us.
*/
cmd->sdb.length = rq->data_len;
return 0;


@ -33,7 +33,7 @@
#define ISCSI_SESSION_ATTRS 19
#define ISCSI_CONN_ATTRS 13
#define ISCSI_HOST_ATTRS 4
#define ISCSI_TRANSPORT_VERSION "2.0-868"
#define ISCSI_TRANSPORT_VERSION "2.0-869"
struct iscsi_internal {
int daemon_pid;
@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work)
scsi_target_unblock(&session->dev);
}
static void __iscsi_unblock_session(struct iscsi_cls_session *session)
{
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
scsi_target_unblock(&session->dev);
}
void iscsi_unblock_session(struct iscsi_cls_session *session)
static void __iscsi_unblock_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
unblock_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
unsigned long flags;
/*
* The recovery and unblock work get run from the same workqueue,
* so try to cancel it if it was going to run after this unblock.
*/
cancel_delayed_work(&session->recovery_work);
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
__iscsi_unblock_session(session);
/* start IO */
scsi_target_unblock(&session->dev);
/*
* Only do kernel scanning if the driver is properly hooked into
* the async scanning code (drivers like iscsi_tcp do login and
@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session)
atomic_inc(&ihost->nr_scans);
}
}
/**
* iscsi_unblock_session - set a session as logged in and start IO.
* @session: iscsi session
*
* Mark a session as ready to accept IO.
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
queue_work(iscsi_eh_timer_workq, &session->unblock_work);
/*
* make sure all the events have completed before tell the driver
* it is safe
*/
flush_workqueue(iscsi_eh_timer_workq);
}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
void iscsi_block_session(struct iscsi_cls_session *session)
static void __iscsi_block_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
block_work);
unsigned long flags;
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_block(&session->dev);
queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
session->recovery_tmo * HZ);
}
void iscsi_block_session(struct iscsi_cls_session *session)
{
queue_work(iscsi_eh_timer_workq, &session->block_work);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
static void __iscsi_unbind_session(struct work_struct *work)
@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost,
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
INIT_WORK(&session->block_work, __iscsi_block_session);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
INIT_WORK(&session->scan_work, iscsi_scan_session);
spin_lock_init(&session->lock);
@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
list_del(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
/* make sure there are no blocks/unblocks queued */
flush_workqueue(iscsi_eh_timer_workq);
/* make sure the timedout callout is not running */
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
/*
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
* We assume that LLD will not be calling block/unblock while
* removing the session.
*/
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_FREE;
spin_unlock_irqrestore(&session->lock, flags);
__iscsi_unblock_session(session);
__iscsi_unbind_session(&session->unbind_work);
/* flush running scans */
scsi_target_unblock(&session->dev);
/* flush running scans then delete devices */
flush_workqueue(ihost->scan_workq);
/*
* If the session dropped while removing devices then we need to make
* sure it is not blocked
*/
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
__iscsi_unbind_session(&session->unbind_work);
/* hw iscsi may not have removed all connections from session */
err = device_for_each_child(&session->dev, NULL,
@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = NLMSG_SPACE(sizeof(*ev));
unsigned long flags;
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
return;
spin_lock_irqsave(&session->lock, flags);
if (session->state == ISCSI_SESSION_LOGGED_IN)
session->state = ISCSI_SESSION_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "


@ -675,5 +675,6 @@ extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
struct ssp_response_iu *iu);
struct sas_phy *sas_find_local_phy(struct domain_device *dev);
#endif /* _SASLIB_H_ */
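With sas_find_local_phy() now declared in libsas.h (and exported from sas_scsi_host.c above), an LLDD can locate the local phy for a domain device and reset it, as the new aic94xx asd_I_T_nexus_reset() does. A minimal, hypothetical sketch of that usage; the function name is illustrative and error handling is elided:

#include <scsi/libsas.h>
#include <scsi/scsi_transport_sas.h>

/* Illustrative LLDD I_T nexus reset built on the newly exported helper. */
static int example_I_T_nexus_reset(struct domain_device *dev)
{
	struct sas_phy *phy = sas_find_local_phy(dev);
	/* link reset for SATA/STP, hard reset for SSP, as in aic94xx above */
	int hard_reset = !(dev->dev_type == SATA_DEV ||
			   (dev->tproto & SAS_PROTOCOL_STP));

	return sas_phy_reset(phy, hard_reset);
}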


@ -177,6 +177,8 @@ struct iscsi_cls_session {
struct list_head host_list;
struct iscsi_transport *transport;
spinlock_t lock;
struct work_struct block_work;
struct work_struct unblock_work;
struct work_struct scan_work;
struct work_struct unbind_work;