From b4567ca6304a9b31cb2eae62f812e9eb9badcb60 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Thu, 25 Jun 2009 15:43:02 -0400 Subject: [PATCH 001/209] [SCSI] update scsi/constants.c It has been 3 years since this file was sync-ed with www.t10.org . Information taken from the last bunch of drafts released in May 2009. More asc/ascq codes are coming for thin provisioning; when approved and allocated another patch could add them prior to this patch going live. Changelog: - add some new command names and rename two commands - sync asc/ascq table with www.t10.org/lists/asc-num.txt - correct bug in scsi_extd_sense_format() [second for loop] Signed-off-by: Douglas Gilbert Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/constants.c | 95 +++++++++++++++++++++++++++++++--------- 1 file changed, 75 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index e79e18101f87..63abb06c4edb 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -4,8 +4,7 @@ * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422) * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) * by D. Gilbert and aeb (20020609) - * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 - * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702 + * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624 */ #include @@ -56,9 +55,9 @@ static const char * cdb_byte0_names[] = { "Read Buffer", /* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", /* 40-41 */ "Change Definition", "Write Same(10)", -/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", - "Play audio(10)", "Get configuration", "Play audio msf", - "Play audio track/index", +/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP", + "Read density support", "Play audio(10)", "Get configuration", + "Play audio msf", "Play audio track/index", /* 49-4f */ "Play track relative(10)", "Get event status notification", "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", NULL, @@ -71,12 +70,13 @@ static const char * cdb_byte0_names[] = { /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, -/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", +/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB", + "Variable length", /* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", "Receive copy results", /* 85-89 */ "ATA command pass through(16)", "Access control in", "Access control out", "Read(16)", "Memory Export Out(16)", -/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes", +/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes", "Write and verify(16)", "Verify(16)", /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", "Lock/unlock cache(16)", "Write same(16)", NULL, @@ -107,22 +107,24 @@ struct value_name_pair { }; static const struct value_name_pair maint_in_arr[] = { - {0x5, "Report device identifier"}, + {0x5, "Report identifying information"}, {0xa, "Report target port groups"}, {0xb, "Report aliases"}, {0xc, "Report supported operation codes"}, {0xd, "Report supported task management functions"}, {0xe, "Report priority"}, {0xf, "Report timestamp"}, + {0x10, "Management protocol in"}, }; #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) static const struct value_name_pair maint_out_arr[] = { 
- {0x6, "Set device identifier"}, + {0x6, "Set identifying information"}, {0xa, "Set target port groups"}, {0xb, "Change aliases"}, {0xe, "Set priority"}, - {0xe, "Set timestamp"}, + {0xf, "Set timestamp"}, + {0x10, "Management protocol out"}, }; #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) @@ -412,6 +414,7 @@ static const struct error_info additional[] = {0x0004, "Beginning-of-partition/medium detected"}, {0x0005, "End-of-data detected"}, {0x0006, "I/O process terminated"}, + {0x0007, "Programmable early warning detected"}, {0x0011, "Audio play operation in progress"}, {0x0012, "Audio play operation paused"}, {0x0013, "Audio play operation successfully completed"}, @@ -425,6 +428,7 @@ static const struct error_info additional[] = {0x001B, "Set capacity operation in progress"}, {0x001C, "Verify operation in progress"}, {0x001D, "ATA pass through information available"}, + {0x001E, "Conflicting SA creation request"}, {0x0100, "No index/sector signal"}, @@ -449,9 +453,12 @@ static const struct error_info additional[] = {0x040B, "Logical unit not accessible, target port in standby state"}, {0x040C, "Logical unit not accessible, target port in unavailable " "state"}, + {0x040D, "Logical unit not ready, structure check required"}, {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, {0x0411, "Logical unit not ready, notify (enable spinup) required"}, {0x0412, "Logical unit not ready, offline"}, + {0x0413, "Logical unit not ready, SA creation in progress"}, + {0x0414, "Logical unit not ready, space allocation in progress"}, {0x0500, "Logical unit does not respond to selection"}, @@ -479,6 +486,9 @@ static const struct error_info additional[] = {0x0B03, "Warning - background self-test failed"}, {0x0B04, "Warning - background pre-scan detected medium error"}, {0x0B05, "Warning - background medium scan detected medium error"}, + {0x0B06, "Warning - non-volatile cache now volatile"}, + {0x0B07, "Warning - degraded power to non-volatile cache"}, + {0x0B08, "Warning - power loss expected"}, {0x0C00, "Write error"}, {0x0C01, "Write error - recovered with auto reallocation"}, @@ -593,6 +603,7 @@ static const struct error_info additional[] = {0x1C02, "Grown defect list not found"}, {0x1D00, "Miscompare during verify operation"}, + {0x1D01, "Miscompare verify of unmapped LBA"}, {0x1E00, "Recovered id with ECC correction"}, @@ -626,6 +637,7 @@ static const struct error_info additional[] = {0x2405, "Security working key frozen"}, {0x2406, "Nonce not unique"}, {0x2407, "Nonce timestamp out of range"}, + {0x2408, "Invalid XCDB"}, {0x2500, "Logical unit not supported"}, @@ -656,10 +668,12 @@ static const struct error_info additional[] = {0x2704, "Persistent write protect"}, {0x2705, "Permanent write protect"}, {0x2706, "Conditional write protect"}, + {0x2707, "Space allocation failed write protect"}, {0x2800, "Not ready to ready change, medium may have changed"}, {0x2801, "Import or export element accessed"}, {0x2802, "Format-layer may have changed"}, + {0x2803, "Import/export element accessed, medium changed"}, {0x2900, "Power on, reset, or bus device reset occurred"}, {0x2901, "Power on occurred"}, @@ -680,11 +694,16 @@ static const struct error_info additional[] = {0x2A07, "Implicit asymmetric access state transition failed"}, {0x2A08, "Priority changed"}, {0x2A09, "Capacity data has changed"}, + {0x2A0A, "Error history I_T nexus cleared"}, + {0x2A0B, "Error history snapshot released"}, + {0x2A0C, "Error recovery attributes have changed"}, + {0x2A0D, "Data encryption capabilities 
changed"}, {0x2A10, "Timestamp changed"}, {0x2A11, "Data encryption parameters changed by another i_t nexus"}, {0x2A12, "Data encryption parameters changed by vendor specific " "event"}, {0x2A13, "Data encryption key instance counter has changed"}, + {0x2A14, "SA creation capabilities data has changed"}, {0x2B00, "Copy cannot execute since host cannot disconnect"}, @@ -723,6 +742,8 @@ static const struct error_info additional[] = {0x300C, "WORM medium - overwrite attempted"}, {0x300D, "WORM medium - integrity check"}, {0x3010, "Medium not formatted"}, + {0x3011, "Incompatible volume type"}, + {0x3012, "Incompatible volume qualifier"}, {0x3100, "Medium format corrupted"}, {0x3101, "Format command failed"}, @@ -782,6 +803,10 @@ static const struct error_info additional[] = {0x3B15, "Medium magazine unlocked"}, {0x3B16, "Mechanical positioning or changer error"}, {0x3B17, "Read past end of user object"}, + {0x3B18, "Element disabled"}, + {0x3B19, "Element enabled"}, + {0x3B1A, "Data transfer device removed"}, + {0x3B1B, "Data transfer device inserted"}, {0x3D00, "Invalid bits in identify message"}, @@ -882,6 +907,8 @@ static const struct error_info additional[] = {0x5506, "Auxiliary memory out of space"}, {0x5507, "Quota error"}, {0x5508, "Maximum number of supplemental decryption keys exceeded"}, + {0x5509, "Medium auxiliary memory not accessible"}, + {0x550A, "Data currently unavailable"}, {0x5700, "Unable to recover table-of-contents"}, @@ -993,6 +1020,12 @@ static const struct error_info additional[] = {0x5E02, "Standby condition activated by timer"}, {0x5E03, "Idle condition activated by command"}, {0x5E04, "Standby condition activated by command"}, + {0x5E05, "Idle_b condition activated by timer"}, + {0x5E06, "Idle_b condition activated by command"}, + {0x5E07, "Idle_c condition activated by timer"}, + {0x5E08, "Idle_c condition activated by command"}, + {0x5E09, "Standby_y condition activated by timer"}, + {0x5E0A, "Standby_y condition activated by command"}, {0x5E41, "Power state change to active"}, {0x5E42, "Power state change to idle"}, {0x5E43, "Power state change to standby"}, @@ -1091,7 +1124,28 @@ static const struct error_info additional[] = {0x7403, "Incorrect data encryption key"}, {0x7404, "Cryptographic integrity validation failed"}, {0x7405, "Error decrypting data"}, + {0x7406, "Unknown signature verification key"}, + {0x7407, "Encryption parameters not useable"}, + {0x7408, "Digital signature validation failure"}, + {0x7409, "Encryption mode mismatch on read"}, + {0x740A, "Encrypted block not raw read enabled"}, + {0x740B, "Incorrect Encryption parameters"}, + {0x740C, "Unable to decrypt parameter list"}, + {0x740D, "Encryption algorithm disabled"}, + {0x7410, "SA creation parameter value invalid"}, + {0x7411, "SA creation parameter value rejected"}, + {0x7412, "Invalid SA usage"}, + {0x7421, "Data Encryption configuration prevented"}, + {0x7430, "SA creation parameter not supported"}, + {0x7440, "Authentication failed"}, + {0x7461, "External data encryption key manager access error"}, + {0x7462, "External data encryption key manager error"}, + {0x7463, "External data encryption key not found"}, + {0x7464, "External data encryption request not authorized"}, + {0x746E, "External data encryption control timeout"}, + {0x746F, "External data encryption control error"}, {0x7471, "Logical unit access not authorized"}, + {0x7479, "Security conflict in translated device"}, {0, NULL} }; @@ -1103,12 +1157,12 @@ struct error_info2 { static const struct error_info2 additional2[] = { 
- {0x40,0x00,0x7f,"Ram failure (%x)"}, - {0x40,0x80,0xff,"Diagnostic failure on component (%x)"}, - {0x41,0x00,0xff,"Data path failure (%x)"}, - {0x42,0x00,0xff,"Power-on or self-test failure (%x)"}, - {0x4D,0x00,0xff,"Tagged overlapped commands (queue tag %x)"}, - {0x70,0x00,0xff,"Decompression exception short algorithm id of %x"}, + {0x40, 0x00, 0x7f, "Ram failure (%x)"}, + {0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"}, + {0x41, 0x00, 0xff, "Data path failure (%x)"}, + {0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"}, + {0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"}, + {0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"}, {0, 0, 0, NULL} }; @@ -1157,14 +1211,15 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) { int i; unsigned short code = ((asc << 8) | ascq); - for (i=0; additional[i].text; i++) + for (i = 0; additional[i].text; i++) if (additional[i].code12 == code) return additional[i].text; - for (i=0; additional2[i].fmt; i++) + for (i = 0; additional2[i].fmt; i++) { if (additional2[i].code1 == asc && - additional2[i].code2_min >= ascq && - additional2[i].code2_max <= ascq) + ascq >= additional2[i].code2_min && + ascq <= additional2[i].code2_max) return additional2[i].fmt; + } #endif return NULL; } From 6c10db72c94818573552fd71c89540da325efdfb Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 26 Jun 2009 19:30:06 -0700 Subject: [PATCH 002/209] [SCSI] scsi_dh: Reference count scsi_dh_attach Problem reported: http://marc.info/?l=dm-devel&m=124585978305866&w=2 scsi_dh does not do a refernce count for attach/detach, and this affects the way it is supposed to work with multipath when a device is not in the dev_list of the hardware handler. This patch adds a reference count that counts each attach. 
Signed-off-by: Chandra Seetharaman Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh.c | 23 ++++++++++++++++------- include/scsi/scsi_device.h | 2 ++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index a518f2eff19a..53a7385e1b4d 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -153,12 +153,24 @@ static int scsi_dh_handler_attach(struct scsi_device *sdev, if (sdev->scsi_dh_data) { if (sdev->scsi_dh_data->scsi_dh != scsi_dh) err = -EBUSY; - } else if (scsi_dh->attach) + else + kref_get(&sdev->scsi_dh_data->kref); + } else if (scsi_dh->attach) { err = scsi_dh->attach(sdev); - + if (!err) { + kref_init(&sdev->scsi_dh_data->kref); + sdev->scsi_dh_data->sdev = sdev; + } + } return err; } +static void __detach_handler (struct kref *kref) +{ + struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref); + scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev); +} + /* * scsi_dh_handler_detach - Detach a device handler from a device * @sdev - SCSI device the device handler should be detached from @@ -180,7 +192,7 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev, scsi_dh = sdev->scsi_dh_data->scsi_dh; if (scsi_dh && scsi_dh->detach) - scsi_dh->detach(sdev); + kref_put(&sdev->scsi_dh_data->kref, __detach_handler); } /* @@ -474,7 +486,6 @@ int scsi_dh_attach(struct request_queue *q, const char *name) if (!err) { err = scsi_dh_handler_attach(sdev, scsi_dh); - put_device(&sdev->sdev_gendev); } return err; @@ -505,10 +516,8 @@ void scsi_dh_detach(struct request_queue *q) return; if (sdev->scsi_dh_data) { - /* if sdev is not on internal list, detach */ scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (!device_handler_match(scsi_dh, sdev)) - scsi_dh_handler_detach(sdev, scsi_dh); + scsi_dh_handler_detach(sdev, scsi_dh); } put_device(&sdev->sdev_gendev); } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 3f566af3f101..1f3a4c8044c0 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -191,6 +191,8 @@ struct scsi_device_handler { struct scsi_dh_data { struct scsi_device_handler *scsi_dh; + struct scsi_device *sdev; + struct kref kref; char buf[0]; }; From 002b1eb2c03ccec36bf6e7b719cccedf57d83402 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Sat, 23 May 2009 11:45:07 -0400 Subject: [PATCH 003/209] [SCSI] Print failed commands When a request fails we print the sense data but not the actual command that failed. Add a printout of the operation + CDB for failed commands. Signed-off-by: Martin K. 
Petersen Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/scsi_lib.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f3c40898fc7d..662024d86949 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -896,6 +896,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_result(cmd); if (driver_byte(result) & DRIVER_SENSE) scsi_print_sense("", cmd); + scsi_print_command(cmd); } blk_end_request_all(req, -EIO); scsi_next_command(cmd); From 1ed0f6a3ef94996341f3c5a2d4034ba1a2532b0a Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Tue, 23 Jun 2009 13:56:29 -0700 Subject: [PATCH 004/209] [SCSI] bnx2i: remove global variable bnx2i_reg_devices Removed bnx2i_reg_devices as this counter is not really used in a meaningful way Signed-off-by: Michael Chan Signed-off-by: Anil Veerabhadrappa Reviewed-by: Mike Christie Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index ae4b2d588fd3..269192d74372 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -15,7 +15,6 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; -static int bnx2i_reg_device; #define DRV_MODULE_NAME "bnx2i" #define DRV_MODULE_VERSION "2.0.1d" @@ -193,10 +192,6 @@ void bnx2i_register_device(struct bnx2i_hba *hba) hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); - spin_lock(&hba->lock); - bnx2i_reg_device++; - spin_unlock(&hba->lock); - set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } @@ -234,10 +229,6 @@ static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); - spin_lock(&hba->lock); - bnx2i_reg_device--; - spin_unlock(&hba->lock); - /* ep_disconnect could come before NETDEV_DOWN, driver won't * see NETDEV_DOWN as it already unregistered itself. 
*/ @@ -276,16 +267,12 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) int rc; read_lock(&bnx2i_dev_lock); - if (bnx2i_reg_device && - !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); if (rc) /* duplicate registration */ printk(KERN_ERR "bnx2i- dev reg failed\n"); - spin_lock(&hba->lock); - bnx2i_reg_device++; hba->age++; - spin_unlock(&hba->lock); set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } @@ -350,10 +337,6 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - - spin_lock(&hba->lock); - bnx2i_reg_device--; - spin_unlock(&hba->lock); } write_unlock(&bnx2i_dev_lock); @@ -421,7 +404,6 @@ static void __exit bnx2i_mod_exit(void) if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - bnx2i_reg_device--; } write_unlock(&bnx2i_dev_lock); From 4e85f15166a6a9da8317ddb8600b604b27e58aa3 Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Tue, 23 Jun 2009 14:04:23 -0700 Subject: [PATCH 005/209] [SCSI] bnx2i: bug fixes in bnx2i_init_one to handle error conditions Fixed bnx2i_init_one() to properly handle return code of cnic->register_device() and propagate it back to the caller. No need to check for BNX2I_CNIC_REGISTERED, because unless the adapter is added to adapter_list it will not be registered in ep_connect context Signed-off-by: Michael Chan Signed-off-by: Anil Veerabhadrappa Reviewed-by: Mike Christie Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 269192d74372..2aeeeee3372d 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -267,22 +267,29 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) int rc; read_lock(&bnx2i_dev_lock); - if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); - if (rc) /* duplicate registration */ - printk(KERN_ERR "bnx2i- dev reg failed\n"); - + rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); + if (!rc) { hba->age++; - set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - } + } else if (rc == -EBUSY) /* duplicate registration */ + printk(KERN_ALERT "bnx2i, duplicate registration" + "hba=%p, cnic=%p\n", hba, cnic); + else if (rc == -EAGAIN) + printk(KERN_ERR "bnx2i, driver not registered\n"); + else if (rc == -EINVAL) + printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI); + else + printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); read_unlock(&bnx2i_dev_lock); - write_lock(&bnx2i_dev_lock); - list_add_tail(&hba->link, &adapter_list); - adapter_count++; - write_unlock(&bnx2i_dev_lock); - return 0; + if (!rc) { + write_lock(&bnx2i_dev_lock); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + write_unlock(&bnx2i_dev_lock); + } + + return rc; } From 3be924fb1d7be90c3ae6aa30ca42e9aa5d75efaf Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Tue, 23 Jun 2009 14:07:40 -0700 Subject: [PATCH 006/209] [SCSI] bnx2i: convert bnx2i_dev_lock to mutex convert 
bnx2i_dev_lock to type mutex from rwlock_t because cnic->register_device() can sleep for various reasons including memory allocation, waiting for ISCSI_INIT completion and while acquiring mutex lock, cnic_lock. Signed-off-by: Michael Chan Signed-off-by: Anil Veerabhadrappa Reviewed-by: Mike Christie Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 54 +++++++++++++++------------------ 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 2aeeeee3372d..fd78540ffc95 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.0.1d" -#define DRV_MODULE_RELDATE "Mar 25, 2009" +#define DRV_MODULE_VERSION "2.0.1e" +#define DRV_MODULE_RELDATE "June 22, 2009" static char version[] __devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ @@ -30,7 +30,7 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -static DEFINE_RWLOCK(bnx2i_dev_lock); +static DEFINE_MUTEX(bnx2i_dev_lock); unsigned int event_coal_div = 1; module_param(event_coal_div, int, 0664); @@ -99,14 +99,14 @@ struct bnx2i_hba *get_adapter_list_head(void) if (!adapter_count) goto hba_not_found; - read_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); list_for_each_entry(tmp_hba, &adapter_list, link) { if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { hba = tmp_hba; break; } } - read_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); hba_not_found: return hba; } @@ -121,14 +121,14 @@ struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) { struct bnx2i_hba *hba, *temp; - read_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); list_for_each_entry_safe(hba, temp, &adapter_list, link) { if (hba->cnic == cnic) { - read_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); return hba; } } - read_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); return NULL; } @@ -206,10 +206,10 @@ void bnx2i_reg_dev_all(void) { struct bnx2i_hba *hba, *temp; - read_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); list_for_each_entry_safe(hba, temp, &adapter_list, link) bnx2i_register_device(hba); - read_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); } @@ -246,10 +246,10 @@ void bnx2i_unreg_dev_all(void) { struct bnx2i_hba *hba, *temp; - read_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); list_for_each_entry_safe(hba, temp, &adapter_list, link) bnx2i_unreg_one_device(hba); - read_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); } @@ -258,19 +258,21 @@ void bnx2i_unreg_dev_all(void) * @hba: bnx2i adapter instance * @cnic: cnic device handle * - * Global resource lock and host adapter lock is held during critical sections - * below. This routine is called from cnic_register_driver() context and - * work horse thread which does majority of device specific initialization + * Global resource lock is held during critical sections below. 
This routine is + * called from either cnic_register_driver() or device hot plug context and + * and does majority of device specific initialization */ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) { int rc; - read_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); if (!rc) { hba->age++; set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; } else if (rc == -EBUSY) /* duplicate registration */ printk(KERN_ALERT "bnx2i, duplicate registration" "hba=%p, cnic=%p\n", hba, cnic); @@ -280,14 +282,8 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI); else printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); - read_unlock(&bnx2i_dev_lock); - if (!rc) { - write_lock(&bnx2i_dev_lock); - list_add_tail(&hba->link, &adapter_list); - adapter_count++; - write_unlock(&bnx2i_dev_lock); - } + mutex_unlock(&bnx2i_dev_lock); return rc; } @@ -337,7 +333,7 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) "found, dev 0x%p\n", dev); return; } - write_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); list_del_init(&hba->link); adapter_count--; @@ -345,7 +341,7 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } - write_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); bnx2i_free_hba(hba); } @@ -367,6 +363,8 @@ static int __init bnx2i_mod_init(void) if (!is_power_of_2(sq_size)) sq_size = roundup_pow_of_two(sq_size); + mutex_init(&bnx2i_dev_lock); + bnx2i_scsi_xport_template = iscsi_register_transport(&bnx2i_iscsi_transport); if (!bnx2i_scsi_xport_template) { @@ -402,7 +400,7 @@ static void __exit bnx2i_mod_exit(void) { struct bnx2i_hba *hba; - write_lock(&bnx2i_dev_lock); + mutex_lock(&bnx2i_dev_lock); while (!list_empty(&adapter_list)) { hba = list_entry(adapter_list.next, struct bnx2i_hba, link); list_del(&hba->link); @@ -413,11 +411,9 @@ static void __exit bnx2i_mod_exit(void) clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } - write_unlock(&bnx2i_dev_lock); bnx2i_free_hba(hba); - write_lock(&bnx2i_dev_lock); } - write_unlock(&bnx2i_dev_lock); + mutex_unlock(&bnx2i_dev_lock); iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); From fe5d20c818a8c5fe83d9f2223a051fb5bc50d180 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 4 Jul 2009 13:10:41 -0700 Subject: [PATCH 007/209] [SCSI] scsi_transport_fc: fix kernel-doc param name Change function parameter name in kernel-doc to match the function's actual parameter name, to fix 2 kernel-doc warnings. 
Signed-off-by: Randy Dunlap Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 292c02f810d0..751f239e01ea 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3432,7 +3432,7 @@ fc_bsg_jobdone(struct fc_bsg_job *job) /** * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests - * @req: BSG request that holds the job to be destroyed + * @rq: BSG request that holds the job to be destroyed */ static void fc_bsg_softirq_done(struct request *rq) { From fac3cc458fc2f8272bcc1ff1903474ff41715723 Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Wed, 8 Jul 2009 18:21:01 -0700 Subject: [PATCH 008/209] [SCSI] bnx2i: register given device with cnic if shost != NULL in ep_connect() When using iface, bnx2i was unable to offload further connections after all active sessions are logged out. bnx2i will unregister the device from cnic when the last connection is torn down. Next call to ep_connect() will fail because the device is not registered. This issue is not seen if shost == NULL is passed to ep_connect() call because in that case bnx2i will registers all known devices with cnic before doing a route look-up. When shost != NULL, bnx2i knows the device on which to offload the connection and has to register this device before attempting to offload the connection Signed-off-by: Anil Veerabhadrappa Reviewed-by: Michael Chan Reviewed-by Mike Christie Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 7 +++++-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index fd78540ffc95..0c4210d48ee8 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -185,14 +185,17 @@ void bnx2i_stop(void *handle) */ void bnx2i_register_device(struct bnx2i_hba *hba) { + int rc; + if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { return; } - hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); + rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); - set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + if (!rc) + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f7412196f2f8..98148f3f3c63 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1653,15 +1653,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, struct iscsi_endpoint *ep; int rc = 0; - if (shost) + if (shost) { /* driver is given scsi host to work with */ hba = iscsi_host_priv(shost); - else + /* Register the device with cnic if not already done so */ + bnx2i_register_device(hba); + } else /* * check if the given destination can be reached through * a iscsi capable NetXtreme2 device */ hba = bnx2i_check_route(dst_addr); + if (!hba) { rc = -ENOMEM; goto check_busy; From 0fd30f77693f4fef32d30d4801cd21dcd487c2f0 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 13 Jul 2009 16:27:46 -0400 Subject: [PATCH 009/209] [SCSI] qla2xxx: Fix __LITTLE_ENDIAN definition warnings On ppc64, gcc 4.4 spews lots of.. 
drivers/scsi/qla2xxx/qla_def.h:1485:7: warning: "__LITTLE_ENDIAN" is not defined Signed-off-by: Dave Jones Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 00aa48d975a6..9fde8bfe7607 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1482,7 +1482,7 @@ typedef union { uint8_t domain; uint8_t area; uint8_t al_pa; -#elif __LITTLE_ENDIAN +#elif defined(__LITTLE_ENDIAN) uint8_t al_pa; uint8_t area; uint8_t domain; From e34ccdfe0e08a6acb8c5e649fef1e94e6cd637f9 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 13 Jul 2009 23:25:54 +0200 Subject: [PATCH 010/209] [SCSI] lpfc: don't dereference NULL When kzalloc fails in lpfc_hba_alloc, don't dereference the NULL by lpfc_printf_log. Use dev_err instead. Signed-off-by: Jiri Slaby Acked-By: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index fc67cc65c63b..2452dc9c9014 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4130,8 +4130,7 @@ lpfc_hba_alloc(struct pci_dev *pdev) /* Allocate memory for HBA structure */ phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); if (!phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1417 Failed to allocate hba struct.\n"); + dev_err(&pdev->dev, "failed to allocate hba struct\n"); return NULL; } From 66d6faec2f874cf6bf9bd4900966584ea9feae3d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 Jul 2009 17:21:14 +0200 Subject: [PATCH 011/209] [SCSI] fcoe: convert to %pM print_mac is being deprecated, and %pM makes for smaller code anyway. 
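As a small illustration of the conversion (hypothetical helper; the FCF-selection message in the hunk below is the real call site), %pM takes a pointer to a 6-byte MAC address and formats it directly, so the on-stack DECLARE_MAC_BUF buffer and the print_mac() call can go away:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/if_ether.h>	/* ETH_ALEN */

static void example_report_fcf(int host_no, const u8 fcf_mac[ETH_ALEN])
{
	/* old style: DECLARE_MAC_BUF(buf); ... "%s", print_mac(buf, fcf_mac) */
	printk(KERN_INFO "libfcoe: host%d: FIP selected FCF MAC %pM\n",
	       host_no, fcf_mac);
}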
Signed-off-by: Johannes Berg Acked-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index f544340d318b..d6ed3f8255ad 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -1104,7 +1104,6 @@ static void fcoe_ctlr_timeout(unsigned long arg) struct fcoe_fcf *sel; struct fcoe_fcf *fcf; unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); - DECLARE_MAC_BUF(buf); u8 send_ctlr_ka; u8 send_port_ka; @@ -1128,9 +1127,8 @@ static void fcoe_ctlr_timeout(unsigned long arg) fcf = sel; /* the old FCF may have been freed */ if (sel) { printk(KERN_INFO "libfcoe: host%d: FIP selected " - "Fibre-Channel Forwarder MAC %s\n", - fip->lp->host->host_no, - print_mac(buf, sel->fcf_mac)); + "Fibre-Channel Forwarder MAC %pM\n", + fip->lp->host->host_no, sel->fcf_mac); memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); From 8fa38513ddc1076f3e26c651f3567b084c273ba2 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:03 -0400 Subject: [PATCH 012/209] [SCSI] lpfc 8.3.4: Various SLI4 fixes Various SLI4 fixes - Fix switch name not used in the FCF record for FCoE HBAs - Enabled HBA UE error polling error-condition action code - Rewrite lpfc_sli4_scmd_to_wqidx_distr() to handle counter rollover cleanly - Modify resume_rpi mailbox data structure to match current SLI4 spec - Do not issue mailbox command in MBX_POLL mode when LPFC_HBA_ERROR is set - Wait for HBA POST completion before checking Online and UE registers - Fix accumulated total length not being filled in on unsolicited IOCBs - Use PCI config space register to determine SLI rev of HBA - Turn on starting ELS tmo function timer during device initialization Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 2 + drivers/scsi/lpfc/lpfc_hbadisc.c | 66 ++++++++++++++++++++--- drivers/scsi/lpfc/lpfc_hw4.h | 74 +++++++++++++++++++++----- drivers/scsi/lpfc/lpfc_init.c | 48 ++++++++--------- drivers/scsi/lpfc/lpfc_mbox.c | 8 ++- drivers/scsi/lpfc/lpfc_sli.c | 90 ++++++++++++++------------------ drivers/scsi/lpfc/lpfc_sli4.h | 1 + 7 files changed, 190 insertions(+), 99 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 1877d9811831..8917bb2c5fc2 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -616,6 +616,8 @@ struct lpfc_hba { uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ + uint32_t fcp_qidx; /* next work queue to post work to */ + unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index ed46b24a3380..625b2ef3050f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1053,6 +1053,39 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) return 0; } +/** + * lpfc_sw_name_match - Check if the fcf switch name match. + * @fab_name: pointer to fabric name. + * @new_fcf_record: pointer to fcf record. + * + * This routine compare the fcf record's switch name with provided + * switch name. 
If the switch name are identical this function + * returns 1 else return 0. + **/ +static uint32_t +lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) +{ + if ((sw_name[0] == + bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) && + (sw_name[1] == + bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) && + (sw_name[2] == + bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) && + (sw_name[3] == + bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) && + (sw_name[4] == + bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) && + (sw_name[5] == + bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) && + (sw_name[6] == + bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) && + (sw_name[7] == + bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))) + return 1; + else + return 0; +} + /** * lpfc_mac_addr_match - Check if the fcf mac address match. * @phba: pointer to lpfc hba data structure. @@ -1123,6 +1156,22 @@ lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) bf_get(lpfc_fcf_record_mac_5, new_fcf_record); phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); phba->fcf.priority = new_fcf_record->fip_priority; + phba->fcf.switch_name[0] = + bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); + phba->fcf.switch_name[1] = + bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); + phba->fcf.switch_name[2] = + bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); + phba->fcf.switch_name[3] = + bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); + phba->fcf.switch_name[4] = + bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); + phba->fcf.switch_name[5] = + bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); + phba->fcf.switch_name[6] = + bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); + phba->fcf.switch_name[7] = + bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); } /** @@ -1239,9 +1288,12 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, - new_fcf_record)) + new_fcf_record)) + continue; + if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && + !lpfc_sw_name_match(conn_entry->conn_rec.switch_name, + new_fcf_record)) continue; - if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { /* * If the vlan bit map does not have the bit set for the @@ -1424,7 +1476,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_lock_irqsave(&phba->hbalock, flags); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (lpfc_fab_name_match(phba->fcf.fabric_name, - new_fcf_record) && + new_fcf_record) && + lpfc_sw_name_match(phba->fcf.switch_name, + new_fcf_record) && lpfc_mac_addr_match(phba, new_fcf_record)) { phba->fcf.fcf_flag |= FCF_AVAILABLE; spin_unlock_irqrestore(&phba->hbalock, flags); @@ -1464,9 +1518,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * If there is a record with lower priority value for * the current FCF, use that record. 
*/ - if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) - && (new_fcf_record->fip_priority < - phba->fcf.priority)) { + if (lpfc_fab_name_match(phba->fcf.fabric_name, + new_fcf_record) && + (new_fcf_record->fip_priority < phba->fcf.priority)) { /* Use this FCF record */ lpfc_copy_fcf_record(phba, new_fcf_record); phba->fcf.addr_mode = addr_mode; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 2995d128f07f..3689eee04535 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -52,6 +52,31 @@ struct dma_address { uint32_t addr_hi; }; +#define LPFC_SLIREV_CONF_WORD 0x58 +struct lpfc_sli_intf { + uint32_t word0; +#define lpfc_sli_intf_iftype_MASK 0x00000007 +#define lpfc_sli_intf_iftype_SHIFT 0 +#define lpfc_sli_intf_iftype_WORD word0 +#define lpfc_sli_intf_rev_MASK 0x0000000f +#define lpfc_sli_intf_rev_SHIFT 4 +#define lpfc_sli_intf_rev_WORD word0 +#define LPFC_SLIREV_CONF_SLI4 4 +#define lpfc_sli_intf_family_MASK 0x000000ff +#define lpfc_sli_intf_family_SHIFT 8 +#define lpfc_sli_intf_family_WORD word0 +#define lpfc_sli_intf_feat1_MASK 0x000000ff +#define lpfc_sli_intf_feat1_SHIFT 16 +#define lpfc_sli_intf_feat1_WORD word0 +#define lpfc_sli_intf_feat2_MASK 0x0000001f +#define lpfc_sli_intf_feat2_SHIFT 24 +#define lpfc_sli_intf_feat2_WORD word0 +#define lpfc_sli_intf_valid_MASK 0x00000007 +#define lpfc_sli_intf_valid_SHIFT 29 +#define lpfc_sli_intf_valid_WORD word0 +#define LPFC_SLI_INTF_VALID 6 +}; + #define LPFC_SLI4_BAR0 1 #define LPFC_SLI4_BAR1 2 #define LPFC_SLI4_BAR2 4 @@ -1181,6 +1206,32 @@ struct fcf_record { #define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF #define lpfc_fcf_record_fcf_state_WORD word8 uint8_t vlan_bitmap[512]; + uint32_t word137; +#define lpfc_fcf_record_switch_name_0_SHIFT 0 +#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_0_WORD word137 +#define lpfc_fcf_record_switch_name_1_SHIFT 8 +#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_1_WORD word137 +#define lpfc_fcf_record_switch_name_2_SHIFT 16 +#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_2_WORD word137 +#define lpfc_fcf_record_switch_name_3_SHIFT 24 +#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_3_WORD word137 + uint32_t word138; +#define lpfc_fcf_record_switch_name_4_SHIFT 0 +#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_4_WORD word138 +#define lpfc_fcf_record_switch_name_5_SHIFT 8 +#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_5_WORD word138 +#define lpfc_fcf_record_switch_name_6_SHIFT 16 +#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_6_WORD word138 +#define lpfc_fcf_record_switch_name_7_SHIFT 24 +#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF +#define lpfc_fcf_record_switch_name_7_WORD word138 }; struct lpfc_mbx_read_fcf_tbl { @@ -1385,20 +1436,17 @@ struct lpfc_mbx_unreg_vfi { struct lpfc_mbx_resume_rpi { uint32_t word1; -#define lpfc_resume_rpi_rpi_SHIFT 0 -#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF -#define lpfc_resume_rpi_rpi_WORD word1 +#define lpfc_resume_rpi_index_SHIFT 0 +#define lpfc_resume_rpi_index_MASK 0x0000FFFF +#define lpfc_resume_rpi_index_WORD word1 +#define lpfc_resume_rpi_ii_SHIFT 30 +#define lpfc_resume_rpi_ii_MASK 0x00000003 +#define lpfc_resume_rpi_ii_WORD word1 +#define RESUME_INDEX_RPI 0 +#define 
RESUME_INDEX_VPI 1 +#define RESUME_INDEX_VFI 2 +#define RESUME_INDEX_FCFI 3 uint32_t event_tag; - uint32_t word3_rsvd; - uint32_t word4_rsvd; - uint32_t word5_rsvd; - uint32_t word6; -#define lpfc_resume_rpi_vpi_SHIFT 0 -#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF -#define lpfc_resume_rpi_vpi_WORD word6 -#define lpfc_resume_rpi_vfi_SHIFT 16 -#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF -#define lpfc_resume_rpi_vfi_WORD word6 }; #define REG_FCF_INVALID_QID 0xFFFF diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 2452dc9c9014..f8271a587aab 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4488,23 +4488,6 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) if (!phba->sli4_hba.STAregaddr) return -ENODEV; - /* With uncoverable error, log the error message and return error */ - onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); - onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); - if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { - uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); - uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); - if (uerrlo_reg.word0 || uerrhi_reg.word0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1422 HBA Unrecoverable error: " - "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " - "online0_reg=0x%x, online1_reg=0x%x\n", - uerrlo_reg.word0, uerrhi_reg.word0, - onlnreg0, onlnreg1); - } - return -ENODEV; - } - /* Wait up to 30 seconds for the SLI Port POST done and ready */ for (i = 0; i < 3000; i++) { sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); @@ -4544,6 +4527,23 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); + /* With uncoverable error, log the error message and return error */ + onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); + onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); + if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { + uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); + uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); + if (uerrlo_reg.word0 || uerrhi_reg.word0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1422 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "online0_reg=0x%x, online1_reg=0x%x\n", + uerrlo_reg.word0, uerrhi_reg.word0, + onlnreg0, onlnreg1); + } + return -ENODEV; + } + return port_error; } @@ -7635,19 +7635,17 @@ static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { int rc; - uint16_t dev_id; + struct lpfc_sli_intf intf; - if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) + if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) return -ENODEV; - switch (dev_id) { - case PCI_DEVICE_ID_TIGERSHARK: + if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && + (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) rc = lpfc_pci_probe_one_s4(pdev, pid); - break; - default: + else rc = lpfc_pci_probe_one_s3(pdev, pid); - break; - } + return rc; } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 3423571dd1b3..42b4f3841697 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1938,9 +1938,7 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) memset(mbox, 0, sizeof(*mbox)); resume_rpi = &mbox->u.mqe.un.resume_rpi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); - bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); - 
bf_set(lpfc_resume_rpi_vpi, resume_rpi, - ndlp->vport->vpi + ndlp->vport->phba->vpi_base); - bf_set(lpfc_resume_rpi_vfi, resume_rpi, - ndlp->vport->vfi + ndlp->vport->phba->vfi_base); + bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi); + bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); + resume_rpi->event_tag = ndlp->phba->fc_eventTag; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index acc43b061ba1..04f527ca8f5f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4522,12 +4522,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli4_rb_setup(phba); /* Start the ELS watchdog timer */ - /* - * The driver for SLI4 is not yet ready to process timeouts - * or interrupts. Once it is, the comment bars can be removed. - */ - /* mod_timer(&vport->els_tmofunc, - * jiffies + HZ * (phba->fc_ratov*2)); */ + mod_timer(&vport->els_tmofunc, + jiffies + HZ * (phba->fc_ratov * 2)); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, @@ -5279,6 +5275,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, unsigned long iflags; int rc; + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2544 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + /* Detect polling mode and jump to a handler */ if (!phba->sli4_hba.intr_enable) { if (flag == MBX_POLL) @@ -5338,17 +5346,6 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, psli->sli_flag, flag); goto out_not_finished; } - rc = lpfc_mbox_dev_check(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2544 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, flag); - goto out_not_finished; - } /* Put the mailbox command to the driver internal FIFO */ psli->slistat.mbox_busy++; @@ -5817,19 +5814,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, /** * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution * @phba: Pointer to HBA context object. - * @piocb: Pointer to command iocb. * * This routine performs a round robin SCSI command to SLI4 FCP WQ index - * distribution. + * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock + * held. * * Return: index into SLI4 fast-path FCP queue index. 
**/ static uint32_t -lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) +lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - static uint32_t fcp_qidx; + ++phba->fcp_qidx; + if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) + phba->fcp_qidx = 0; - return fcp_qidx++ % phba->cfg_fcp_wq_count; + return phba->fcp_qidx; } /** @@ -6156,7 +6155,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; if (piocb->iocb_flag & LPFC_IO_FCP) { - fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb); + fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) return IOCB_ERROR; } else { @@ -7678,12 +7677,6 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) "online0_reg=0x%x, online1_reg=0x%x\n", uerr_sta_lo, uerr_sta_hi, onlnreg0, onlnreg1); - /* TEMP: as the driver error recover logic is not - * fully developed, we just log the error message - * and the device error attention action is now - * temporarily disabled. - */ - return 0; phba->work_status[0] = uerr_sta_lo; phba->work_status[1] = uerr_sta_hi; /* Set the driver HA work bitmap */ @@ -9499,8 +9492,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) eq->host_index = 0; eq->hba_index = 0; - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -9604,10 +9596,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); cq->host_index = 0; cq->hba_index = 0; -out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); +out: + mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -9712,8 +9703,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, /* link the mq onto the parent cq child list */ list_add_tail(&mq->list, &cq->child_list); out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -9795,8 +9785,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, /* link the wq onto the parent cq child list */ list_add_tail(&wq->list, &cq->child_list); out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -9970,8 +9959,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, list_add_tail(&drq->list, &cq->child_list); out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -10026,8 +10014,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) /* Remove eq from any list */ list_del_init(&eq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, eq->phba->mbox_mem_pool); + mempool_free(mbox, eq->phba->mbox_mem_pool); return status; } @@ -10080,8 +10067,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) } /* Remove cq from any list */ list_del_init(&cq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, cq->phba->mbox_mem_pool); + mempool_free(mbox, cq->phba->mbox_mem_pool); return status; } @@ -10134,8 +10120,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) } /* Remove mq from any list */ list_del_init(&mq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, mq->phba->mbox_mem_pool); + mempool_free(mbox, mq->phba->mbox_mem_pool); return status; } @@ -10187,8 +10172,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 
} /* Remove wq from any list */ list_del_init(&wq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, wq->phba->mbox_mem_pool); + mempool_free(mbox, wq->phba->mbox_mem_pool); return status; } @@ -10258,8 +10242,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, } list_del_init(&hrq->list); list_del_init(&drq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, hrq->phba->mbox_mem_pool); + mempool_free(mbox, hrq->phba->mbox_mem_pool); return status; } @@ -10933,6 +10916,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq = lpfc_sli_get_iocbq(vport->phba); if (first_iocbq) { /* Initialize the first IOCB. */ + first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); @@ -10945,6 +10929,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.un.rcvels.remoteID = sid; + first_iocbq->iocb.unsli3.rcvsli3.acc_len += + bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); } iocbq = first_iocbq; /* @@ -10961,6 +10947,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) iocbq->iocb.ulpBdeCount++; iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + first_iocbq->iocb.unsli3.rcvsli3.acc_len += + bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); } else { iocbq = lpfc_sli_get_iocbq(vport->phba); if (!iocbq) { @@ -10978,6 +10966,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) iocbq->iocb.ulpBdeCount = 1; iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + first_iocbq->iocb.unsli3.rcvsli3.acc_len += + bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); iocbq->iocb.un.rcvels.remoteID = sid; list_add_tail(&iocbq->list, &first_iocbq->list); } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 3b276b47d18f..b6f3e04f3848 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -132,6 +132,7 @@ struct lpfc_sli4_link { struct lpfc_fcf { uint8_t fabric_name[8]; + uint8_t switch_name[8]; uint8_t mac_addr[6]; uint16_t fcf_indx; uint16_t fcfi; From a0c87cbdb52467a16343b31251f2722643db603c Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:10 -0400 Subject: [PATCH 013/209] [SCSI] lpfc 8.3.4: Consistently Implement persistent port disable Consistently implement persistent port disable. Ability was to be managed in the adapter via firmware via flash settings. However, not all firmware images supported it. Uniformly support it everywhere. 
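A condensed sketch of the decision this patch makes uniform, pieced together from the lpfc_config_port_post() hunk below (illustrative only, assuming the lpfc private headers; the real code also tolerates MBX_BUSY, frees the mailbox on failure, and logs the errors shown in the diff):

static int example_bring_up_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	/* lpfc_sli_read_link_ste() reads config region 23 and sets
	 * LINK_DISABLED in phba->hba_flag if the port was persistently
	 * disabled via the flash settings. */
	lpfc_sli_read_link_ste(phba);

	if (phba->hba_flag & LINK_DISABLED) {
		/* honour the persistent setting: bring the link down ... */
		lpfc_down_link(phba, pmb);
	} else {
		/* ... otherwise initialise the link as before */
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			       phba->cfg_link_speed);
	}

	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	return (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_SUCCESS) ?
		0 : -EIO;
}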
Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_attr.c | 7 +- drivers/scsi/lpfc/lpfc_crtn.h | 5 +- drivers/scsi/lpfc/lpfc_hw.h | 4 +- drivers/scsi/lpfc/lpfc_init.c | 59 +++++++++---- drivers/scsi/lpfc/lpfc_mbox.c | 36 +++++--- drivers/scsi/lpfc/lpfc_sli.c | 158 +++++++++++++++++++++++++++++++++- drivers/scsi/lpfc/lpfc_sli4.h | 4 + 8 files changed, 242 insertions(+), 32 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8917bb2c5fc2..7b63fe530041 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -525,6 +525,7 @@ struct lpfc_hba { #define FCP_XRI_ABORT_EVENT 0x20 #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 +#define LINK_DISABLED 0x100 /* Link disabled by user */ struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index fc07be5fbce9..7a629511338e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -394,7 +394,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: - len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); + if (phba->hba_flag & LINK_DISABLED) + len += snprintf(buf + len, PAGE_SIZE-len, + "Link Down - User disabled\n"); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d2a922997c0f..84e53bb1daa3 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -21,7 +21,9 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; -void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); +void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_sli_read_link_ste(struct lpfc_hba *); +void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); @@ -234,6 +236,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); +void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 8a3a026667e4..ccb26724dc53 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2496,8 +2496,8 @@ typedef struct { #define DMP_VPORT_REGION_SIZE 0x200 #define DMP_MBOX_OFFSET_WORD 0x5 -#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ -#define DMP_FCOEPARAM_RGN_SIZE 0x400 +#define DMP_REGION_23 0x17 /* fcoe param and port state region */ +#define DMP_RGN23_SIZE 0x400 #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f8271a587aab..900b5628ceb9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -211,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) goto out_free_mbox; do { - 
lpfc_dump_mem(phba, pmb, offset); + lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { @@ -425,6 +425,9 @@ lpfc_config_port_post(struct lpfc_hba *phba) return -EIO; } + /* Check if the port is disabled */ + lpfc_sli_read_link_ste(phba); + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) phba->cfg_hba_queue_depth = @@ -524,27 +527,49 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); - lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); - pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - lpfc_set_loopback_flag(phba); - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + /* Check if the port is disabled */ + lpfc_sli_read_serdes_param(phba); + + if (phba->hba_flag & LINK_DISABLED) { + lpfc_printf_log(phba, + KERN_ERR, LOG_INIT, + "2598 Adapter Link is disabled.\n"); + lpfc_down_link(phba, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { + lpfc_printf_log(phba, + KERN_ERR, LOG_INIT, + "2599 Adapter failed to issue DOWN_LINK" + " mbox command rc 0x%x\n", rc); + + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + } else { + lpfc_init_link(phba, pmb, phba->cfg_topology, + phba->cfg_link_speed); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + lpfc_set_loopback_flag(phba); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0454 Adapter failed to init, mbxCmd x%x " "INIT_LINK, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - /* Clear all pending interrupts */ - writel(0xffffffff, phba->HAregaddr); - readl(phba->HAregaddr); /* flush */ + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ - phba->link_state = LPFC_HBA_ERROR; - if (rc != MBX_BUSY) - mempool_free(pmb, phba->mbox_mem_pool); - return -EIO; + phba->link_state = LPFC_HBA_ERROR; + if (rc != MBX_BUSY) + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 42b4f3841697..245945f2f3a0 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -79,21 +79,37 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, } /** - * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory + * lpfc_down_link - Bring down HBAs link. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. - * @offset: offset for dumping VPD memory mailbox command. + * + * This routine prepares a mailbox command to bring down HBA link. 
+ **/ +void +lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb = &pmb->u.mb; + mb->mbxCommand = MBX_DOWN_LINK; + mb->mbxOwner = OWN_HOST; +} + +/** + * lpfc_dump_mem - Prepare a mailbox command for reading a region. + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @offset: offset into the region. + * @region_id: config region id. * * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. * - * This routine prepares the mailbox command for dumping HBA Vital Product - * Data (VPD) memory. This mailbox command is to be used for retrieving a - * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address - * offset specified by the offset parameter. + * This routine prepares the mailbox command for dumping HBA's config region. **/ void -lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) +lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, + uint16_t region_id) { MAILBOX_t *mb; void *ctx; @@ -107,7 +123,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; - mb->un.varDmp.region_id = DMP_REGION_VPD; + mb->un.varDmp.region_id = region_id; mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; @@ -1864,8 +1880,8 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba, mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; - mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; + mb->un.varDmp.region_id = DMP_REGION_23; + mb->un.varDmp.sli4_length = DMP_RGN23_SIZE; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); return 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 04f527ca8f5f..a3f85454368d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4139,7 +4139,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, return -EIO; } data_length = mqe->un.mb_words[5]; - if (data_length > DMP_FCOEPARAM_RGN_SIZE) { + if (data_length > DMP_RGN23_SIZE) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); return -EIO; @@ -6788,6 +6788,33 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) } +/** + * lpfc_sli_bemem_bcopy - SLI memory copy function + * @srcp: Source memory pointer. + * @destp: Destination memory pointer. + * @cnt: Number of words required to be copied. + * + * This function is used for copying data between a data structure + * with big endian representation to local endianness. + * This function can be called with or without lock. + **/ +void +lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) +{ + uint32_t *src = srcp; + uint32_t *dest = destp; + uint32_t ldata; + int i; + + for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { + ldata = *src; + ldata = be32_to_cpu(ldata); + *dest = ldata; + src++; + dest++; + } +} + /** * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq * @phba: Pointer to HBA context object. @@ -11564,3 +11591,132 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) error = 0; return error; } + +/** + * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. + * @phba: pointer to lpfc hba data structure. 
+ * + * This function read region 23 and parse TLV for port status to + * decide if the user disaled the port. If the TLV indicates the + * port is disabled, the hba_flag is set accordingly. + **/ +void +lpfc_sli_read_link_ste(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb = NULL; + MAILBOX_t *mb; + uint8_t *rgn23_data = NULL; + uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset; + int rc; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2600 lpfc_sli_read_serdes_param failed to" + " allocate mailbox memory\n"); + goto out; + } + mb = &pmb->u.mb; + + /* Get adapter Region 23 data */ + rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); + if (!rgn23_data) + goto out; + + do { + lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2601 lpfc_sli_read_link_ste failed to" + " read config region 23 rc 0x%x Status 0x%x\n", + rc, mb->mbxStatus); + mb->un.varDmp.word_cnt = 0; + } + /* + * dump mem may return a zero when finished or we got a + * mailbox error, either way we are done. + */ + if (mb->un.varDmp.word_cnt == 0) + break; + if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) + mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; + + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + rgn23_data + offset, + mb->un.varDmp.word_cnt); + offset += mb->un.varDmp.word_cnt; + } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); + + data_size = offset; + offset = 0; + + if (!data_size) + goto out; + + /* Check the region signature first */ + if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2619 Config region 23 has bad signature\n"); + goto out; + } + offset += 4; + + /* Check the data structure version */ + if (rgn23_data[offset] != LPFC_REGION23_VERSION) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2620 Config region 23 has bad version\n"); + goto out; + } + offset += 4; + + /* Parse TLV entries in the region */ + while (offset < data_size) { + if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) + break; + /* + * If the TLV is not driver specific TLV or driver id is + * not linux driver id, skip the record. + */ + if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || + (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || + (rgn23_data[offset + 3] != 0)) { + offset += rgn23_data[offset + 1] * 4 + 4; + continue; + } + + /* Driver found a driver specific TLV in the config region */ + sub_tlv_len = rgn23_data[offset + 1] * 4; + offset += 4; + tlv_offset = 0; + + /* + * Search for configured port state sub-TLV. 
+ */ + while ((offset < data_size) && + (tlv_offset < sub_tlv_len)) { + if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { + offset += 4; + tlv_offset += 4; + break; + } + if (rgn23_data[offset] != PORT_STE_TYPE) { + offset += rgn23_data[offset + 1] * 4 + 4; + tlv_offset += rgn23_data[offset + 1] * 4 + 4; + continue; + } + + /* This HBA contains PORT_STE configured */ + if (!rgn23_data[offset + 2]) + phba->hba_flag |= LINK_DISABLED; + + goto out; + } + } +out: + if (pmb) + mempool_free(pmb, phba->mbox_mem_pool); + kfree(rgn23_data); + return; +} diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index b6f3e04f3848..b5f4ba1a5c27 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -151,6 +151,10 @@ struct lpfc_fcf { #define LPFC_REGION23_SIGNATURE "RG23" #define LPFC_REGION23_VERSION 1 #define LPFC_REGION23_LAST_REC 0xff +#define DRIVER_SPECIFIC_TYPE 0xA2 +#define LINUX_DRIVER_ID 0x20 +#define PORT_STE_TYPE 0x1 + struct lpfc_fip_param_hdr { uint8_t type; #define FCOE_PARAM_TYPE 0xA0 From 8568a4d2495ebcf5da38a2141c7633399143b1a5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:16 -0400 Subject: [PATCH 014/209] [SCSI] lpfc 8.3.4: Various SLI3 fixes Various SLI3 fixes - Fix for firmware dump failure - Fix inband remote management Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_init.c | 3 --- drivers/scsi/lpfc/lpfc_mem.c | 41 +++++++++++++++++++++++++---------- drivers/scsi/lpfc/lpfc_sli.c | 2 +- 4 files changed, 31 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7b63fe530041..8b69a110a305 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -685,6 +685,7 @@ struct lpfc_hba { struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ + struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 900b5628ceb9..dc561e3c8b92 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -527,9 +527,6 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); - /* Check if the port is disabled */ - lpfc_sli_read_serdes_param(phba); - if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index e198c917c13e..a1b6db6016da 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -110,17 +110,28 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) sizeof(struct lpfc_nodelist)); if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; - phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", + + if (phba->sli_rev == LPFC_SLI_REV4) { + phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", phba->pcidev, LPFC_HDR_BUF_SIZE, align, 0); - if (!phba->lpfc_hrb_pool) - goto fail_free_nlp_mem_pool; - phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", + if (!phba->lpfc_hrb_pool) + goto fail_free_nlp_mem_pool; + + phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", phba->pcidev, LPFC_DATA_BUF_SIZE, align, 0); - if (!phba->lpfc_drb_pool) - goto fail_free_hbq_pool; - + if 
(!phba->lpfc_drb_pool) + goto fail_free_hrb_pool; + phba->lpfc_hbq_pool = NULL; + } else { + phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool", + phba->pcidev, LPFC_BPL_SIZE, align, 0); + if (!phba->lpfc_hbq_pool) + goto fail_free_nlp_mem_pool; + phba->lpfc_hrb_pool = NULL; + phba->lpfc_drb_pool = NULL; + } /* vpi zero is reserved for the physical port so add 1 to max */ longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); @@ -132,7 +143,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) fail_free_dbq_pool: pci_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; - fail_free_hbq_pool: + fail_free_hrb_pool: pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; fail_free_nlp_mem_pool: @@ -176,11 +187,17 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); - pci_pool_destroy(phba->lpfc_drb_pool); + if (phba->lpfc_drb_pool) + pci_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; - pci_pool_destroy(phba->lpfc_hrb_pool); + if (phba->lpfc_hrb_pool) + pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; + if (phba->lpfc_hbq_pool) + pci_pool_destroy(phba->lpfc_hbq_pool); + phba->lpfc_hbq_pool = NULL; + /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; @@ -380,7 +397,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -405,7 +422,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a3f85454368d..174a1b0d005d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4702,13 +4702,13 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_lock_irqsave(&phba->hbalock, drvr_flag); if (!pmbox) { + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* processing mbox queue from intr_handler */ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return MBX_SUCCESS; } processing_queue = 1; - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; pmbox = lpfc_mbox_get(phba); if (!pmbox) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); From 32b9793fe6ff09a85f36b8bd7d6ff214653a7497 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:21 -0400 Subject: [PATCH 015/209] [SCSI] lpfc 8.3.4: Fix a pair of FCoE issues Fix a pair of FCoE issues - Fix Region 23 FCoE Parameters not being read correctly - Fix race condition when there are FCoE events during FCF table read Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 3 ++ drivers/scsi/lpfc/lpfc_crtn.h | 1 + drivers/scsi/lpfc/lpfc_hbadisc.c | 77 +++++++++++++++++++++++++++++++- drivers/scsi/lpfc/lpfc_init.c | 9 ++-- drivers/scsi/lpfc/lpfc_sli.c | 7 ++- 5 files changed, 92 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8b69a110a305..d982ac78009e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ 
b/drivers/scsi/lpfc/lpfc.h @@ -526,6 +526,7 @@ struct lpfc_hba { #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ +#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -767,6 +768,8 @@ struct lpfc_hba { /* Maximum number of events that can be outstanding at any time*/ #define LPFC_MAX_EVT_COUNT 512 atomic_t fast_event_count; + uint32_t fcoe_eventtag; + uint32_t fcoe_eventtag_at_fcf_scan; struct lpfc_fcf fcf; uint8_t fc_map[3]; uint8_t valid_vlan; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 84e53bb1daa3..d52aa3310551 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -189,6 +189,7 @@ void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); +int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 625b2ef3050f..cc4b6ba9e4d5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -61,6 +61,7 @@ static uint8_t lpfcAlpaArray[] = { static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); +static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -1009,9 +1010,15 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_lock_irqsave(&phba->hbalock, flags); phba->fcf.fcf_flag |= FCF_REGISTERED; spin_unlock_irqrestore(&phba->hbalock, flags); + /* If there is a pending FCoE event, restart FCF table scan. */ + if (lpfc_check_pending_fcoe_event(phba, 1)) { + mempool_free(mboxq, phba->mbox_mem_pool); + return; + } if (vport->port_state != LPFC_FLOGI) { spin_lock_irqsave(&phba->hbalock, flags); phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_initial_flogi(vport); } @@ -1199,6 +1206,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) /* The FCF is already registered, start discovery */ if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; spin_unlock_irqrestore(&phba->hbalock, flags); if (phba->pport->port_state != LPFC_FLOGI) lpfc_initial_flogi(phba->pport); @@ -1387,6 +1395,60 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, return 0; } +/** + * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. + * @phba: pointer to lpfc hba data structure. + * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned. + * + * This function check if there is any fcoe event pending while driver + * scan FCF entries. If there is any pending event, it will restart the + * FCF saning and return 1 else return 0. + */ +int +lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) +{ + LPFC_MBOXQ_t *mbox; + int rc; + /* + * If the Link is up and no FCoE events while in the + * FCF discovery, no need to restart FCF discovery. 
+ */ + if ((phba->link_state >= LPFC_LINK_UP) && + (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) + return 0; + + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_AVAILABLE; + spin_unlock_irq(&phba->hbalock); + + if (phba->link_state >= LPFC_LINK_UP) + lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + + if (unreg_fcf) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_REGISTERED; + spin_unlock_irq(&phba->hbalock); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, + LOG_DISCOVERY|LOG_MBOX, + "2610 UNREG_FCFI mbox allocation failed\n"); + return 1; + } + lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2611 UNREG_FCFI issue mbox failed\n"); + mempool_free(mbox, phba->mbox_mem_pool); + } + } + + return 1; +} + /** * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. * @phba: pointer to lpfc hba data structure. @@ -1419,6 +1481,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) unsigned long flags; uint16_t vlan_id; + /* If there is pending FCoE event restart FCF table scan */ + if (lpfc_check_pending_fcoe_event(phba, 0)) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. */ @@ -1823,6 +1891,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) goto out; } } else { + vport->port_state = LPFC_VPORT_UNKNOWN; /* * Add the driver's default FCF record at FCF index 0 now. This * is phase 1 implementation that support FCF index 0 and driver @@ -1858,6 +1927,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) * The driver is expected to do FIP/FCF. Call the port * and get the FCF Table. 
*/ + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & FCF_DISC_INPROGRESS) { + spin_unlock_irq(&phba->hbalock); + return; + } + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) @@ -4414,7 +4489,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba, fcoe_param_hdr = (struct lpfc_fip_param_hdr *) buff; fcoe_param = (struct lpfc_fcoe_params *) - buff + sizeof(struct lpfc_fip_param_hdr); + (buff + sizeof(struct lpfc_fip_param_hdr)); if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index dc561e3c8b92..a7f32ed256bd 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2829,6 +2829,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) return; + phba->fcoe_eventtag = acqe_link->event_tag; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -2916,6 +2917,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); int rc; + phba->fcoe_eventtag = acqe_fcoe->event_tag; switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, @@ -2923,11 +2925,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, acqe_fcoe->fcf_index, acqe_fcoe->event_tag); /* - * If the current FCF is in discovered state, - * do nothing. + * If the current FCF is in discovered state, or + * FCF discovery is in progress do nothing. */ spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_DISCOVERED) { + if ((phba->fcf.fcf_flag & FCF_DISCOVERED) || + (phba->hba_flag & FCF_DISC_INPROGRESS)) { spin_unlock_irq(&phba->hbalock); break; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 174a1b0d005d..a0f973e7acb3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -11536,6 +11536,7 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) uint32_t alloc_len, req_len; struct lpfc_mbx_read_fcf_tbl *read_fcf; + phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -11587,8 +11588,12 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) if (rc == MBX_NOT_FINISHED) { lpfc_sli4_mbox_cmd_free(phba, mboxq); error = -EIO; - } else + } else { + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); error = 0; + } return error; } From 1c6834a7e85715a4ac07c1cac25a1950040decb0 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:26 -0400 Subject: [PATCH 016/209] [SCSI] lpfc 8.3.4: NPIV vport fixes NPIV vport fixes - Fixed static vport creation on SLI4 HBAs - Fixed vport create sending init_vpi before REG_VFI - Fix unable to create vports on SLI4 HBA's Port2 Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_crtn.h | 4 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 112 +++++++++++++++++++++++++------ drivers/scsi/lpfc/lpfc_init.c | 5 ++ drivers/scsi/lpfc/lpfc_mbox.c | 47 ++++++++++--- drivers/scsi/lpfc/lpfc_sli.c | 2 +- drivers/scsi/lpfc/lpfc_vport.c | 51 ++++++++------ 7 files changed, 167 
insertions(+), 55 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index d982ac78009e..8cca3619c745 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -312,6 +312,7 @@ struct lpfc_vport { #define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ #define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ #define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ +#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ uint32_t ct_flags; #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d52aa3310551..48943d344143 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -25,7 +25,7 @@ void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli_read_link_ste(struct lpfc_hba *); void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); +int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); @@ -184,7 +184,7 @@ int lpfc_mbox_dev_check(struct lpfc_hba *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); -void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); +void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index cc4b6ba9e4d5..491c53fd1ca4 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1633,6 +1633,39 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) return; } +/** + * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox data structure. + * + * This function handles completion of init vpi mailbox command. + */ +static void +lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "2609 Init VPI mailbox failed 0x%x\n", + mboxq->u.mb.mbxStatus); + mempool_free(mboxq, phba->mbox_mem_pool); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; + + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vport); + else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vport, KERN_ERR, + LOG_ELS, + "2606 No NPIV Fabric support\n"); + } + return; +} + /** * lpfc_start_fdiscs - send fdiscs for each vports on this port. * @phba: pointer to lpfc hba data structure. 
@@ -1645,6 +1678,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; + LPFC_MBOXQ_t *mboxq; + int rc; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { @@ -1662,6 +1697,29 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) FC_VPORT_LINKDOWN); continue; } + if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + mboxq = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) { + lpfc_printf_vlog(vports[i], KERN_ERR, + LOG_MBOX, "2607 Failed to allocate " + "init_vpi mailbox\n"); + continue; + } + lpfc_init_vpi(phba, mboxq, vports[i]->vpi); + mboxq->vport = vports[i]; + mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mboxq, + MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_vlog(vports[i], KERN_ERR, + LOG_MBOX, "2608 Failed to issue " + "init_vpi mailbox\n"); + mempool_free(mboxq, + phba->mbox_mem_pool); + } + continue; + } if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) lpfc_initial_fdisc(vports[i]); else { @@ -2242,13 +2300,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmb = NULL; MAILBOX_t *mb; struct static_vport_info *vport_info; - int rc, i; + int rc = 0, i; struct fc_vport_identifiers vport_id; struct fc_vport *new_fc_vport; struct Scsi_Host *shost; struct lpfc_vport *vport; uint16_t offset = 0; uint8_t *vport_buff; + struct lpfc_dmabuf *mp; + uint32_t byte_count = 0; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { @@ -2271,7 +2331,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba) vport_buff = (uint8_t *) vport_info; do { - lpfc_dump_static_vport(phba, pmb, offset); + if (lpfc_dump_static_vport(phba, pmb, offset)) + goto out; + pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); @@ -2284,17 +2346,30 @@ lpfc_create_static_vport(struct lpfc_hba *phba) goto out; } - if (mb->un.varDmp.word_cnt > - sizeof(struct static_vport_info) - offset) - mb->un.varDmp.word_cnt = - sizeof(struct static_vport_info) - offset; + if (phba->sli_rev == LPFC_SLI_REV4) { + byte_count = pmb->u.mqe.un.mb_words[5]; + mp = (struct lpfc_dmabuf *) pmb->context2; + if (byte_count > sizeof(struct static_vport_info) - + offset) + byte_count = sizeof(struct static_vport_info) + - offset; + memcpy(vport_buff + offset, mp->virt, byte_count); + offset += byte_count; + } else { + if (mb->un.varDmp.word_cnt > + sizeof(struct static_vport_info) - offset) + mb->un.varDmp.word_cnt = + sizeof(struct static_vport_info) + - offset; + byte_count = mb->un.varDmp.word_cnt; + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + vport_buff + offset, + byte_count); - lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, - vport_buff + offset, - mb->un.varDmp.word_cnt); - offset += mb->un.varDmp.word_cnt; + offset += byte_count; + } - } while (mb->un.varDmp.word_cnt && + } while (byte_count && offset < sizeof(struct static_vport_info)); @@ -2336,16 +2411,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba) } out: - /* - * If this is timed out command, setting NULL to context2 tell SLI - * layer not to use this buffer. 
- */ - spin_lock_irq(&phba->hbalock); - pmb->context2 = NULL; - spin_unlock_irq(&phba->hbalock); kfree(vport_info); - if (rc != MBX_TIMEOUT) + if (rc != MBX_TIMEOUT) { + if (pmb->context2) { + mp = (struct lpfc_dmabuf *) pmb->context2; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } mempool_free(pmb, phba->mbox_mem_pool); + } return; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a7f32ed256bd..18bc5905c44c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2155,6 +2155,8 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) + vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -7371,6 +7373,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Perform post initialization setup */ lpfc_post_init_setup(phba); + /* Check if there are static vports to be created. */ + lpfc_create_static_vport(phba); + return 0; out_disable_intr: diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 245945f2f3a0..a776f86bbcc3 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -52,30 +52,51 @@ * This routine prepares the mailbox command for dumping list of static * vports to be created. **/ -void +int lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset) { MAILBOX_t *mb; - void *ctx; + struct lpfc_dmabuf *mp; mb = &pmb->u.mb; - ctx = pmb->context2; /* Setup to dump vport info region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; - mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = DMP_REGION_VPORT; - mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); - mb->un.varDmp.co = 0; - mb->un.varDmp.resp_offset = 0; - pmb->context2 = ctx; mb->mbxOwner = OWN_HOST; - return; + /* For SLI3 HBAs data is embedded in mailbox */ + if (phba->sli_rev != LPFC_SLI_REV4) { + mb->un.varDmp.cv = 1; + mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); + return 0; + } + + /* For SLI4 HBAs driver need to allocate memory */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + + if (!mp || !mp->virt) { + kfree(mp); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2605 lpfc_dump_static_vport: memory" + " allocation failed\n"); + return 1; + } + memset(mp->virt, 0, LPFC_BPL_SIZE); + INIT_LIST_HEAD(&mp->list); + /* save address for completion */ + pmb->context2 = (uint8_t *) mp; + mb->un.varWords[3] = putPaddrLow(mp->phys); + mb->un.varWords[4] = putPaddrHigh(mp->phys); + mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); + + return 0; } /** @@ -1805,6 +1826,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) /** * lpfc_init_vpi - Initialize the INIT_VPI mailbox command + * @phba: pointer to the hba structure to init the VPI for. * @mbox: pointer to lpfc mbox command to initialize. * @vpi: VPI to be initialized. * @@ -1815,11 +1837,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) * successful virtual NPort login. 
**/ void -lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) +lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); - bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); + bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, + vpi + phba->vpi_base); + bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, + phba->pport->vfi + phba->vfi_base); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a0f973e7acb3..62f98b343b8b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -11341,7 +11341,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; - lpfc_init_vpi(mboxq, vpi); + lpfc_init_vpi(phba, mboxq, vpi); mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); if (rc != MBX_TIMEOUT) diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e0b49922193e..9ba2a01c0e38 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -313,22 +313,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } - /* - * In SLI4, the vpi must be activated before it can be used - * by the port. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - rc = lpfc_sli4_init_vpi(phba, vpi); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, - "1838 Failed to INIT_VPI on vpi %d " - "status %d\n", vpi, rc); - rc = VPORT_NORESOURCES; - lpfc_free_vpi(phba, vpi); - goto error_out; - } - } - /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, @@ -367,12 +351,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } - memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); - memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); - if (fc_vport->node_name != 0) - u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); - if (fc_vport->port_name != 0) - u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); + u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); + u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); @@ -404,7 +384,34 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; + /* + * In SLI4, the vpi must be activated before it can be used + * by the port. + */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + (pport->vfi_state & LPFC_VFI_REGISTERED)) { + rc = lpfc_sli4_init_vpi(phba, vpi); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "1838 Failed to INIT_VPI on vpi %d " + "status %d\n", vpi, rc); + rc = VPORT_NORESOURCES; + lpfc_free_vpi(phba, vpi); + goto error_out; + } + } else if (phba->sli_rev == LPFC_SLI_REV4) { + /* + * Driver cannot INIT_VPI now. Set the flags to + * init_vpi when reg_vfi complete. 
+ */ + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + rc = VPORT_OK; + goto out; + } + if ((phba->link_state < LPFC_LINK_UP) || + (pport->port_state < LPFC_FABRIC_CFG_LINK) || (phba->fc_topology == TOPOLOGY_LOOP)) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); rc = VPORT_OK; From f1c3b0fcbb8104dac92d65d5016500a09beea287 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:32 -0400 Subject: [PATCH 017/209] [SCSI] lpfc 8.3.4: Add bsg (SGIOv4) support for ELS/CT support Add bsg (SGIOv4) support for sending and receiving ELS, CT commands This patch adds a new file, lpfc_bsg.c. Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/Makefile | 2 +- drivers/scsi/lpfc/lpfc.h | 11 + drivers/scsi/lpfc/lpfc_attr.c | 3 + drivers/scsi/lpfc/lpfc_bsg.c | 904 ++++++++++++++++++++++++++++++++++ drivers/scsi/lpfc/lpfc_crtn.h | 8 + drivers/scsi/lpfc/lpfc_ct.c | 2 + drivers/scsi/lpfc/lpfc_els.c | 2 +- drivers/scsi/lpfc/lpfc_init.c | 3 + drivers/scsi/lpfc/lpfc_nl.h | 20 + drivers/scsi/lpfc/lpfc_scsi.c | 1 + 10 files changed, 954 insertions(+), 2 deletions(-) create mode 100644 drivers/scsi/lpfc/lpfc_bsg.c diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 1c286707dd5f..ad05d6edb8f6 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -28,4 +28,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ - lpfc_vport.o lpfc_debugfs.o + lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8cca3619c745..aa10f7951634 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -441,6 +441,12 @@ enum intr_type_t { MSIX, }; +struct unsol_rcv_ct_ctx { + uint32_t ctxt_id; + uint32_t SID; + uint32_t oxid; +}; + struct lpfc_hba { /* SCSI interface function jump table entries */ int (*lpfc_new_scsi_buf) @@ -776,6 +782,11 @@ struct lpfc_hba { uint8_t valid_vlan; uint16_t vlan_id; struct list_head fcf_conn_rec_list; + + struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */ + struct list_head ct_ev_waiters; + struct unsol_rcv_ct_ctx ct_ctx[64]; + uint32_t ctx_idx; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 7a629511338e..e1a30a16a9fa 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4132,6 +4132,9 @@ struct fc_function_template lpfc_transport_functions = { .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, + + .bsg_request = lpfc_bsg_request, + .bsg_timeout = lpfc_bsg_timeout, }; struct fc_function_template lpfc_vport_transport_functions = { diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c new file mode 100644 index 000000000000..da6bf5aac9dd --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -0,0 +1,904 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include +#include +#include + +#include +#include +#include +#include + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" + +/** + * lpfc_bsg_rport_ct - send a CT command from a bsg request + * @job: fc_bsg_job to handle + */ +static int +lpfc_bsg_rport_ct(struct fc_bsg_job *job) +{ + struct Scsi_Host *shost = job->shost; + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_rport_data *rdata = job->rport->dd_data; + struct lpfc_nodelist *ndlp = rdata->pnode; + struct ulp_bde64 *bpl = NULL; + uint32_t timeout; + struct lpfc_iocbq *cmdiocbq = NULL; + struct lpfc_iocbq *rspiocbq = NULL; + IOCB_t *cmd; + IOCB_t *rsp; + struct lpfc_dmabuf *bmp = NULL; + int request_nseg; + int reply_nseg; + struct scatterlist *sgel = NULL; + int numbde; + dma_addr_t busaddr; + int rc = 0; + + /* in case no data is transferred */ + job->reply->reply_payload_rcv_len = 0; + + if (!lpfc_nlp_get(ndlp)) { + job->reply->result = -ENODEV; + return 0; + } + + if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { + rc = -ENODEV; + goto free_ndlp_exit; + } + + spin_lock_irq(shost->host_lock); + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + spin_unlock_irq(shost->host_lock); + goto free_ndlp_exit; + } + cmd = &cmdiocbq->iocb; + + rspiocbq = lpfc_sli_get_iocbq(phba); + if (!rspiocbq) { + rc = -ENOMEM; + goto free_cmdiocbq; + } + spin_unlock_irq(shost->host_lock); + + rsp = &rspiocbq->iocb; + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + spin_lock_irq(shost->host_lock); + goto free_rspiocbq; + } + + spin_lock_irq(shost->host_lock); + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto free_bmp; + } + spin_unlock_irq(shost->host_lock); + + INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; + + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + for_each_sg(job->reply_payload.sg_list, sgel, 
reply_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + cmd->un.genreq64.bdl.ulpIoTag32 = 0; + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + cmd->un.genreq64.w5.hcsw.Dfctl = 0; + cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; + cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; + cmd->ulpClass = CLASS3; + cmd->ulpContext = ndlp->nlp_rpi; + cmd->ulpOwner = OWN_CHIP; + cmdiocbq->vport = phba->pport; + cmdiocbq->context1 = NULL; + cmdiocbq->context2 = NULL; + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + + timeout = phba->fc_ratov * 2; + job->dd_data = cmdiocbq; + + rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, + timeout + LPFC_DRVR_TIMEOUT); + + if (rc != IOCB_TIMEDOUT) { + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } + + if (rc == IOCB_TIMEDOUT) { + lpfc_sli_release_iocbq(phba, rspiocbq); + rc = -EACCES; + goto free_ndlp_exit; + } + + if (rc != IOCB_SUCCESS) { + rc = -EACCES; + goto free_outdmp; + } + + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & 0xff) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + goto free_outdmp; + } + } else + job->reply->reply_payload_rcv_len = + rsp->un.genreq64.bdl.bdeSize; + +free_outdmp: + spin_lock_irq(shost->host_lock); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); +free_bmp: + kfree(bmp); +free_rspiocbq: + lpfc_sli_release_iocbq(phba, rspiocbq); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); + spin_unlock_irq(shost->host_lock); +free_ndlp_exit: + lpfc_nlp_put(ndlp); + + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + + return 0; +} + +/** + * lpfc_bsg_rport_els - send an ELS command from a bsg request + * @job: fc_bsg_job to handle + */ +static int +lpfc_bsg_rport_els(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_rport_data *rdata = job->rport->dd_data; + struct lpfc_nodelist *ndlp = rdata->pnode; + + uint32_t elscmd; + uint32_t cmdsize; + uint32_t rspsize; + struct lpfc_iocbq *rspiocbq; + struct lpfc_iocbq *cmdiocbq; + IOCB_t *rsp; + uint16_t rpi = 0; + struct lpfc_dmabuf *pcmd; + struct lpfc_dmabuf *prsp; + struct lpfc_dmabuf *pbuflist = NULL; + struct ulp_bde64 *bpl; + int iocb_status; + int request_nseg; + int reply_nseg; + struct scatterlist *sgel = NULL; + int numbde; + dma_addr_t busaddr; + int rc = 0; + + /* in case no data is transferred */ + job->reply->reply_payload_rcv_len = 0; + + if (!lpfc_nlp_get(ndlp)) { + rc = -ENODEV; + goto out; + } + + elscmd = job->request->rqst_data.r_els.els_code; + cmdsize = 
job->request_payload.payload_len; + rspsize = job->reply_payload.payload_len; + rspiocbq = lpfc_sli_get_iocbq(phba); + if (!rspiocbq) { + lpfc_nlp_put(ndlp); + rc = -ENOMEM; + goto out; + } + + rsp = &rspiocbq->iocb; + rpi = ndlp->nlp_rpi; + + cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, elscmd); + + if (!cmdiocbq) { + lpfc_sli_release_iocbq(phba, rspiocbq); + return -EIO; + } + + job->dd_data = cmdiocbq; + pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; + prsp = (struct lpfc_dmabuf *) pcmd->list.next; + + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(pcmd); + lpfc_mbuf_free(phba, prsp->virt, prsp->phys); + kfree(prsp); + cmdiocbq->context2 = NULL; + + pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; + bpl = (struct ulp_bde64 *) pbuflist->virt; + + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); + cmdiocbq->iocb.ulpContext = rpi; + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->context1 = NULL; + cmdiocbq->context2 = NULL; + + iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, + rspiocbq, (phba->fc_ratov * 2) + + LPFC_DRVR_TIMEOUT); + + /* release the new ndlp once the iocb completes */ + lpfc_nlp_put(ndlp); + if (iocb_status != IOCB_TIMEDOUT) { + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } + + if (iocb_status == IOCB_SUCCESS) { + if (rsp->ulpStatus == IOSTAT_SUCCESS) { + job->reply->reply_payload_rcv_len = + rsp->un.elsreq64.bdl.bdeSize; + rc = 0; + } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { + struct fc_bsg_ctels_reply *els_reply; + /* LS_RJT data returned in word 4 */ + uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; + + els_reply = &job->reply->reply_data.ctels_reply; + job->reply->result = 0; + els_reply->status = FC_CTELS_STATUS_REJECT; + els_reply->rjt_data.action = rjt_data[0]; + els_reply->rjt_data.reason_code = rjt_data[1]; + els_reply->rjt_data.reason_explanation = rjt_data[2]; + els_reply->rjt_data.vendor_unique = rjt_data[3]; + } else + rc = -EIO; + } else + rc = -EIO; + + if (iocb_status != IOCB_TIMEDOUT) + lpfc_els_free_iocb(phba, cmdiocbq); + + lpfc_sli_release_iocbq(phba, rspiocbq); + +out: + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + + return 0; +} + +struct lpfc_ct_event { + struct list_head node; + int ref; + wait_queue_head_t wq; + + /* Event type and waiter 
identifiers */ + uint32_t type_mask; + uint32_t req_id; + uint32_t reg_id; + + /* next two flags are here for the auto-delete logic */ + unsigned long wait_time_stamp; + int waiting; + + /* seen and not seen events */ + struct list_head events_to_get; + struct list_head events_to_see; +}; + +struct event_data { + struct list_head node; + uint32_t type; + uint32_t immed_dat; + void *data; + uint32_t len; +}; + +static struct lpfc_ct_event * +lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id) +{ + struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); + if (!evt) + return NULL; + + INIT_LIST_HEAD(&evt->events_to_get); + INIT_LIST_HEAD(&evt->events_to_see); + evt->req_id = ev_req_id; + evt->reg_id = ev_reg_id; + evt->wait_time_stamp = jiffies; + init_waitqueue_head(&evt->wq); + + return evt; +} + +static void +lpfc_ct_event_free(struct lpfc_ct_event *evt) +{ + struct event_data *ed; + + list_del(&evt->node); + + while (!list_empty(&evt->events_to_get)) { + ed = list_entry(evt->events_to_get.next, typeof(*ed), node); + list_del(&ed->node); + kfree(ed->data); + kfree(ed); + } + + while (!list_empty(&evt->events_to_see)) { + ed = list_entry(evt->events_to_see.next, typeof(*ed), node); + list_del(&ed->node); + kfree(ed->data); + kfree(ed); + } + + kfree(evt); +} + +static inline void +lpfc_ct_event_ref(struct lpfc_ct_event *evt) +{ + evt->ref++; +} + +static inline void +lpfc_ct_event_unref(struct lpfc_ct_event *evt) +{ + if (--evt->ref < 0) + lpfc_ct_event_free(evt); +} + +#define SLI_CT_ELX_LOOPBACK 0x10 + +enum ELX_LOOPBACK_CMD { + ELX_LOOPBACK_XRI_SETUP, + ELX_LOOPBACK_DATA, +}; + +/** + * lpfc_bsg_ct_unsol_event - process an unsolicited CT command + * @phba: + * @pring: + * @piocbq: + * + * This function is called when an unsolicited CT command is received. It + * forwards the event to any processes registerd to receive CT events. 
+ */ +void +lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocbq) +{ + uint32_t evt_req_id = 0; + uint32_t cmd; + uint32_t len; + struct lpfc_dmabuf *dmabuf = NULL; + struct lpfc_ct_event *evt; + struct event_data *evt_dat = NULL; + struct lpfc_iocbq *iocbq; + size_t offset = 0; + struct list_head head; + struct ulp_bde64 *bde; + dma_addr_t dma_addr; + int i; + struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; + struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; + struct lpfc_hbq_entry *hbqe; + struct lpfc_sli_ct_request *ct_req; + + INIT_LIST_HEAD(&head); + list_add_tail(&head, &piocbq->list); + + if (piocbq->iocb.ulpBdeCount == 0 || + piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) + goto error_ct_unsol_exit; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) + dmabuf = bdeBuf1; + else { + dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, + piocbq->iocb.un.cont64[0].addrLow); + dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); + } + + ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; + evt_req_id = ct_req->FsType; + cmd = ct_req->CommandResponse.bits.CmdRsp; + len = ct_req->CommandResponse.bits.Size; + if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) + lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); + + mutex_lock(&phba->ct_event_mutex); + list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + if (evt->req_id != evt_req_id) + continue; + + lpfc_ct_event_ref(evt); + + evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); + if (!evt_dat) { + lpfc_ct_event_unref(evt); + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2614 Memory allocation failed for " + "CT event\n"); + break; + } + + mutex_unlock(&phba->ct_event_mutex); + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + /* take accumulated byte count from the last iocbq */ + iocbq = list_entry(head.prev, typeof(*iocbq), list); + evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; + } else { + list_for_each_entry(iocbq, &head, list) { + for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) + evt_dat->len += + iocbq->iocb.un.cont64[i].tus.f.bdeSize; + } + } + + evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); + if (!evt_dat->data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2615 Memory allocation failed for " + "CT event data, size %d\n", + evt_dat->len); + kfree(evt_dat); + mutex_lock(&phba->ct_event_mutex); + lpfc_ct_event_unref(evt); + mutex_unlock(&phba->ct_event_mutex); + goto error_ct_unsol_exit; + } + + list_for_each_entry(iocbq, &head, list) { + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + bdeBuf1 = iocbq->context2; + bdeBuf2 = iocbq->context3; + } + for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { + int size = 0; + if (phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED) { + if (i == 0) { + hbqe = (struct lpfc_hbq_entry *) + &iocbq->iocb.un.ulpWord[0]; + size = hbqe->bde.tus.f.bdeSize; + dmabuf = bdeBuf1; + } else if (i == 1) { + hbqe = (struct lpfc_hbq_entry *) + &iocbq->iocb.unsli3. + sli3Words[4]; + size = hbqe->bde.tus.f.bdeSize; + dmabuf = bdeBuf2; + } + if ((offset + size) > evt_dat->len) + size = evt_dat->len - offset; + } else { + size = iocbq->iocb.un.cont64[i]. 
+ tus.f.bdeSize; + bde = &iocbq->iocb.un.cont64[i]; + dma_addr = getPaddr(bde->addrHigh, + bde->addrLow); + dmabuf = lpfc_sli_ringpostbuf_get(phba, + pring, dma_addr); + } + if (!dmabuf) { + lpfc_printf_log(phba, KERN_ERR, + LOG_LIBDFC, "2616 No dmabuf " + "found for iocbq 0x%p\n", + iocbq); + kfree(evt_dat->data); + kfree(evt_dat); + mutex_lock(&phba->ct_event_mutex); + lpfc_ct_event_unref(evt); + mutex_unlock(&phba->ct_event_mutex); + goto error_ct_unsol_exit; + } + memcpy((char *)(evt_dat->data) + offset, + dmabuf->virt, size); + offset += size; + if (evt_req_id != SLI_CT_ELX_LOOPBACK && + !(phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED)) { + lpfc_sli_ringpostbuf_put(phba, pring, + dmabuf); + } else { + switch (cmd) { + case ELX_LOOPBACK_XRI_SETUP: + if (!(phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED)) + lpfc_post_buffer(phba, + pring, + 1); + else + lpfc_in_buf_free(phba, + dmabuf); + break; + default: + if (!(phba->sli3_options & + LPFC_SLI3_HBQ_ENABLED)) + lpfc_post_buffer(phba, + pring, + 1); + break; + } + } + } + } + + mutex_lock(&phba->ct_event_mutex); + if (phba->sli_rev == LPFC_SLI_REV4) { + evt_dat->immed_dat = phba->ctx_idx; + phba->ctx_idx = (phba->ctx_idx + 1) % 64; + phba->ct_ctx[evt_dat->immed_dat].oxid = + piocbq->iocb.ulpContext; + phba->ct_ctx[evt_dat->immed_dat].SID = + piocbq->iocb.un.rcvels.remoteID; + } else + evt_dat->immed_dat = piocbq->iocb.ulpContext; + + evt_dat->type = FC_REG_CT_EVENT; + list_add(&evt_dat->node, &evt->events_to_see); + wake_up_interruptible(&evt->wq); + lpfc_ct_event_unref(evt); + if (evt_req_id == SLI_CT_ELX_LOOPBACK) + break; + } + mutex_unlock(&phba->ct_event_mutex); + +error_ct_unsol_exit: + if (!list_empty(&head)) + list_del(&head); + + return; +} + +/** + * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command + * @job: SET_EVENT fc_bsg_job + */ +static int +lpfc_bsg_set_event(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct set_ct_event *event_req; + struct lpfc_ct_event *evt; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2612 Received SET_CT_EVENT below minimum " + "size\n"); + return -EINVAL; + } + + event_req = (struct set_ct_event *) + job->request->rqst_data.h_vendor.vendor_cmd; + + mutex_lock(&phba->ct_event_mutex); + list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + if (evt->reg_id == event_req->ev_reg_id) { + lpfc_ct_event_ref(evt); + evt->wait_time_stamp = jiffies; + break; + } + } + mutex_unlock(&phba->ct_event_mutex); + + if (&evt->node == &phba->ct_ev_waiters) { + /* no event waiting struct yet - first call */ + evt = lpfc_ct_event_new(event_req->ev_reg_id, + event_req->ev_req_id); + if (!evt) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2617 Failed allocation of event " + "waiter\n"); + return -ENOMEM; + } + + mutex_lock(&phba->ct_event_mutex); + list_add(&evt->node, &phba->ct_ev_waiters); + lpfc_ct_event_ref(evt); + mutex_unlock(&phba->ct_event_mutex); + } + + evt->waiting = 1; + if (wait_event_interruptible(evt->wq, + !list_empty(&evt->events_to_see))) { + mutex_lock(&phba->ct_event_mutex); + lpfc_ct_event_unref(evt); /* release ref */ + lpfc_ct_event_unref(evt); /* delete */ + mutex_unlock(&phba->ct_event_mutex); + rc = -EINTR; + goto set_event_out; + } + + evt->wait_time_stamp = jiffies; + evt->waiting = 0; + + mutex_lock(&phba->ct_event_mutex); + list_move(evt->events_to_see.prev, 
&evt->events_to_get); + lpfc_ct_event_unref(evt); /* release ref */ + mutex_unlock(&phba->ct_event_mutex); + +set_event_out: + /* set_event carries no reply payload */ + job->reply->reply_payload_rcv_len = 0; + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + + return 0; +} + +/** + * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command + * @job: GET_EVENT fc_bsg_job + */ +static int +lpfc_bsg_get_event(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct get_ct_event *event_req; + struct get_ct_event_reply *event_reply; + struct lpfc_ct_event *evt; + struct event_data *evt_dat = NULL; + int rc = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2613 Received GET_CT_EVENT request below " + "minimum size\n"); + return -EINVAL; + } + + event_req = (struct get_ct_event *) + job->request->rqst_data.h_vendor.vendor_cmd; + + event_reply = (struct get_ct_event_reply *) + job->reply->reply_data.vendor_reply.vendor_rsp; + + mutex_lock(&phba->ct_event_mutex); + list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + if (evt->reg_id == event_req->ev_reg_id) { + if (list_empty(&evt->events_to_get)) + break; + lpfc_ct_event_ref(evt); + evt->wait_time_stamp = jiffies; + evt_dat = list_entry(evt->events_to_get.prev, + struct event_data, node); + list_del(&evt_dat->node); + break; + } + } + mutex_unlock(&phba->ct_event_mutex); + + if (!evt_dat) { + job->reply->reply_payload_rcv_len = 0; + rc = -ENOENT; + goto error_get_event_exit; + } + + if (evt_dat->len > job->reply_payload.payload_len) { + evt_dat->len = job->reply_payload.payload_len; + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2618 Truncated event data at %d " + "bytes\n", + job->reply_payload.payload_len); + } + + event_reply->immed_data = evt_dat->immed_dat; + + if (evt_dat->len > 0) + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + evt_dat->data, evt_dat->len); + else + job->reply->reply_payload_rcv_len = 0; + rc = 0; + + if (evt_dat) + kfree(evt_dat->data); + kfree(evt_dat); + mutex_lock(&phba->ct_event_mutex); + lpfc_ct_event_unref(evt); + mutex_unlock(&phba->ct_event_mutex); + +error_get_event_exit: + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + + return rc; +} + +/** + * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job + * @job: fc_bsg_job to handle + */ +static int +lpfc_bsg_hst_vendor(struct fc_bsg_job *job) +{ + int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; + + switch (command) { + case LPFC_BSG_VENDOR_SET_CT_EVENT: + return lpfc_bsg_set_event(job); + break; + + case LPFC_BSG_VENDOR_GET_CT_EVENT: + return lpfc_bsg_get_event(job); + break; + + default: + return -EINVAL; + } +} + +/** + * lpfc_bsg_request - handle a bsg request from the FC transport + * @job: fc_bsg_job to handle + */ +int +lpfc_bsg_request(struct fc_bsg_job *job) +{ + uint32_t msgcode; + int rc = -EINVAL; + + msgcode = job->request->msgcode; + + switch (msgcode) { + case FC_BSG_HST_VENDOR: + rc = lpfc_bsg_hst_vendor(job); + break; + case FC_BSG_RPT_ELS: + rc = lpfc_bsg_rport_els(job); + break; + case FC_BSG_RPT_CT: + rc = lpfc_bsg_rport_ct(job); + break; + default: + break; + } + + return rc; +} 
+ +/** + * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport + * @job: fc_bsg_job that has timed out + * + * This function just aborts the job's IOCB. The aborted IOCB will return to + * the waiting function which will handle passing the error back to userspace + */ +int +lpfc_bsg_timeout(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + + if (cmdiocb) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + + return 0; +} diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 48943d344143..0830f37409a3 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -137,6 +137,9 @@ int lpfc_els_disc_adisc(struct lpfc_vport *); int lpfc_els_disc_plogi(struct lpfc_vport *); void lpfc_els_timeout(unsigned long); void lpfc_els_timeout_handler(struct lpfc_vport *); +struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, + uint8_t, struct lpfc_nodelist *, + uint32_t, uint32_t); void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -364,3 +367,8 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba); #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 +/* functions to support SGIOv4/bsg interface */ +int lpfc_bsg_request(struct fc_bsg_job *); +int lpfc_bsg_timeout(struct fc_bsg_job *); +void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, + struct lpfc_iocbq *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 0e532f072eb3..9df7ed38e1be 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -97,6 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct list_head head; struct lpfc_dmabuf *bdeBuf; + lpfc_bsg_ct_unsol_event(phba, pring, piocbq); + if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f72fdf23bf1b..45337cd23feb 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -146,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) * Pointer to the newly allocated/prepared els iocb data structure * NULL - when els iocb data structure allocation/preparation failed **/ -static struct lpfc_iocbq * +struct lpfc_iocbq * lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist *ndlp, uint32_t did, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 18bc5905c44c..08112e7a8346 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4171,6 +4171,9 @@ lpfc_hba_alloc(struct pci_dev *pdev) return NULL; } + mutex_init(&phba->ct_event_mutex); + INIT_LIST_HEAD(&phba->ct_ev_waiters); + return phba; } diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index 27d1a88a98fe..d655ed3eebef 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -177,3 +177,23 @@ struct temp_event { uint32_t data; }; +/* bsg definitions */ +#define LPFC_BSG_VENDOR_SET_CT_EVENT 1 +#define LPFC_BSG_VENDOR_GET_CT_EVENT 2 + +struct set_ct_event { + uint32_t command; + uint32_t ev_req_id; + uint32_t ev_reg_id; 
+}; + +struct get_ct_event { + uint32_t command; + uint32_t ev_reg_id; + uint32_t ev_req_id; +}; + +struct get_ct_event_reply { + uint32_t immed_data; + uint32_t type; +}; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index da59c4f0168f..6ba2c3bae2a3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3584,6 +3584,7 @@ struct scsi_host_template lpfc_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, }; struct scsi_host_template lpfc_vport_template = { From e4e74273c3e8d0dc54ddce4fed27e266927c7ad0 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:38 -0400 Subject: [PATCH 018/209] [SCSI] lpfc 8.3.4: Remove spaces before newlines in several log messages Remove spaces before newlines in several log messages Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_hbadisc.c | 2 +- drivers/scsi/lpfc/lpfc_init.c | 10 +++++----- drivers/scsi/lpfc/lpfc_mbox.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 6 +++--- drivers/scsi/lpfc/lpfc_sli.c | 4 ++-- drivers/scsi/lpfc/lpfc_vport.c | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 491c53fd1ca4..e6a47e25b218 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2402,7 +2402,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba) if (!new_fc_vport) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0546 lpfc_create_static_vport failed to" - " create vport \n"); + " create vport\n"); continue; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 08112e7a8346..562d8cee874b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -580,7 +580,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) KERN_ERR, LOG_INIT, "0456 Adapter failed to issue " - "ASYNCEVT_ENABLE mbox status x%x \n.", + "ASYNCEVT_ENABLE mbox status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } @@ -594,7 +594,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " - "to get Option ROM version status x%x\n.", rc); + "to get Option ROM version status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } @@ -2923,7 +2923,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2546 New FCF found index 0x%x tag 0x%x \n", + "2546 New FCF found index 0x%x tag 0x%x\n", acqe_fcoe->fcf_index, acqe_fcoe->event_tag); /* @@ -2949,7 +2949,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2548 FCF Table full count 0x%x tag 0x%x \n", + "2548 FCF Table full count 0x%x tag 0x%x\n", bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), acqe_fcoe->event_tag); break; @@ -2957,7 +2957,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, "2549 FCF disconnected fron network index 0x%x" - " tag 0x%x \n", acqe_fcoe->fcf_index, + " tag 0x%x\n", acqe_fcoe->fcf_index, acqe_fcoe->event_tag); /* If the event is not for currently used fcf do nothing */ if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) diff --git a/drivers/scsi/lpfc/lpfc_mbox.c 
b/drivers/scsi/lpfc/lpfc_mbox.c index a776f86bbcc3..1ab405902a18 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1893,7 +1893,7 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba, /* dump_fcoe_param failed to allocate memory */ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "2569 lpfc_dump_fcoe_param: memory" - " allocation failed \n"); + " allocation failed\n"); return 1; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 6ba2c3bae2a3..61d089703806 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2142,7 +2142,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } else if (resp_info & RESID_OVER) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "9028 FCP command x%x residual overrun error. " - "Data: x%x x%x \n", cmnd->cmnd[0], + "Data: x%x x%x\n", cmnd->cmnd[0], scsi_bufflen(cmnd), scsi_get_resid(cmnd)); host_status = DID_ERROR; @@ -2843,7 +2843,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) dif_op_str[scsi_get_prot_op(cmnd)]); lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x \n", + "%02x %02x %02x %02x %02x\n", cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], @@ -2871,7 +2871,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) dif_op_str[scsi_get_prot_op(cmnd)]); lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x \n", + "%02x %02x %02x %02x %02x\n", cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 62f98b343b8b..43cbe336f1f8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4304,7 +4304,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) */ if (lpfc_sli4_read_fcoe_params(phba, mboxq)) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, - "2570 Failed to read FCoE parameters \n"); + "2570 Failed to read FCoE parameters\n"); /* Issue READ_REV to collect vpd and FW information. */ vpd_size = PAGE_SIZE; @@ -6326,7 +6326,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, KERN_ERR, LOG_SLI, "0346 Ring %d handler: unexpected ASYNC_STATUS" - " evt_code 0x%x \n" + " evt_code 0x%x\n" "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 9ba2a01c0e38..606efa767548 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -668,7 +668,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_printf_log(vport->phba, KERN_WARNING, LOG_VPORT, "1829 CT command failed to " - "delete objects on fabric. 
\n"); + "delete objects on fabric\n"); } /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); From 7cfbc8d9663c88d11e2c1acd2009f1786e150cf6 Mon Sep 17 00:00:00 2001 From: James Smart Date: Sun, 19 Jul 2009 10:01:44 -0400 Subject: [PATCH 019/209] [SCSI] lpfc 8.3.4: Update driver version to 8.3.4 Update driver version to 8.3.4 Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 41094e02304b..9ae20af4bdb7 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.3" +#define LPFC_DRIVER_VERSION "8.3.4" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" From 4643682ba50634db5cd61292711d031630baf233 Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 28 Jul 2009 12:30:01 -0400 Subject: [PATCH 020/209] [SCSI] fc_transport: Correct max fc_host attribute count Signed-off-by: James Smart Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 751f239e01ea..b98885de6876 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -291,7 +291,7 @@ static void fc_scsi_scan_rport(struct work_struct *work); #define FC_STARGET_NUM_ATTRS 3 #define FC_RPORT_NUM_ATTRS 10 #define FC_VPORT_NUM_ATTRS 9 -#define FC_HOST_NUM_ATTRS 21 +#define FC_HOST_NUM_ATTRS 22 struct fc_internal { struct scsi_transport_template t; From a4a8b064dd06d30d007e46445d17575e2875c340 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 28 Jul 2009 16:39:05 +0000 Subject: [PATCH 021/209] [SCSI] scsi_transport_sas: fix incorrect duplicate setup of max_phys There are two setup places for max_phys in scsi_transport_sas.c; one incorrectly places a NULL into host_attrs instead of port_attrs. Remove it. Reported-by: Roel Kluin Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_sas.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 0895d3c71b03..fd47cb1bee1b 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1691,10 +1691,6 @@ sas_attach_transport(struct sas_function_template *ft) i->f = ft; - count = 0; - SETUP_PORT_ATTRIBUTE(num_phys); - i->host_attrs[count] = NULL; - count = 0; SETUP_PHY_ATTRIBUTE(initiator_port_protocols); SETUP_PHY_ATTRIBUTE(target_port_protocols); From 3c0d1d94aa516cad50274fe1aa9f745957b67a65 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Tue, 28 Jul 2009 23:08:06 -0500 Subject: [PATCH 022/209] [SCSI] ALUA: send STPG if explicit and implicit is supported alua_activate only sends a STPG if only explicit is suppored. As a result, for EMC targets that support both we end up doing a implicit failover when X commands are finally sent to the other SP. This patch does a AND on the h->tpgs, so we do a explicit failover right away. 
Signed-off-by: Mike Christie Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index dba154c8ff64..b5cdefaf2608 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -663,7 +663,7 @@ static int alua_activate(struct scsi_device *sdev) goto out; } - if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) + if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); out: From 7a7f0c7f7a0cbda062d1ff2ff1d3f99d0e41d2af Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:03:50 -0700 Subject: [PATCH 023/209] [SCSI] fcoe: Fix validation of mac address when checking for spma support Fix this bug of validating the wrong mac address while checking for SAN MAC address support from LLD as we should check ha->addr not ctlr.ctl_src_addr. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0a5609bb5817..b7dfc6ebed36 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -329,7 +329,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) rcu_read_lock(); for_each_dev_addr(netdev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && - (is_valid_ether_addr(fc->ctlr.ctl_src_addr))) { + (is_valid_ether_addr(ha->addr))) { memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); fc->ctlr.spma = 1; break; From 5a84baeaf7b8bb4188219140cb326a3e859b2312 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:03:55 -0700 Subject: [PATCH 024/209] [SCSI] libfcoe: Set fip_flags according to fcf and lport's capability of SPMA support When encap the els for FIP, set the fip_flags according to the FCF and lport's capability of supporting SPMA or FPMA or both. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index d6ed3f8255ad..78caa6be1130 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -413,10 +413,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fip_mac_desc *mac; struct fcoe_fcf *fcf; size_t dlen; + u16 fip_flags; fcf = fip->sel_fcf; if (!fcf) return -ENODEV; + + /* set flags according to both FCF and lport's capability on SPMA */ + fip_flags = fcf->flags; + fip_flags &= fip->spma ? 
FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA; + if (!fip_flags) + return -ENODEV; + dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); @@ -429,9 +437,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, cap->fip.fip_op = htons(FIP_OP_LS); cap->fip.fip_subcode = FIP_SC_REQ; cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW); - cap->fip.fip_flags = htons(FIP_FL_FPMA); - if (fip->spma) - cap->fip.fip_flags |= htons(FIP_FL_SPMA); + cap->fip.fip_flags = htons(fip_flags); cap->encaps.fd_desc.fip_dtype = dtype; cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; From 2f718d64ecc7010463d36e6ec4ae37778d03fc0b Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:04:01 -0700 Subject: [PATCH 025/209] [SCSI] fcoe: Call dev_ethtool_get_settings() in fcoe_link_ok No need to check phys_dev here, just call dev_ethtool_get_settings() directly will take care of this. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b7dfc6ebed36..81601269f4cd 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1635,29 +1635,25 @@ int fcoe_link_ok(struct fc_lport *lp) struct fcoe_softc *fc = lport_priv(lp); struct net_device *dev = fc->real_dev; struct ethtool_cmd ecmd = { ETHTOOL_GSET }; - int rc = 0; - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { - dev = fc->phys_dev; - if (dev->ethtool_ops->get_settings) { - dev->ethtool_ops->get_settings(dev, &ecmd); - lp->link_supported_speeds &= - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); - if (ecmd.supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; - if (ecmd.supported & SUPPORTED_10000baseT_Full) - lp->link_supported_speeds |= - FC_PORTSPEED_10GBIT; - if (ecmd.speed == SPEED_1000) - lp->link_speed = FC_PORTSPEED_1GBIT; - if (ecmd.speed == SPEED_10000) - lp->link_speed = FC_PORTSPEED_10GBIT; - } - } else - rc = -1; + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && + (!dev_ethtool_get_settings(dev, &ecmd))) { + lp->link_supported_speeds &= + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full)) + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; + if (ecmd.supported & SUPPORTED_10000baseT_Full) + lp->link_supported_speeds |= + FC_PORTSPEED_10GBIT; + if (ecmd.speed == SPEED_1000) + lp->link_speed = FC_PORTSPEED_1GBIT; + if (ecmd.speed == SPEED_10000) + lp->link_speed = FC_PORTSPEED_10GBIT; - return rc; + return 0; + } + return -1; } /** From beb29a6d421f6dbd41d68d0621c1b28ad1d4a9f4 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:06 -0700 Subject: [PATCH 026/209] [SCSI] libfc: remove extra semicolons from debug macros This is unlikely to cause any problems, but the libfc debug macros introduce extra undesirable semicolons. 
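The trailing semicolons are undesirable for the usual do { } while (0) reason: a statement-like macro should expand to exactly one statement, so the caller's own semicolon completes it. A stand-alone sketch (the DBG macro here is a simplified stand-in, not the libfc definition):

#include <stdio.h>

#define DBG(fmt, ...)                                   \
        do {                                            \
                printf("dbg: " fmt, ##__VA_ARGS__);     \
        } while (0)     /* note: no trailing ';' inside the expansion */

static void example(int err)
{
        if (err)
                DBG("error %d\n", err); /* expands to one statement */
        else
                DBG("ok\n");
        /*
         * If the expansion ended in "while (0);", the caller's semicolon
         * above would become a second, empty statement between the if
         * body and the else, and the else would no longer compile.
         */
}

The hunks below drop the stray semicolons from FC_CHECK_LOGGING and from each per-component debug macro so they behave this way.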
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- include/scsi/libfc.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index b92584a8843a..ef04a2c52b8c 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -51,22 +51,22 @@ do { \ do { \ CMD; \ } while (0); \ -} while (0); +} while (0) #define FC_LIBFC_DBG(fmt, args...) \ FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ - printk(KERN_INFO "libfc: " fmt, ##args);) + printk(KERN_INFO "libfc: " fmt, ##args)) #define FC_LPORT_DBG(lport, fmt, args...) \ FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ printk(KERN_INFO "lport: %6x: " fmt, \ - fc_host_port_id(lport->host), ##args);) + fc_host_port_id(lport->host), ##args)) #define FC_DISC_DBG(disc, fmt, args...) \ FC_CHECK_LOGGING(FC_DISC_LOGGING, \ printk(KERN_INFO "disc: %6x: " fmt, \ fc_host_port_id(disc->lport->host), \ - ##args);) + ##args)) #define FC_RPORT_DBG(rport, fmt, args...) \ do { \ @@ -75,31 +75,31 @@ do { \ FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ printk(KERN_INFO "rport: %6x: %6x: " fmt, \ fc_host_port_id(lport->host), \ - rport->port_id, ##args);) \ -} while (0); + rport->port_id, ##args)); \ +} while (0) #define FC_FCP_DBG(pkt, fmt, args...) \ FC_CHECK_LOGGING(FC_FCP_LOGGING, \ printk(KERN_INFO "fcp: %6x: %6x: " fmt, \ fc_host_port_id(pkt->lp->host), \ - pkt->rport->port_id, ##args);) + pkt->rport->port_id, ##args)) #define FC_EM_DBG(em, fmt, args...) \ FC_CHECK_LOGGING(FC_EM_LOGGING, \ printk(KERN_INFO "em: %6x: " fmt, \ fc_host_port_id(em->lp->host), \ - ##args);) + ##args)) #define FC_EXCH_DBG(exch, fmt, args...) \ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ printk(KERN_INFO "exch: %6x: %4x: " fmt, \ fc_host_port_id(exch->lp->host), \ - exch->xid, ##args);) + exch->xid, ##args)) #define FC_SCSI_DBG(lport, fmt, args...) \ FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ printk(KERN_INFO "scsi: %6x: " fmt, \ - fc_host_port_id(lport->host), ##args);) + fc_host_port_id(lport->host), ##args)) /* * libfc error codes From 7f74549ff630ad444b0b6bbcabf426f781910906 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:12 -0700 Subject: [PATCH 027/209] [SCSI] libfc: change debug messages to give host number. libfc debug messages currently show 'lport: :' wher is the hex assigned port-id. When the lport is logged off, that will be zero, so its hard to distinguish which instance is involved. The FC-ID can change if the port is re-patched or changes VSANs. Two lports may even have the same FC-ID if connected to isolated SANs. Change the debug messages to print the SCSI host number "hostN:", which will not change for the life of the lport. Still show the FC_ID on lport messages. Also, add a macro to FC_RPORT_ID_DBG for rport debugging where there's no rdata structure involved. It takes the lport and port_id as parameters. Use this in fc_rport_recv_plogi_req() and fc_rport_recv_logo_req(). Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- include/scsi/libfc.h | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index ef04a2c52b8c..efdb6ba310e5 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -59,47 +59,51 @@ do { \ #define FC_LPORT_DBG(lport, fmt, args...) 
\ FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ - printk(KERN_INFO "lport: %6x: " fmt, \ - fc_host_port_id(lport->host), ##args)) + printk(KERN_INFO "host%u: lport %6x: " fmt, \ + (lport)->host->host_no, \ + fc_host_port_id((lport)->host), ##args)) #define FC_DISC_DBG(disc, fmt, args...) \ FC_CHECK_LOGGING(FC_DISC_LOGGING, \ - printk(KERN_INFO "disc: %6x: " fmt, \ - fc_host_port_id(disc->lport->host), \ + printk(KERN_INFO "host%u: disc: " fmt, \ + (disc)->lport->host->host_no, \ ##args)) +#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ + FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ + printk(KERN_INFO "host%u: rport %6x: " fmt, \ + (lport)->host->host_no, \ + (port_id), ##args)) + #define FC_RPORT_DBG(rport, fmt, args...) \ do { \ struct fc_rport_libfc_priv *rdata = rport->dd_data; \ struct fc_lport *lport = rdata->local_port; \ - FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ - printk(KERN_INFO "rport: %6x: %6x: " fmt, \ - fc_host_port_id(lport->host), \ - rport->port_id, ##args)); \ + FC_RPORT_ID_DBG(lport, rport->port_id, fmt, ##args); \ } while (0) #define FC_FCP_DBG(pkt, fmt, args...) \ FC_CHECK_LOGGING(FC_FCP_LOGGING, \ - printk(KERN_INFO "fcp: %6x: %6x: " fmt, \ - fc_host_port_id(pkt->lp->host), \ + printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ + (pkt)->lp->host->host_no, \ pkt->rport->port_id, ##args)) #define FC_EM_DBG(em, fmt, args...) \ FC_CHECK_LOGGING(FC_EM_LOGGING, \ - printk(KERN_INFO "em: %6x: " fmt, \ - fc_host_port_id(em->lp->host), \ + printk(KERN_INFO "host%u: em: " fmt, \ + (em)->lp->host->host_no, \ ##args)) #define FC_EXCH_DBG(exch, fmt, args...) \ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ - printk(KERN_INFO "exch: %6x: %4x: " fmt, \ - fc_host_port_id(exch->lp->host), \ + printk(KERN_INFO "host%u: xid %4x: " fmt, \ + (exch)->lp->host->host_no, \ exch->xid, ##args)) #define FC_SCSI_DBG(lport, fmt, args...) \ FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ - printk(KERN_INFO "scsi: %6x: " fmt, \ - fc_host_port_id(lport->host), ##args)) + printk(KERN_INFO "host%u: scsi: " fmt, \ + (lport)->host->host_no, ##args)) /* * libfc error codes From f161fb72104c7addac3d404a1ff543b2491c1426 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:17 -0700 Subject: [PATCH 028/209] [SCSI] fcoe: stop delivery of received frames before doing lport_destroy() To be more sure that no more input arrives at the local port as it is being destroyed, clean the queues in the per-cpu receive threads. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 81601269f4cd..14a4017a1535 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -466,6 +466,9 @@ static int fcoe_if_destroy(struct net_device *netdev) /* tear-down the FCoE controller */ fcoe_ctlr_destroy(&fc->ctlr); + /* Free queued packets for the per-CPU receive threads */ + fcoe_percpu_clean(lp); + /* Cleanup the fc_lport */ fc_lport_destroy(lp); fc_fcp_destroy(lp); @@ -478,9 +481,6 @@ static int fcoe_if_destroy(struct net_device *netdev) if (lp->emp) fc_exch_mgr_free(lp->emp); - /* Free the per-CPU receive threads */ - fcoe_percpu_clean(lp); - /* Free existing skbs */ fcoe_clean_pending_queue(lp); From b1d9fd5574763abe5c763e32e3547a4adee9bd88 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:22 -0700 Subject: [PATCH 029/209] [SCSI] libfc: rename lport NONE state to DISABLED The state NONE was meant to be invalid, but has been used as the initial state. Rename it to be DISABLED, as more descriptive. Further patches will make it the like the RESET state, except it won't transition to FLOGI until fc_lport_fabric_login() is called. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 2 +- drivers/scsi/libfc/fc_lport.c | 12 ++++++------ include/scsi/libfc.h | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 145ab9ba55ea..e6d82d780acd 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1875,7 +1875,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, u32 f_ctl; /* lport lock ? 
*/ - if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { + if (!lp || !mp || lp->state == LPORT_ST_DISABLED) { FC_LPORT_DBG(lp, "Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 745fa5555d6a..3b28190ca2eb 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -113,7 +113,7 @@ static void fc_lport_enter_ready(struct fc_lport *); static void fc_lport_enter_logo(struct fc_lport *); static const char *fc_lport_state_names[] = { - [LPORT_ST_NONE] = "none", + [LPORT_ST_DISABLED] = "disabled", [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", [LPORT_ST_RPN_ID] = "RPN_ID", @@ -550,7 +550,7 @@ int fc_fabric_login(struct fc_lport *lport) int rc = -1; mutex_lock(&lport->lp_mutex); - if (lport->state == LPORT_ST_NONE) { + if (lport->state == LPORT_ST_DISABLED) { fc_lport_enter_reset(lport); rc = 0; } @@ -637,7 +637,7 @@ EXPORT_SYMBOL(fc_fabric_logoff); int fc_lport_destroy(struct fc_lport *lport) { mutex_lock(&lport->lp_mutex); - lport->state = LPORT_ST_NONE; + lport->state = LPORT_ST_DISABLED; lport->link_up = 0; lport->tt.frame_send = fc_frame_drop; mutex_unlock(&lport->lp_mutex); @@ -992,7 +992,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) schedule_delayed_work(&lport->retry_work, delay); } else { switch (lport->state) { - case LPORT_ST_NONE: + case LPORT_ST_DISABLED: case LPORT_ST_READY: case LPORT_ST_RESET: case LPORT_ST_RPN_ID: @@ -1316,7 +1316,7 @@ static void fc_lport_timeout(struct work_struct *work) mutex_lock(&lport->lp_mutex); switch (lport->state) { - case LPORT_ST_NONE: + case LPORT_ST_DISABLED: case LPORT_ST_READY: case LPORT_ST_RESET: WARN_ON(1); @@ -1550,7 +1550,7 @@ int fc_lport_config(struct fc_lport *lport) INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); - fc_lport_state_enter(lport, LPORT_ST_NONE); + fc_lport_state_enter(lport, LPORT_ST_DISABLED); fc_lport_add_fc4_type(lport, FC_TYPE_FCP); fc_lport_add_fc4_type(lport, FC_TYPE_CT); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index efdb6ba310e5..b5c9b285b462 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -129,7 +129,7 @@ do { \ * FC HBA status */ enum fc_lport_state { - LPORT_ST_NONE = 0, + LPORT_ST_DISABLED = 0, LPORT_ST_FLOGI, LPORT_ST_DNS, LPORT_ST_RPN_ID, From 1190d925813aab80d17ff10f26c115f5846b3308 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:27 -0700 Subject: [PATCH 030/209] [SCSI] libfc: stop login after fabric logoff When removing the fcoe module, several lports were being shut down through fc_lport_fabric_logoff(). Occasionally, one would enter reset state before fc_lport_destroy() was called, and since link_up was still true, it would log back in. If we just clear link_up earlier, then we wouldn't be accepting LOGO requests from other initiators while we are shutting down. Fix by changing the LOGO response handler to enter DISABLED instead of RESET. Add an fc_lport_enter_disabled() function which does what fc_lport_enter_reset() did, except it doesn't proceed to FLOGI state. Move the code that was common between fc_lport_enter_reset() and fc_lport_enter_disabled() into a new fc_lport_reset_locked() function. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 42 ++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 3b28190ca2eb..7bb451ab0b87 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -930,19 +930,14 @@ int fc_lport_reset(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_reset); /** - * fc_rport_enter_reset() - Reset the local port + * fc_lport_reset_locked() - Reset the local port * @lport: Fibre Channel local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. */ -static void fc_lport_enter_reset(struct fc_lport *lport) +static void fc_lport_reset_locked(struct fc_lport *lport) { - FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RESET); - if (lport->dns_rp) lport->tt.rport_logoff(lport->dns_rp); @@ -956,11 +951,42 @@ static void fc_lport_enter_reset(struct fc_lport *lport) lport->tt.exch_mgr_reset(lport, 0, 0); fc_host_fabric_name(lport->host) = 0; fc_host_port_id(lport->host) = 0; +} +/** + * fc_lport_enter_reset() - Reset the local port + * @lport: Fibre Channel local port to be reset + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static void fc_lport_enter_reset(struct fc_lport *lport) +{ + FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_lport_reset_locked(lport); if (lport->link_up) fc_lport_enter_flogi(lport); } +/** + * fc_lport_enter_disabled() - disable the local port + * @lport: Fibre Channel local port to be reset + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static void fc_lport_enter_disabled(struct fc_lport *lport) +{ + FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", + fc_lport_state(lport)); + + fc_lport_state_enter(lport, LPORT_ST_DISABLED); + fc_lport_reset_locked(lport); +} + /** * fc_lport_error() - Handler for any errors * @lport: The fc_lport object @@ -1382,7 +1408,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) - fc_lport_enter_reset(lport); + fc_lport_enter_disabled(lport); else fc_lport_error(lport, fp); From e9ba8b427852937caee6ca39bb6f9a893bb32ae1 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:33 -0700 Subject: [PATCH 031/209] [SCSI] libfc: in fc_lport_destroy, flush rports after turning off link During an fcoe module unload, we saw a problem where fc_rport_work() finds the lport has been freed. The rdata points to an area containing 0x6b6b6b6b... the pool poison value from kmem_free(). In fcoe_if_destroy() we call fc_fabric_logoff() then fc_lport_destroy(). fc_fabric_logoff() flushes the remote port work, but we're still receiving requests, and an RSCN or PLOGI arrives which creates more rports. Note that although the LLD also checks link_up, it doesn't do it under the lport mutex, so it can deliver frames to fc_lport_recv_req() even after link_up is cleared. So, re-check link_up there. We need to flush the rports by calling disc_stop_final() after we clear link_up. 
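The fix follows the general rule that a flag must be re-checked under the lock that protects it before acting on it, since a check done outside that lock can be stale by the time the work is done. A generic sketch of the pattern; the types and helpers here are hypothetical, not the libfc code:

#include <linux/mutex.h>
#include <linux/types.h>

struct frame;                           /* placeholder for a received frame */

struct port {
        struct mutex lock;              /* protects link_up and the receive path */
        bool link_up;
};

void frame_free(struct frame *fp);                      /* placeholder */
void handle_request(struct port *p, struct frame *fp);  /* placeholder */

static void port_recv(struct port *p, struct frame *fp)
{
        mutex_lock(&p->lock);
        if (!p->link_up)
                frame_free(fp);         /* link already down: discard */
        else
                handle_request(p, fp);  /* still up: process normally */
        mutex_unlock(&p->lock);
}

The change to fc_lport_recv_req() below drops the frame once link_up has been cleared, and disc_stop_final() then flushes any rports created before that point.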
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 7bb451ab0b87..a430335ebf59 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -643,6 +643,7 @@ int fc_lport_destroy(struct fc_lport *lport) mutex_unlock(&lport->lp_mutex); lport->tt.fcp_abort_io(lport); + lport->tt.disc_stop_final(lport); lport->tt.exch_mgr_reset(lport, 0, 0); return 0; } @@ -844,7 +845,10 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, * RSCN here. These don't require a session. * Even if we had a session, it might not be ready. */ - if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) { + if (!lport->link_up) + fc_frame_free(fp); + else if (fh->fh_type == FC_TYPE_ELS && + fh->fh_r_ctl == FC_RCTL_ELS_REQ) { /* * Check opcode. */ From 84b05445b9f0b1ac2192f32260c916426d902d79 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:38 -0700 Subject: [PATCH 032/209] [SCSI] libfc: fix WARNING from fc_seq_start_next on closed exchanges We saw periodic messages like: WARNING: at drivers/scsi/libfc/fc_exch.c:825 fc_seq_start_next+0x30/0x4b This was due to trying to allocate a sequence in a request handler when the exchange had been reset. Delete the WARN_ON. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index e6d82d780acd..cab54996375c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -822,7 +822,6 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp) struct fc_exch *ep = fc_seq_exch(sp); spin_lock_bh(&ep->ex_lock); - WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0); sp = fc_seq_start_next_locked(sp); spin_unlock_bh(&ep->ex_lock); From 141940548c6919c22bf0573c68fd59d961e22475 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:43 -0700 Subject: [PATCH 033/209] [SCSI] libfc: rename rport state "NONE" to "DELETE". State RPORT_ST_NONE was intented to be an invalid state (0), never used. This was a misguided attempt to be sure it was always initialized. Having an extra state meaning nothing requires switch statements to have a case covering that state. State NONE has been used instead to mean the remote port is being deleted. Changing the name to RPORT_ST_DELETE. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 28 ++++++++++++++-------------- include/scsi/libfc.h | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 7162385f52eb..bf7364fc16cb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -77,13 +77,13 @@ static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *); static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { - [RPORT_ST_NONE] = "None", [RPORT_ST_INIT] = "Init", [RPORT_ST_PLOGI] = "PLOGI", [RPORT_ST_PRLI] = "PRLI", [RPORT_ST_RTV] = "RTV", [RPORT_ST_READY] = "Ready", [RPORT_ST_LOGO] = "LOGO", + [RPORT_ST_DELETE] = "Delete", }; static void fc_rport_rogue_destroy(struct device *dev) @@ -326,8 +326,8 @@ int fc_rport_logoff(struct fc_rport *rport) FC_RPORT_DBG(rport, "Remove port\n"); - if (rdata->rp_state == RPORT_ST_NONE) { - FC_RPORT_DBG(rport, "Port in NONE state, not removing\n"); + if (rdata->rp_state == RPORT_ST_DELETE) { + FC_RPORT_DBG(rport, "Port in Delete state, not removing\n"); mutex_unlock(&rdata->rp_mutex); goto out; } @@ -335,10 +335,10 @@ int fc_rport_logoff(struct fc_rport *rport) fc_rport_enter_logo(rport); /* - * Change the state to NONE so that we discard + * Change the state to Delete so that we discard * the response. */ - fc_rport_state_enter(rport, RPORT_ST_NONE); + fc_rport_state_enter(rport, RPORT_ST_DELETE); mutex_unlock(&rdata->rp_mutex); @@ -405,7 +405,7 @@ static void fc_rport_timeout(struct work_struct *work) break; case RPORT_ST_READY: case RPORT_ST_INIT: - case RPORT_ST_NONE: + case RPORT_ST_DELETE: break; } @@ -433,14 +433,14 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) case RPORT_ST_PRLI: case RPORT_ST_LOGO: rdata->event = RPORT_EV_FAILED; - fc_rport_state_enter(rport, RPORT_ST_NONE); + fc_rport_state_enter(rport, RPORT_ST_DELETE); queue_work(rport_event_queue, &rdata->event_work); break; case RPORT_ST_RTV: fc_rport_enter_ready(rport); break; - case RPORT_ST_NONE: + case RPORT_ST_DELETE: case RPORT_ST_READY: case RPORT_ST_INIT: break; @@ -652,7 +652,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, } else { FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); rdata->event = RPORT_EV_FAILED; - fc_rport_state_enter(rport, RPORT_ST_NONE); + fc_rport_state_enter(rport, RPORT_ST_DELETE); queue_work(rport_event_queue, &rdata->event_work); } @@ -703,7 +703,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, } else { FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); rdata->event = RPORT_EV_LOGO; - fc_rport_state_enter(rport, RPORT_ST_NONE); + fc_rport_state_enter(rport, RPORT_ST_DELETE); queue_work(rport_event_queue, &rdata->event_work); } @@ -1012,7 +1012,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, "- ignored for now\n", rdata->rp_state); /* XXX TBD - should reset */ break; - case RPORT_ST_NONE: + case RPORT_ST_DELETE: default: FC_RPORT_DBG(rport, "Received PLOGI in unexpected " "state %d\n", rdata->rp_state); @@ -1238,7 +1238,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", fc_rport_state(rport)); - if (rdata->rp_state == RPORT_ST_NONE) { + if (rdata->rp_state == RPORT_ST_DELETE) { fc_frame_free(fp); return; } @@ -1271,13 +1271,13 
@@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", fc_rport_state(rport)); - if (rdata->rp_state == RPORT_ST_NONE) { + if (rdata->rp_state == RPORT_ST_DELETE) { fc_frame_free(fp); return; } rdata->event = RPORT_EV_LOGO; - fc_rport_state_enter(rport, RPORT_ST_NONE); + fc_rport_state_enter(rport, RPORT_ST_DELETE); queue_work(rport_event_queue, &rdata->event_work); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index b5c9b285b462..04db7a9e631b 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -147,13 +147,13 @@ enum fc_disc_event { }; enum fc_rport_state { - RPORT_ST_NONE = 0, RPORT_ST_INIT, /* initialized */ RPORT_ST_PLOGI, /* waiting for PLOGI completion */ RPORT_ST_PRLI, /* waiting for PRLI completion */ RPORT_ST_RTV, /* waiting for RTV completion */ RPORT_ST_READY, /* ready for use */ RPORT_ST_LOGO, /* port logout sent */ + RPORT_ST_DELETE, /* port being deleted */ }; enum fc_rport_trans_state { From 5f7ea3b7f81d5e5180647a071998b73a841bdba9 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:49 -0700 Subject: [PATCH 034/209] [SCSI] libfc: fc_rport_logoff should not drop the lock fc_rport_logoff drops the rport lock in order to cancel work that may be pending. This is undesirable as the state can completely change, and the caller may not expect that the lock could've been dropped. If there is work pending, it will acquire the rdata mutex and so we're protected and can change the event from READY to DELETE. Queue the work only if there is no event already pending. There were a couple other cases where the state was set to DELETE and work queued, even though the state may have already been DELETE. Fix these using a common function fc_rport_enter_delete(). Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 60 ++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index bf7364fc16cb..a86df0b41ae3 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -230,6 +230,7 @@ static void fc_rport_work(struct work_struct *work) ids.port_name = rport->port_name; ids.node_name = rport->node_name; + rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); new_rport = fc_remote_port_add(lport->host, 0, &ids); @@ -310,6 +311,37 @@ int fc_rport_login(struct fc_rport *rport) return 0; } +/** + * fc_rport_enter_delete() - schedule a remote port to be deleted. + * @rport: Fibre Channel remote port + * @event: event to report as the reason for deletion + * + * Locking Note: Called with the rport lock held. + * + * Allow state change into DELETE only once. + * + * Call queue_work only if there's no event already pending. + * Set the new event so that the old pending event will not occur. + * Since we have the mutex, even if fc_rport_work() is already started, + * it'll see the new event. 
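The "one pending event" scheme described above is a small but easy-to-get-wrong pattern; a generic sketch with hypothetical names, not the libfc structures:

#include <linux/mutex.h>
#include <linux/workqueue.h>

enum obj_event { EV_NONE, EV_READY, EV_FAILED, EV_STOP };

struct obj {
        struct mutex lock;              /* protects event */
        enum obj_event event;           /* newest pending event, EV_NONE if none */
        struct work_struct work;        /* handler reads event under lock */
};

/* Caller holds obj->lock. */
static void obj_post_event(struct obj *o, enum obj_event ev)
{
        /*
         * Queue the work only on the transition from "nothing pending" to
         * "something pending"; otherwise just overwrite the event, and the
         * already-queued handler observes the newest value when it takes
         * the lock.
         */
        if (o->event == EV_NONE)
                schedule_work(&o->work);
        o->event = ev;
}

fc_rport_enter_delete() in the hunks below applies this idea, which is what lets fc_rport_logoff() stop dropping rp_mutex.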
+ */ +static void fc_rport_enter_delete(struct fc_rport *rport, + enum fc_rport_event event) +{ + struct fc_rport_libfc_priv *rdata = rport->dd_data; + + if (rdata->rp_state == RPORT_ST_DELETE) + return; + + FC_RPORT_DBG(rport, "Delete port\n"); + + fc_rport_state_enter(rport, RPORT_ST_DELETE); + + if (rdata->event == RPORT_EV_NONE) + queue_work(rport_event_queue, &rdata->event_work); + rdata->event = event; +} + /** * fc_rport_logoff() - Logoff and remove an rport * @rport: Fibre Channel remote port to be removed @@ -338,17 +370,7 @@ int fc_rport_logoff(struct fc_rport *rport) * Change the state to Delete so that we discard * the response. */ - fc_rport_state_enter(rport, RPORT_ST_DELETE); - - mutex_unlock(&rdata->rp_mutex); - - cancel_delayed_work_sync(&rdata->retry_work); - - mutex_lock(&rdata->rp_mutex); - - rdata->event = RPORT_EV_STOP; - queue_work(rport_event_queue, &rdata->event_work); - + fc_rport_enter_delete(rport, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); out: @@ -370,8 +392,9 @@ static void fc_rport_enter_ready(struct fc_rport *rport) FC_RPORT_DBG(rport, "Port is Ready\n"); + if (rdata->event == RPORT_EV_NONE) + queue_work(rport_event_queue, &rdata->event_work); rdata->event = RPORT_EV_CREATED; - queue_work(rport_event_queue, &rdata->event_work); } /** @@ -432,10 +455,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) case RPORT_ST_PLOGI: case RPORT_ST_PRLI: case RPORT_ST_LOGO: - rdata->event = RPORT_EV_FAILED; - fc_rport_state_enter(rport, RPORT_ST_DELETE); - queue_work(rport_event_queue, - &rdata->event_work); + fc_rport_enter_delete(rport, RPORT_EV_FAILED); break; case RPORT_ST_RTV: fc_rport_enter_ready(rport); @@ -651,9 +671,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, } else { FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); - rdata->event = RPORT_EV_FAILED; - fc_rport_state_enter(rport, RPORT_ST_DELETE); - queue_work(rport_event_queue, &rdata->event_work); + fc_rport_enter_delete(rport, RPORT_EV_FAILED); } out: @@ -702,9 +720,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, fc_rport_enter_rtv(rport); } else { FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); - rdata->event = RPORT_EV_LOGO; - fc_rport_state_enter(rport, RPORT_ST_DELETE); - queue_work(rport_event_queue, &rdata->event_work); + fc_rport_enter_delete(rport, RPORT_EV_LOGO); } out: From 201e5795b7b9582accb6d83316e30f24d7d40fd3 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Wed, 29 Jul 2009 17:04:54 -0700 Subject: [PATCH 035/209] [SCSI] libfc: fix: cancel rport retry timer The timer for rport retries wasn't getting canceled, and would occasionally go off after the module was unloaded. Add logic to cancel the timer in fc_rport_work(). Since we cancel the timer before deleting the rdata, it is no longer necessary to do a get_device() for the pending timer. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a86df0b41ae3..90cc90dd3b5d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -276,6 +276,7 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); if (rport_ops->event_callback) rport_ops->event_callback(lport, rport, event); + cancel_delayed_work_sync(&rdata->retry_work); if (trans_state == FC_PORTSTATE_ROGUE) put_device(&rport->dev); else { @@ -433,7 +434,6 @@ static void fc_rport_timeout(struct work_struct *work) } mutex_unlock(&rdata->rp_mutex); - put_device(&rport->dev); } /** @@ -494,7 +494,6 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) /* no additional delay on exchange timeouts */ if (PTR_ERR(fp) == -FC_EX_TIMEOUT) delay = 0; - get_device(&rport->dev); schedule_delayed_work(&rdata->retry_work, delay); return; } From 96316099ac3cb259eac2d6891f3c75b38b29d26e Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:00 -0700 Subject: [PATCH 036/209] [SCSI] fcoe, libfc: adds exchange manager(EM) anchor list per lport and related APIs Adds EM list using a anchor struct fc_exch_mgr_anchor, anchor is used to allow same EM instance sharing across more than one lport on a eth device, this implementation is per discussed design posted at http://www.open-fcoe.org/pipermail/devel/2009-June/002566.html. The shared EM is required for multiple lports on eth device when using multiple VLANs or NPIV. Adds fc_exch_mgr_add API to add a EM to the lport and fc_exch_mgr_del API to delete previously added EM. Also adds function fc_exch_mgr_destroy() to destroy allocated EM. The kref is added to the EM to keep track of EM usage count, the EM is destroyed when no longer in use upon kref reaching to zero. The caller can specify match function to fc_exch_mgr_add, this will be used in determining exchange allocation from its EM or not. Moved calling of fcoe_em_config below fcoe_libfc_config calling, so that list head lp->ema_list is initialized before configuring EM. 
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 16 ++++++------ drivers/scsi/libfc/fc_exch.c | 48 +++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_lport.c | 1 + include/scsi/libfc.h | 24 ++++++++++++++++++ 4 files changed, 81 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 14a4017a1535..719a99d4a438 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -603,14 +603,6 @@ static int fcoe_if_create(struct net_device *netdev) goto out_netdev_cleanup; } - /* lport exch manager allocation */ - rc = fcoe_em_config(lp); - if (rc) { - FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " - "interface\n"); - goto out_netdev_cleanup; - } - /* Initialize the library */ rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); if (rc) { @@ -619,6 +611,14 @@ static int fcoe_if_create(struct net_device *netdev) goto out_lp_destroy; } + /* lport exch manager allocation */ + rc = fcoe_em_config(lp); + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " + "interface\n"); + goto out_lp_destroy; + } + /* add to lports list */ fcoe_hostlist_add(lp); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index cab54996375c..f1fa2b196e98 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -55,6 +55,7 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ */ struct fc_exch_mgr { enum fc_class class; /* default class for sequences */ + struct kref kref; /* exchange mgr reference count */ spinlock_t em_lock; /* exchange manager lock, must be taken before ex_lock */ u16 last_xid; /* last allocated exchange ID */ @@ -84,6 +85,12 @@ struct fc_exch_mgr { }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +struct fc_exch_mgr_anchor { + struct list_head ema_list; + struct fc_exch_mgr *mp; + bool (*match)(struct fc_frame *); +}; + static void fc_exch_rrq(struct fc_exch *); static void fc_seq_ls_acc(struct fc_seq *); static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, @@ -1729,6 +1736,47 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) fc_frame_free(fp); } +struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, + struct fc_exch_mgr *mp, + bool (*match)(struct fc_frame *)) +{ + struct fc_exch_mgr_anchor *ema; + + ema = kmalloc(sizeof(*ema), GFP_ATOMIC); + if (!ema) + return ema; + + ema->mp = mp; + ema->match = match; + /* add EM anchor to EM anchors list */ + list_add_tail(&ema->ema_list, &lport->ema_list); + kref_get(&mp->kref); + return ema; +} +EXPORT_SYMBOL(fc_exch_mgr_add); + +static void fc_exch_mgr_destroy(struct kref *kref) +{ + struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); + + /* + * The total exch count must be zero + * before freeing exchange manager. 
+ */ + WARN_ON(mp->total_exches != 0); + mempool_destroy(mp->ep_pool); + kfree(mp); +} + +void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) +{ + /* remove EM anchor from EM anchors list */ + list_del(&ema->ema_list); + kref_put(&ema->mp->kref, fc_exch_mgr_destroy); + kfree(ema); +} +EXPORT_SYMBOL(fc_exch_mgr_del); + struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, u16 min_xid, u16 max_xid) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index a430335ebf59..ca8ea264b684 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1618,6 +1618,7 @@ int fc_lport_init(struct fc_lport *lport) if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; + INIT_LIST_HEAD(&lport->ema_list); return 0; } EXPORT_SYMBOL(fc_lport_init); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 04db7a9e631b..b381b1ca9aec 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -348,6 +348,7 @@ static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp) */ struct fc_exch_mgr; +struct fc_exch_mgr_anchor; /* * Sequence. @@ -709,6 +710,7 @@ struct fc_lport { /* Associations */ struct Scsi_Host *host; struct fc_exch_mgr *emp; + struct list_head ema_list; struct fc_rport *dns_rp; struct fc_rport *ptp_rp; void *scsi_priv; @@ -963,6 +965,28 @@ int fc_elsct_init(struct fc_lport *lp); */ int fc_exch_init(struct fc_lport *lp); +/* + * Adds Exchange Manager (EM) mp to lport. + * + * Adds specified mp to lport using struct fc_exch_mgr_anchor, + * the struct fc_exch_mgr_anchor allows same EM sharing by + * more than one lport with their specified match function, + * the match function is used in allocating exchange from + * added mp. + */ +struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, + struct fc_exch_mgr *mp, + bool (*match)(struct fc_frame *)); + +/* + * Deletes Exchange Manager (EM) from lport by removing + * its anchor ema from lport. + * + * If removed anchor ema was the last user of its associated EM + * then also destroys associated EM. + */ +void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema); + /* * Allocates an Exchange Manager (EM). * From d459b7ea1b4c7aa3dacfeee174d02b2f7a95850d Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 29 Jul 2009 17:05:05 -0700 Subject: [PATCH 037/209] [SCSI] libfc: Remove the FC_EM_DBG macro Currently there is a 1:1 relationship between the lport and exchange manager. This macro takes an EM as an argument and determines the lport from it. However, later patches will use an EM list per lport, so we will no longer have this 1:1 relationship- this macro must change. The FC_EM_DBG macro is rarely used. There are four callers, two can use FC_LPORT_DBG instead and two can be removed since they're not necessary. This patch makes those changes and removes the macro. 
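The conversion is mechanical at each call site; what matters is that the log prefix now comes from the lport handling the frame rather than from the EM's back-pointer to an lport, which goes away once an EM can be shared. Taken from the hunks below, the before/after shape is:

    /* before: prefix derived from the EM's back-pointer to its lport */
    FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);

    /* after: prefix taken directly from the lport passed down the Rx path */
    FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);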
Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 13 ++++++------- include/scsi/libfc.h | 6 ------ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index f1fa2b196e98..3ad7f88e7ae3 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1129,7 +1129,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, lp->tt.lport_recv(lp, sp, fp); fc_exch_release(ep); /* release from lookup */ } else { - FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); + FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); fc_frame_free(fp); } } @@ -1235,13 +1235,12 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) struct fc_seq *sp; sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ - if (!sp) { + + if (!sp) atomic_inc(&mp->stats.xid_not_found); - FC_EM_DBG(mp, "seq lookup failed\n"); - } else { + else atomic_inc(&mp->stats.non_bls_resp); - FC_EM_DBG(mp, "non-BLS response to sequence"); - } + fc_frame_free(fp); } @@ -1950,7 +1949,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, fc_exch_recv_req(lp, mp, fp); break; default: - FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); + FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); fc_frame_free(fp); break; } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index b381b1ca9aec..f1bde91f98a2 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -88,12 +88,6 @@ do { \ (pkt)->lp->host->host_no, \ pkt->rport->port_id, ##args)) -#define FC_EM_DBG(em, fmt, args...) \ - FC_CHECK_LOGGING(FC_EM_LOGGING, \ - printk(KERN_INFO "host%u: em: " fmt, \ - (em)->lp->host->host_no, \ - ##args)) - #define FC_EXCH_DBG(exch, fmt, args...) \ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ printk(KERN_INFO "host%u: xid %4x: " fmt, \ From 52ff878c912215210f53c0a080552dd6ba3055a2 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:10 -0700 Subject: [PATCH 038/209] [SCSI] fcoe, fnic, libfc: modifies current code paths to use EM anchor list Modifies current code to use EM anchor list in EM allocation, EM free, EM reset, exch allocation and exch lookup code paths. 1. Modifies fc_exch_mgr_alloc to accept EM match function and then have allocated EM added to the lport using fc_exch_mgr_add API while also updating EM kref for newly added EM. 2. Updates fc_exch_mgr_free API to accept only lport pointer instead EM and then have this API free all EMs of the lport from EM anchor list. 3. Removes single lport pointer link from the EM, which was used in associating lport pointer in newly allocated exchange. Instead have lport pointer passed along new exchange allocation call path and then store passed lport pointer in newly allocated exchange, this will allow a single EM instance to be used across more than one lport and used in EM reset to reset only lport specific exchanges. 4. Modifies fc_exch_mgr_reset to reset all EMs from the EM anchor list of the lport, adds additional exch lport pointer (ep->lp) check for shared EM case to reset exchange specific to a lport requested reset. 5. Updates exch allocation API fc_exch_alloc to use EM anchor list and its anchor match func pointer. The fc_exch_alloc will walk the list of EMs until it finds a match, a match will be either null match func pointer or call to match function returning true value. 6. 
Updates fc_exch_recv to accept incoming frame on local port using only lport pointer and frame pointer without specifying EM instance of incoming frame. Instead modified fc_exch_recv to locate EM for the incoming frame by matching xid of incoming frame against a EM xid range. This change was required to use EM list in libfc Rx path and after this change the lport fc_exch_mgr pointer emp is not needed anymore, so removed emp pointer. 7. Updates fnic for removed lport emp pointer and above modified libfc APIs fc_exch_recv, fc_exch_mgr_alloc and fc_exch_mgr_free. 8. Removes exch_get and exch_put from libfc_function_template as these are no longer needed with EM anchor list and its match function use. Also removes its default function fc_exch_get. A defect this patch introduced regarding the libfc initialization order in the fnic driver was fixed by Joe Eykholt . Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 16 ++- drivers/scsi/fcoe/libfcoe.c | 2 +- drivers/scsi/fnic/fnic_fcs.c | 2 +- drivers/scsi/fnic/fnic_main.c | 20 ++-- drivers/scsi/libfc/fc_exch.c | 185 +++++++++++++++++++++------------- include/scsi/libfc.h | 48 ++------- 6 files changed, 139 insertions(+), 134 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 719a99d4a438..ebf2e20370d7 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -423,11 +423,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, */ static inline int fcoe_em_config(struct fc_lport *lp) { - BUG_ON(lp->emp); - - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, - FCOE_MIN_XID, FCOE_MAX_XID); - if (!lp->emp) + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCOE_MIN_XID, + FCOE_MAX_XID, NULL)) return -ENOMEM; return 0; @@ -478,8 +475,7 @@ static int fcoe_if_destroy(struct net_device *netdev) scsi_remove_host(lp->host); /* There are no more rports or I/O, free the EM */ - if (lp->emp) - fc_exch_mgr_free(lp->emp); + fc_exch_mgr_free(lp); /* Free existing skbs */ fcoe_clean_pending_queue(lp); @@ -634,7 +630,7 @@ static int fcoe_if_create(struct net_device *netdev) return rc; out_lp_destroy: - fc_exch_mgr_free(lp->emp); /* Free the EM */ + fc_exch_mgr_free(lp); out_netdev_cleanup: fcoe_netdev_cleanup(fc); out_host_put: @@ -1277,7 +1273,7 @@ int fcoe_percpu_receive_thread(void *arg) fh = fc_frame_header_get(fp); if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { - fc_exch_recv(lp, lp->emp, fp); + fc_exch_recv(lp, fp); continue; } if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { @@ -1298,7 +1294,7 @@ int fcoe_percpu_receive_thread(void *arg) fc_frame_free(fp); continue; } - fc_exch_recv(lp, lp->emp, fp); + fc_exch_recv(lp, fp); } return 0; } diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 78caa6be1130..4db719d6ada1 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -885,7 +885,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) stats->RxFrames++; stats->RxWords += skb->len / FIP_BPW; - fc_exch_recv(lp, lp->emp, fp); + fc_exch_recv(lp, fp); return; len_err: diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 07e6eedb83ce..50db3e36a619 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -115,7 +115,7 @@ void fnic_handle_frame(struct work_struct *work) } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fc_exch_recv(lp, lp->emp, fp); + fc_exch_recv(lp, fp); } } 
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 2c266c01dc5a..71c7bbe26d05 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -671,14 +671,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev, lp->link_up = 0; lp->tt = fnic_transport_template; - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, - FCPIO_HOST_EXCH_RANGE_START, - FCPIO_HOST_EXCH_RANGE_END); - if (!lp->emp) { - err = -ENOMEM; - goto err_out_remove_scsi_host; - } - lp->max_retry_count = fnic->config.flogi_retries; lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | @@ -693,12 +685,18 @@ static int __devinit fnic_probe(struct pci_dev *pdev, fc_set_wwnn(lp, fnic->config.node_wwn); fc_set_wwpn(lp, fnic->config.port_wwn); - fc_exch_init(lp); fc_lport_init(lp); + fc_exch_init(lp); fc_elsct_init(lp); fc_rport_init(lp); fc_disc_init(lp); + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, + FCPIO_HOST_EXCH_RANGE_END, NULL)) { + err = -ENOMEM; + goto err_out_remove_scsi_host; + } + fc_lport_config(lp); if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + @@ -738,7 +736,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, return 0; err_out_free_exch_mgr: - fc_exch_mgr_free(lp->emp); + fc_exch_mgr_free(lp); err_out_remove_scsi_host: fc_remove_host(fnic->lport->host); scsi_remove_host(fnic->lport->host); @@ -827,7 +825,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev) fc_remove_host(fnic->lport->host); scsi_remove_host(fnic->lport->host); - fc_exch_mgr_free(fnic->lport->emp); + fc_exch_mgr_free(fnic->lport); vnic_dev_notify_unset(fnic->vdev); fnic_free_vnic_resources(fnic); fnic_free_intr(fnic); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 3ad7f88e7ae3..324589a5cc03 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -65,7 +65,6 @@ struct fc_exch_mgr { u16 last_read; /* last xid allocated for read */ u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ - struct fc_lport *lp; /* fc device instance */ mempool_t *ep_pool; /* reserve ep's */ /* @@ -275,8 +274,6 @@ static void fc_exch_release(struct fc_exch *ep) mp = ep->em; if (ep->destructor) ep->destructor(&ep->seq, ep->arg); - if (ep->lp->tt.exch_put) - ep->lp->tt.exch_put(ep->lp, mp, ep->xid); WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE)); mempool_free(ep, mp->ep_pool); } @@ -513,17 +510,20 @@ static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) return xid; } -/* - * fc_exch_alloc - allocate an exchange. - * @mp : ptr to the exchange manager - * @xid: input xid +/** + * fc_exch_em_alloc() - allocate an exchange from a specified EM. + * @lport: ptr to the local port + * @mp: ptr to the exchange manager + * @fp: ptr to the FC frame + * @xid: input xid * * if xid is supplied zero then assign next free exchange ID * from exchange manager, otherwise use supplied xid. * Returns with exch lock held. 
*/ -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, - struct fc_frame *fp, u16 xid) +static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp, u16 xid) { struct fc_exch *ep; @@ -566,7 +566,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, */ ep->oxid = ep->xid = xid; ep->em = mp; - ep->lp = mp->lp; + ep->lp = lport; ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; ep->class = mp->class; @@ -579,6 +579,31 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, mempool_free(ep, mp->ep_pool); return NULL; } + +/** + * fc_exch_alloc() - allocate an exchange. + * @lport: ptr to the local port + * @fp: ptr to the FC frame + * + * This function walks the list of the exchange manager(EM) + * anchors to select a EM for new exchange allocation. The + * EM is selected having either a NULL match function pointer + * or call to match function returning true. + */ +struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_exch_mgr_anchor *ema; + struct fc_exch *ep; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if (!ema->match || ema->match(fp)) { + ep = fc_exch_em_alloc(lport, ema->mp, fp, 0); + if (ep) + return ep; + } + } + return NULL; +} EXPORT_SYMBOL(fc_exch_alloc); /* @@ -617,12 +642,14 @@ EXPORT_SYMBOL(fc_exch_done); * Allocate a new exchange as responder. * Sets the responder ID in the frame header. */ -static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) +static struct fc_exch *fc_exch_resp(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp) { struct fc_exch *ep; struct fc_frame_header *fh; - ep = mp->lp->tt.exch_get(mp->lp, fp); + ep = fc_exch_alloc(lport, fp); if (ep) { ep->class = fc_frame_class(fp); @@ -648,7 +675,7 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) ep->esb_stat &= ~ESB_ST_SEQ_INIT; fc_exch_hold(ep); /* hold for caller */ - spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */ + spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */ } return ep; } @@ -658,7 +685,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold * on the ep that should be released by the caller. 
*/ -static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, +static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + struct fc_exch_mgr *mp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -712,7 +740,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, reject = FC_RJT_RX_ID; goto rel; } - ep = fc_exch_resp(mp, fp); + ep = fc_exch_resp(lport, mp, fp); if (!ep) { reject = FC_RJT_EXCH_EST; /* XXX */ goto out; @@ -1103,7 +1131,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, enum fc_pf_rjt_reason reject; fr_seq(fp) = NULL; - reject = fc_seq_lookup_recip(mp, fp); + reject = fc_seq_lookup_recip(lp, mp, fp); if (reject == FC_RJT_NONE) { sp = fr_seq(fp); /* sequence will be held */ ep = fc_seq_exch(sp); @@ -1467,29 +1495,34 @@ void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) { struct fc_exch *ep; struct fc_exch *next; - struct fc_exch_mgr *mp = lp->emp; + struct fc_exch_mgr *mp; + struct fc_exch_mgr_anchor *ema; - spin_lock_bh(&mp->em_lock); + list_for_each_entry(ema, &lp->ema_list, ema_list) { + mp = ema->mp; + spin_lock_bh(&mp->em_lock); restart: - list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { - if ((sid == 0 || sid == ep->sid) && - (did == 0 || did == ep->did)) { - fc_exch_hold(ep); - spin_unlock_bh(&mp->em_lock); + list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { + if ((lp == ep->lp) && + (sid == 0 || sid == ep->sid) && + (did == 0 || did == ep->did)) { + fc_exch_hold(ep); + spin_unlock_bh(&mp->em_lock); - fc_exch_reset(ep); + fc_exch_reset(ep); - fc_exch_release(ep); - spin_lock_bh(&mp->em_lock); + fc_exch_release(ep); + spin_lock_bh(&mp->em_lock); - /* - * must restart loop incase while lock was down - * multiple eps were released. - */ - goto restart; + /* + * must restart loop incase while lock + * was down multiple eps were released. + */ + goto restart; + } } + spin_unlock_bh(&mp->em_lock); } - spin_unlock_bh(&mp->em_lock); } EXPORT_SYMBOL(fc_exch_mgr_reset); @@ -1778,7 +1811,8 @@ EXPORT_SYMBOL(fc_exch_mgr_del); struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, - u16 min_xid, u16 max_xid) + u16 min_xid, u16 max_xid, + bool (*match)(struct fc_frame *)) { struct fc_exch_mgr *mp; size_t len; @@ -1803,7 +1837,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, mp->class = class; mp->total_exches = 0; mp->exches = (struct fc_exch **)(mp + 1); - mp->lp = lp; /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; @@ -1826,6 +1859,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (!mp->ep_pool) goto free_mp; + kref_init(&mp->kref); + if (!fc_exch_mgr_add(lp, mp, match)) { + mempool_destroy(mp->ep_pool); + goto free_mp; + } + + /* + * Above kref_init() sets mp->kref to 1 and then + * call to fc_exch_mgr_add incremented mp->kref again, + * so adjust that extra increment. + */ + kref_put(&mp->kref, fc_exch_mgr_destroy); return mp; free_mp: @@ -1834,27 +1879,15 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, } EXPORT_SYMBOL(fc_exch_mgr_alloc); -void fc_exch_mgr_free(struct fc_exch_mgr *mp) +void fc_exch_mgr_free(struct fc_lport *lport) { - WARN_ON(!mp); - /* - * The total exch count must be zero - * before freeing exchange manager. 
- */ - WARN_ON(mp->total_exches != 0); - mempool_destroy(mp->ep_pool); - kfree(mp); + struct fc_exch_mgr_anchor *ema, *next; + + list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) + fc_exch_mgr_del(ema); } EXPORT_SYMBOL(fc_exch_mgr_free); -struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp) -{ - if (!lp || !lp->emp) - return NULL; - - return fc_exch_alloc(lp->emp, fp, 0); -} -EXPORT_SYMBOL(fc_exch_get); struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame *fp, @@ -1869,7 +1902,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame_header *fh; int rc = 1; - ep = lp->tt.exch_get(lp, fp); + ep = fc_exch_alloc(lp, fp); if (!ep) { fc_frame_free(fp); return NULL; @@ -1914,24 +1947,44 @@ EXPORT_SYMBOL(fc_exch_seq_send); /* * Receive a frame */ -void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, - struct fc_frame *fp) +void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); - u32 f_ctl; + struct fc_exch_mgr_anchor *ema; + u32 f_ctl, found = 0; + u16 oxid; /* lport lock ? */ - if (!lp || !mp || lp->state == LPORT_ST_DISABLED) { + if (!lp || lp->state == LPORT_ST_DISABLED) { FC_LPORT_DBG(lp, "Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); return; } + f_ctl = ntoh24(fh->fh_f_ctl); + oxid = ntohs(fh->fh_ox_id); + if (f_ctl & FC_FC_EX_CTX) { + list_for_each_entry(ema, &lp->ema_list, ema_list) { + if ((oxid >= ema->mp->min_xid) && + (oxid <= ema->mp->max_xid)) { + found = 1; + break; + } + } + + if (!found) { + FC_LPORT_DBG(lp, "Received response for out " + "of range oxid:%hx\n", oxid); + fc_frame_free(fp); + return; + } + } else + ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); + /* * If frame is marked invalid, just drop it. */ - f_ctl = ntoh24(fh->fh_f_ctl); switch (fr_eof(fp)) { case FC_EOF_T: if (f_ctl & FC_FC_END_SEQ) @@ -1939,34 +1992,24 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, /* fall through */ case FC_EOF_N: if (fh->fh_type == FC_TYPE_BLS) - fc_exch_recv_bls(mp, fp); + fc_exch_recv_bls(ema->mp, fp); else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX) - fc_exch_recv_seq_resp(mp, fp); + fc_exch_recv_seq_resp(ema->mp, fp); else if (f_ctl & FC_FC_SEQ_CTX) - fc_exch_recv_resp(mp, fp); + fc_exch_recv_resp(ema->mp, fp); else - fc_exch_recv_req(lp, mp, fp); + fc_exch_recv_req(lp, ema->mp, fp); break; default: FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); fc_frame_free(fp); - break; } } EXPORT_SYMBOL(fc_exch_recv); int fc_exch_init(struct fc_lport *lp) { - if (!lp->tt.exch_get) { - /* - * exch_put() should be NULL if - * exch_get() is NULL - */ - WARN_ON(lp->tt.exch_put); - lp->tt.exch_get = fc_exch_get; - } - if (!lp->tt.seq_start_next) lp->tt.seq_start_next = fc_seq_start_next; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f1bde91f98a2..c2b928cfafb9 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -517,25 +517,6 @@ struct libfc_function_template { */ void (*exch_done)(struct fc_seq *sp); - /* - * Assigns a EM and a free XID for an new exchange and then - * allocates a new exchange and sequence pair. - * The fp can be used to determine free XID. - * - * STATUS: OPTIONAL - */ - struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp); - - /* - * Release previously assigned XID by exch_get API. - * The LLD may implement this if XID is assigned by LLD - * in exch_get(). 
- * - * STATUS: OPTIONAL - */ - void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp, - u16 ex_id); - /* * Start a new sequence on the same exchange/sequence tuple. * @@ -703,7 +684,6 @@ struct fc_lport { /* Associations */ struct Scsi_Host *host; - struct fc_exch_mgr *emp; struct list_head ema_list; struct fc_rport *dns_rp; struct fc_rport *ptp_rp; @@ -996,27 +976,25 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema); * a new exchange. * The LLD may choose to have multiple EMs, * e.g. one EM instance per CPU receive thread in LLD. - * The LLD can use exch_get() of struct libfc_function_template - * to specify XID for a new exchange within - * a specified EM instance. * - * The em_idx to uniquely identify an EM instance. + * Specified match function is used in allocating exchanges + * from newly allocated EM. */ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, u16 min_xid, - u16 max_xid); + u16 max_xid, + bool (*match)(struct fc_frame *)); /* - * Free an exchange manager. + * Free all exchange managers of a lport. */ -void fc_exch_mgr_free(struct fc_exch_mgr *mp); +void fc_exch_mgr_free(struct fc_lport *lport); /* * Receive a frame on specified local port and exchange manager. */ -void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, - struct fc_frame *fp); +void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp); /* * This function is for exch_seq_send function pointer in @@ -1057,20 +1035,10 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec); */ void fc_exch_done(struct fc_seq *sp); -/* - * Assigns a EM and XID for a frame and then allocates - * a new exchange and sequence pair. - * The fp can be used to determine free XID. - */ -struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp); - /* * Allocate a new exchange and sequence pair. - * if ex_id is zero then next free exchange id - * from specified exchange manger mp will be assigned. */ -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, - struct fc_frame *fp, u16 ex_id); +struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp); /* * Start a new sequence on the same exchange as the supplied sequence. */ From e8af4d4380babc89d193c16163f070a6418f033b Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:15 -0700 Subject: [PATCH 039/209] [SCSI] fcoe: modifies fcoe_hostlist_lock uses as prep work to add shared offload EM Modifies fcoe_hostlist_lock uses such that a new EM allocation in fcoe_em_config and adding new fcoe_softc using fcoe_hostlist_add are atomic, this is to ensure that a shared offload EM gets allocated only once per eth device for its all lports. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ebf2e20370d7..86410b9a30c3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -419,6 +419,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, * fcoe_em_config() - allocates em for this lport * @lp: the port that em is to allocated for * + * Called with write fcoe_hostlist_lock held. 
+ * * Returns : 0 on success */ static inline int fcoe_em_config(struct fc_lport *lp) @@ -607,6 +609,13 @@ static int fcoe_if_create(struct net_device *netdev) goto out_lp_destroy; } + /* + * fcoe_em_alloc() and fcoe_hostlist_add() both + * need to be atomic under fcoe_hostlist_lock + * since fcoe_em_alloc() looks for an existing EM + * instance on host list updated by fcoe_hostlist_add(). + */ + write_lock(&fcoe_hostlist_lock); /* lport exch manager allocation */ rc = fcoe_em_config(lp); if (rc) { @@ -617,6 +626,7 @@ static int fcoe_if_create(struct net_device *netdev) /* add to lports list */ fcoe_hostlist_add(lp); + write_unlock(&fcoe_hostlist_lock); lp->boot_time = jiffies; @@ -1720,6 +1730,8 @@ int fcoe_reset(struct Scsi_Host *shost) * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device * @dev: this is currently ptr to net_device * + * Called with fcoe_hostlist_lock held. + * * Returns: NULL or the located fcoe_softc */ static struct fcoe_softc * @@ -1727,14 +1739,10 @@ fcoe_hostlist_lookup_softc(const struct net_device *dev) { struct fcoe_softc *fc; - read_lock(&fcoe_hostlist_lock); list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->real_dev == dev) { - read_unlock(&fcoe_hostlist_lock); + if (fc->real_dev == dev) return fc; - } } - read_unlock(&fcoe_hostlist_lock); return NULL; } @@ -1748,7 +1756,9 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) { struct fcoe_softc *fc; + read_lock(&fcoe_hostlist_lock); fc = fcoe_hostlist_lookup_softc(netdev); + read_unlock(&fcoe_hostlist_lock); return (fc) ? fc->ctlr.lp : NULL; } @@ -1757,6 +1767,8 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) * fcoe_hostlist_add() - Add a lport to lports list * @lp: ptr to the fc_lport to be added * + * Called with write fcoe_hostlist_lock held. + * * Returns: 0 for success */ int fcoe_hostlist_add(const struct fc_lport *lp) @@ -1766,9 +1778,7 @@ int fcoe_hostlist_add(const struct fc_lport *lp) fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); if (!fc) { fc = lport_priv(lp); - write_lock_bh(&fcoe_hostlist_lock); list_add_tail(&fc->list, &fcoe_hostlist); - write_unlock_bh(&fcoe_hostlist_lock); } return 0; } @@ -1783,9 +1793,9 @@ int fcoe_hostlist_remove(const struct fc_lport *lp) { struct fcoe_softc *fc; + write_lock_bh(&fcoe_hostlist_lock); fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); BUG_ON(!fc); - write_lock_bh(&fcoe_hostlist_lock); list_del(&fc->list); write_unlock_bh(&fcoe_hostlist_lock); From d7179680d04f1e196b7a5f70e7f93bb1850407c6 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:21 -0700 Subject: [PATCH 040/209] [SCSI] fcoe, libfc: adds offload EM per eth device with only single xid range per EM Updates fcoe_em_config to allocate a single instance of sharable offload EM for supported lp->lro_xid per eth device, and then share this EM for subsequently more lports creation on same eth device (e.g when using VLAN). Adds tiny fcoe_oem_match function for offload EM to return true for read types IO to have read IO exchanges allocated from offload shared EM. Removes fc_em_alloc_xid function completely which was needed to manage two xid ranges within a EM, this is not needed any more with allocation of separate sharable offload EM per eth device. Instead this patch adds simple xid allocation logic to manage single xid range. 
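The replacement allocation logic described here (and implemented in fc_exch_em_alloc() in the diff below) is a circular scan over a single xid range. A simplified sketch follows, with locking and exchange setup omitted and the failure return value chosen only for the example:

    /* Scan from next_xid for a free slot, wrapping at max_xid and
     * giving up if the scan comes back to its starting point. */
    static u16 xid_scan(struct fc_exch_mgr *mp)
    {
            u16 min = mp->min_xid;
            u16 max = mp->max_xid;
            u16 xid = mp->next_xid;

            while (mp->exches[xid - min]) {         /* slot already in use */
                    xid = (xid == max) ? min : xid + 1;
                    if (xid == mp->next_xid)        /* wrapped: range full */
                            return FC_XID_UNKNOWN;
            }
            mp->next_xid = (xid == max) ? min : xid + 1;
            return xid;                             /* caller claims the slot */
    }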
Adds fc_exch_em_alloc with mp->next_xid as cursor to allocate new xid from single xid range of EM, uses mp->next_xid instead removed mp->last_xid which slightly increase probability of finding empty xid on exch allocation. Removes restriction of not allowing use of xid zero along with changing two xid range change to single xid range. Makes fc_fcp_ddp_setup calling conditional to only xid allocated from shared offload EM. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 67 ++++++++++++++++++++++++- drivers/scsi/fcoe/fcoe.h | 3 +- drivers/scsi/libfc/fc_exch.c | 94 +++++++----------------------------- 3 files changed, 85 insertions(+), 79 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 86410b9a30c3..0306aa95eb34 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -415,6 +415,17 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, return 0; } +/* + * fcoe_oem_match() - match for read types IO + * @fp: the fc_frame for new IO. + * + * Returns : true for read types IO, otherwise returns false. + */ +bool fcoe_oem_match(struct fc_frame *fp) +{ + return fc_fcp_is_read(fr_fsp(fp)); +} + /** * fcoe_em_config() - allocates em for this lport * @lp: the port that em is to allocated for @@ -425,9 +436,61 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, */ static inline int fcoe_em_config(struct fc_lport *lp) { - if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCOE_MIN_XID, - FCOE_MAX_XID, NULL)) + struct fcoe_softc *fc = lport_priv(lp); + struct fcoe_softc *oldfc = NULL; + u16 min_xid = FCOE_MIN_XID; + u16 max_xid = FCOE_MAX_XID; + + /* + * Check if need to allocate an em instance for + * offload exchange ids to be shared across all VN_PORTs/lport. + */ + if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) { + lp->lro_xid = 0; + goto skip_oem; + } + + /* + * Reuse existing offload em instance in case + * it is already allocated on phys_dev. + */ + list_for_each_entry(oldfc, &fcoe_hostlist, list) { + if (oldfc->phys_dev == fc->phys_dev) { + fc->oem = oldfc->oem; + break; + } + } + + if (fc->oem) { + if (!fc_exch_mgr_add(lp, fc->oem, fcoe_oem_match)) { + printk(KERN_ERR "fcoe_em_config: failed to add " + "offload em:%p on interface:%s\n", + fc->oem, fc->real_dev->name); + return -ENOMEM; + } + } else { + fc->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, + FCOE_MIN_XID, lp->lro_xid, + fcoe_oem_match); + if (!fc->oem) { + printk(KERN_ERR "fcoe_em_config: failed to allocate " + "em for offload exches on interface:%s\n", + fc->real_dev->name); + return -ENOMEM; + } + } + + /* + * Exclude offload EM xid range from next EM xid range. 
+ */ + min_xid += lp->lro_xid + 1; + +skip_oem: + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { + printk(KERN_ERR "fcoe_em_config: failed to " + "allocate em on interface %s\n", fc->real_dev->name); return -ENOMEM; + } return 0; } diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 0d724fa0898f..6905efc166bf 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -37,7 +37,7 @@ #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 -#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ +#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ unsigned int fcoe_debug_logging; @@ -81,6 +81,7 @@ struct fcoe_softc { struct list_head list; struct net_device *real_dev; struct net_device *phys_dev; /* device with ethtool_ops */ + struct fc_exch_mgr *oem; /* offload exchange manger */ struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; struct sk_buff_head fcoe_pending_queue; diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 324589a5cc03..11ddd115efb6 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -58,7 +58,7 @@ struct fc_exch_mgr { struct kref kref; /* exchange mgr reference count */ spinlock_t em_lock; /* exchange manager lock, must be taken before ex_lock */ - u16 last_xid; /* last allocated exchange ID */ + u16 next_xid; /* next possible free exchange ID */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ u16 max_read; /* max exchange ID for read */ @@ -464,68 +464,21 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) return sp; } -/* - * fc_em_alloc_xid - returns an xid based on request type - * @lp : ptr to associated lport - * @fp : ptr to the assocated frame - * - * check the associated fc_fsp_pkt to get scsi command type and - * command direction to decide from which range this exch id - * will be allocated from. - * - * Returns : 0 or an valid xid - */ -static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) -{ - u16 xid, min, max; - u16 *plast; - struct fc_exch *ep = NULL; - - if (mp->max_read) { - if (fc_fcp_is_read(fr_fsp(fp))) { - min = mp->min_xid; - max = mp->max_read; - plast = &mp->last_read; - } else { - min = mp->max_read + 1; - max = mp->max_xid; - plast = &mp->last_xid; - } - } else { - min = mp->min_xid; - max = mp->max_xid; - plast = &mp->last_xid; - } - xid = *plast; - do { - xid = (xid == max) ? min : xid + 1; - ep = mp->exches[xid - mp->min_xid]; - } while ((ep != NULL) && (xid != *plast)); - - if (unlikely(ep)) - xid = 0; - else - *plast = xid; - - return xid; -} - /** * fc_exch_em_alloc() - allocate an exchange from a specified EM. * @lport: ptr to the local port * @mp: ptr to the exchange manager - * @fp: ptr to the FC frame - * @xid: input xid * - * if xid is supplied zero then assign next free exchange ID - * from exchange manager, otherwise use supplied xid. - * Returns with exch lock held. + * Returns pointer to allocated fc_exch with exch lock held. 
*/ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, - struct fc_exch_mgr *mp, - struct fc_frame *fp, u16 xid) + struct fc_exch_mgr *mp) { struct fc_exch *ep; + u16 min, max, xid; + + min = mp->min_xid; + max = mp->max_xid; /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); @@ -536,15 +489,14 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, memset(ep, 0, sizeof(*ep)); spin_lock_bh(&mp->em_lock); - /* alloc xid if input xid 0 */ - if (!xid) { - /* alloc a new xid */ - xid = fc_em_alloc_xid(mp, fp); - if (!xid) { - printk(KERN_WARNING "libfc: Failed to allocate an exhange\n"); + xid = mp->next_xid; + /* alloc a new xid */ + while (mp->exches[xid - min]) { + xid = (xid == max) ? min : xid + 1; + if (xid == mp->next_xid) goto err; - } } + mp->next_xid = (xid == max) ? min : xid + 1; fc_exch_hold(ep); /* hold for exch in mp */ spin_lock_init(&ep->ex_lock); @@ -597,7 +549,7 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) list_for_each_entry(ema, &lport->ema_list, ema_list) { if (!ema->match || ema->match(fp)) { - ep = fc_exch_em_alloc(lport, ema->mp, fp, 0); + ep = fc_exch_em_alloc(lport, ema->mp); if (ep) return ep; } @@ -1817,7 +1769,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, struct fc_exch_mgr *mp; size_t len; - if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; @@ -1826,7 +1778,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, /* * Memory need for EM */ -#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2))) len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *)); len += sizeof(struct fc_exch_mgr); @@ -1840,17 +1791,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; - mp->last_xid = min_xid - 1; - mp->max_read = 0; - mp->last_read = 0; - if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) { - mp->max_read = lp->lro_xid; - mp->last_read = min_xid - 1; - mp->last_xid = mp->max_read; - } else { - /* disable lro if no xid control over read */ - lp->lro_enabled = 0; - } + mp->next_xid = min_xid; INIT_LIST_HEAD(&mp->ex_list); spin_lock_init(&mp->em_lock); @@ -1922,7 +1863,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + if (ep->xid <= lp->lro_xid) + fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); if (unlikely(lp->tt.frame_send(lp, fp))) goto err; From 15a521b4243f3d61971f8422f3e514ef009a42b8 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:05:26 -0700 Subject: [PATCH 041/209] [SCSI] fcoe: Remove ifdef for NETIF_F_FCOE_CRC and NETIF_F_FSO Remove the extra ifdef for NETIF_F_FSO and NETIF_F_FCOE_CRC since they are already defined in the current kernel as in include/linux/netdevice.h. 
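Since both flags are always defined by include/linux/netdevice.h in this kernel, the offload capability tests in fcoe_netdev_config() need no preprocessor guards; after this patch they reduce to the following (a condensed view of the hunks below, with the debug prints omitted):

    if (netdev->features & NETIF_F_FCOE_CRC)
            lp->crc_offload = 1;            /* FC CRC offload available */

    if (netdev->features & NETIF_F_FSO) {
            lp->seq_offload = 1;            /* large send / sequence offload */
            lp->lso_max = netdev->gso_max_size;
    }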
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0306aa95eb34..ad21e87e4006 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -300,20 +300,16 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) if (fc->real_dev->features & NETIF_F_SG) lp->sg_supp = 1; -#ifdef NETIF_F_FCOE_CRC if (netdev->features & NETIF_F_FCOE_CRC) { lp->crc_offload = 1; FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); } -#endif -#ifdef NETIF_F_FSO if (netdev->features & NETIF_F_FSO) { lp->seq_offload = 1; lp->lso_max = netdev->gso_max_size; FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", lp->lso_max); } -#endif if (netdev->fcoe_ddp_xid) { lp->lro_enabled = 1; lp->lro_xid = netdev->fcoe_ddp_xid; @@ -1205,7 +1201,6 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); hp->fcoe_sof = sof; -#ifdef NETIF_F_FSO /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ if (lp->seq_offload && fr_max_payload(fp)) { skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; @@ -1214,7 +1209,6 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; } -#endif /* update tx stats: regardless if LLD fails */ stats = fc_lport_get_stats(lp); stats->TxFrames++; From 537029f8e950776951ca2a3fe30121d5c05643d1 Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:05:32 -0700 Subject: [PATCH 042/209] [SCSI] libfc: Remove FC_FRAME_SG_LEN in fc_fcp_send_data FC_FRAME_SG_LEN is 4 which is too small when offload is enabled. Actually, the WARN_ON() in fc_fcp_send_data() should be: WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags > MAX_SKB_FRAGS); But since we will not get anything more than 64K anyway, so there is no need to do this anyway here. Therefore, I am getting rid of FC_FRAME_SG_LEN here and the WARN_ON here. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 2 -- include/scsi/fc_frame.h | 7 ------- 2 files changed, 9 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e303e0d12c4b..2069edf80268 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -569,8 +569,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, } sg_bytes = min(tlen, sg->length - offset); if (using_sg) { - WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags > - FC_FRAME_SG_LEN); get_page(sg_page(sg)); skb_fill_page_desc(fp_skb(fp), skb_shinfo(fp_skb(fp))->nr_frags, diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index 59511057cee0..c35d2383cc26 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -37,13 +37,6 @@ #define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ #define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ -/* - * Information about an individual fibre channel frame received or to be sent. - * The buffer may be in up to 4 additional non-contiguous sections, - * but the linear section must hold the frame header. 
- */ -#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */ - #define fp_skb(fp) (&((fp)->skb)) #define fr_hdr(fp) ((fp)->skb.data) #define fr_len(fp) ((fp)->skb.len) From 53fcfbbef569819706b880a502ff62e2852edfce Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Wed, 29 Jul 2009 17:05:40 -0700 Subject: [PATCH 043/209] [SCSI] libfc: Remove page flags check for sglist I don't believe this check is needed any more in the current kernel, which, if I understand correctly, is for compound page where only the first page is supposed to get ref-counted. Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_fcp.c | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 2069edf80268..7d5ffcbbf39b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -507,33 +507,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, f_ctl = FC_FC_REL_OFF; WARN_ON(!seq); - /* - * If a get_page()/put_page() will fail, don't use sg lists - * in the fc_frame structure. - * - * The put_page() may be long after the I/O has completed - * in the case of FCoE, since the network driver does it - * via free_skb(). See the test in free_pages_check(). - * - * Test this case with 'dd /dev/st0 bs=64k'. - */ - if (using_sg) { - for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) { - if (page_count(sg_page(sg)) == 0 || - (sg_page(sg)->flags & (1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_reserved | - 1 << PG_buddy))) { - using_sg = 0; - break; - } - } - } sg = scsi_sglist(sc); while (remaining > 0 && sg) { From 1d1b88dc01e5fd2b3e2abb7aa42d0f1eca4c33ea Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:45 -0700 Subject: [PATCH 044/209] [SCSI] fcoe: removes phys_dev and renames real_dev to netdev. The phys_dev was used only to locate common offload EM instance for all FCoE instances on a eth devices in function fcoe_em_config, so just updated fcoe_em_config to look for actual real eth device in locating common offload EM instance and then no need to store phys_dev in fcoe_softc, so removes phys_dev from fcoe_softc also. Renames fcoe_softc real_dev to netdev and updates all its uses to use netdev. So effectively no functional change, use of single netdev instead phys_dev and real_dev saves one pointer memory in fcoe_softc, also real_dev used here was confusing with vlan driver terminology since real_dev in vlan driver is referred to physical eth device. 
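The one caller that still needs the underlying physical device, fcoe_em_config(), now resolves it on demand instead of caching phys_dev. As done in the hunk below, the lookup is simply:

    struct net_device *real_dev;

    /* a VLAN interface points at its carrier device; anything else
     * is already the physical eth device */
    if (fc->netdev->priv_flags & IFF_802_1Q_VLAN)
            real_dev = vlan_dev_real_dev(fc->netdev);
    else
            real_dev = fc->netdev;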
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 93 +++++++++++++++++++++------------------- drivers/scsi/fcoe/fcoe.h | 5 +-- 2 files changed, 51 insertions(+), 47 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ad21e87e4006..757aa28f0f04 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -159,7 +159,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, */ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { - skb->dev = fcoe_from_ctlr(fip)->real_dev; + skb->dev = fcoe_from_ctlr(fip)->netdev; dev_queue_xmit(skb); } @@ -179,8 +179,8 @@ static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) fc = fcoe_from_ctlr(fip); rtnl_lock(); if (!is_zero_ether_addr(old)) - dev_unicast_delete(fc->real_dev, old); - dev_unicast_add(fc->real_dev, new); + dev_unicast_delete(fc->netdev, old); + dev_unicast_add(fc->netdev, new); rtnl_unlock(); } @@ -231,12 +231,12 @@ void fcoe_netdev_cleanup(struct fcoe_softc *fc) /* Delete secondary MAC addresses */ rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(fc->real_dev, flogi_maddr); + dev_unicast_delete(fc->netdev, flogi_maddr); if (!is_zero_ether_addr(fc->ctlr.data_src_addr)) - dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr); + dev_unicast_delete(fc->netdev, fc->ctlr.data_src_addr); if (fc->ctlr.spma) - dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr); - dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_unicast_delete(fc->netdev, fc->ctlr.ctl_src_addr); + dev_mc_delete(fc->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); } @@ -272,17 +272,12 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) /* Setup lport private data to point to fcoe softc */ fc = lport_priv(lp); fc->ctlr.lp = lp; - fc->real_dev = netdev; - fc->phys_dev = netdev; - - /* Require support for get_pauseparam ethtool op. */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) - fc->phys_dev = vlan_dev_real_dev(netdev); + fc->netdev = netdev; /* Do not support for bonding device */ - if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) || - (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) || - (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) { + if ((netdev->priv_flags & IFF_MASTER_ALB) || + (netdev->priv_flags & IFF_SLAVE_INACTIVE) || + (netdev->priv_flags & IFF_MASTER_8023AD)) { return -EOPNOTSUPP; } @@ -291,13 +286,13 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) * user-configured limit. If the MFS is too low, fcoe_link_ok() * will return 0, so do this first. 
*/ - mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); + mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); if (fc_set_mfs(lp, mfs)) return -EINVAL; /* offload features support */ - if (fc->real_dev->features & NETIF_F_SG) + if (netdev->features & NETIF_F_SG) lp->sg_supp = 1; if (netdev->features & NETIF_F_FCOE_CRC) { @@ -335,13 +330,13 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) /* setup Source Mac Address */ if (!fc->ctlr.spma) - memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, - fc->real_dev->addr_len); + memcpy(fc->ctlr.ctl_src_addr, netdev->dev_addr, + fc->netdev->addr_len); - wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0); + wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); fc_set_wwnn(lp, wwnn); /* XXX - 3rd arg needs to be vlan id */ - wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0); + wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); fc_set_wwpn(lp, wwpn); /* @@ -351,10 +346,10 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) */ rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_add(fc->real_dev, flogi_maddr); + dev_unicast_add(netdev, flogi_maddr); if (fc->ctlr.spma) - dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr); - dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_unicast_add(netdev, fc->ctlr.ctl_src_addr); + dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); /* @@ -363,12 +358,12 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) */ fc->fcoe_packet_type.func = fcoe_rcv; fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - fc->fcoe_packet_type.dev = fc->real_dev; + fc->fcoe_packet_type.dev = netdev; dev_add_pack(&fc->fcoe_packet_type); fc->fip_packet_type.func = fcoe_fip_recv; fc->fip_packet_type.type = htons(ETH_P_FIP); - fc->fip_packet_type.dev = fc->real_dev; + fc->fip_packet_type.dev = netdev; dev_add_pack(&fc->fip_packet_type); return 0; @@ -434,6 +429,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) { struct fcoe_softc *fc = lport_priv(lp); struct fcoe_softc *oldfc = NULL; + struct net_device *old_real_dev, *cur_real_dev; u16 min_xid = FCOE_MIN_XID; u16 max_xid = FCOE_MAX_XID; @@ -448,10 +444,20 @@ static inline int fcoe_em_config(struct fc_lport *lp) /* * Reuse existing offload em instance in case - * it is already allocated on phys_dev. 
+ * it is already allocated on real eth device */ + if (fc->netdev->priv_flags & IFF_802_1Q_VLAN) + cur_real_dev = vlan_dev_real_dev(fc->netdev); + else + cur_real_dev = fc->netdev; + list_for_each_entry(oldfc, &fcoe_hostlist, list) { - if (oldfc->phys_dev == fc->phys_dev) { + if (oldfc->netdev->priv_flags & IFF_802_1Q_VLAN) + old_real_dev = vlan_dev_real_dev(oldfc->netdev); + else + old_real_dev = oldfc->netdev; + + if (cur_real_dev == old_real_dev) { fc->oem = oldfc->oem; break; } @@ -461,7 +467,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) if (!fc_exch_mgr_add(lp, fc->oem, fcoe_oem_match)) { printk(KERN_ERR "fcoe_em_config: failed to add " "offload em:%p on interface:%s\n", - fc->oem, fc->real_dev->name); + fc->oem, fc->netdev->name); return -ENOMEM; } } else { @@ -471,7 +477,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) if (!fc->oem) { printk(KERN_ERR "fcoe_em_config: failed to allocate " "em for offload exches on interface:%s\n", - fc->real_dev->name); + fc->netdev->name); return -ENOMEM; } } @@ -484,7 +490,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) skip_oem: if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { printk(KERN_ERR "fcoe_em_config: failed to " - "allocate em on interface %s\n", fc->real_dev->name); + "allocate em on interface %s\n", fc->netdev->name); return -ENOMEM; } @@ -548,7 +554,7 @@ static int fcoe_if_destroy(struct net_device *netdev) fc_lport_free_stats(lp); /* Release the net_device and Scsi_Host */ - dev_put(fc->real_dev); + dev_put(netdev); scsi_host_put(lp->host); return 0; @@ -1179,7 +1185,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = fc->real_dev; + skb->dev = fc->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); @@ -1454,7 +1460,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { struct fc_lport *lp = NULL; - struct net_device *real_dev = ptr; + struct net_device *netdev = ptr; struct fcoe_softc *fc; struct fcoe_dev_stats *stats; u32 link_possible = 1; @@ -1463,7 +1469,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, read_lock(&fcoe_hostlist_lock); list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->real_dev == real_dev) { + if (fc->netdev == netdev) { lp = fc->ctlr.lp; break; } @@ -1483,16 +1489,15 @@ static int fcoe_device_notification(struct notifier_block *notifier, case NETDEV_CHANGE: break; case NETDEV_CHANGEMTU: - mfs = fc->real_dev->mtu - - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); + mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); if (mfs >= FC_MIN_MAX_FRAME) fc_set_mfs(lp, mfs); break; case NETDEV_REGISTER: break; default: - FCOE_NETDEV_DBG(real_dev, "Unknown event %ld " + FCOE_NETDEV_DBG(netdev, "Unknown event %ld " "from netdev netlink\n", event); } if (link_possible && !fcoe_link_ok(lp)) @@ -1696,7 +1701,7 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe port"); int fcoe_link_ok(struct fc_lport *lp) { struct fcoe_softc *fc = lport_priv(lp); - struct net_device *dev = fc->real_dev; + struct net_device *dev = fc->netdev; struct ethtool_cmd ecmd = { ETHTOOL_GSET }; if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && @@ -1797,7 +1802,7 @@ fcoe_hostlist_lookup_softc(const struct net_device *dev) struct fcoe_softc *fc; list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->real_dev == dev) + if (fc->netdev == dev) return fc; } return NULL; @@ -1916,7 
+1921,7 @@ static void __exit fcoe_exit(void) /* releases the associated fcoe hosts */ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) - fcoe_if_destroy(fc->real_dev); + fcoe_if_destroy(fc->netdev); unregister_hotcpu_notifier(&fcoe_cpu_notifier); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 6905efc166bf..5ae8ca71afcb 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -79,8 +79,7 @@ struct fcoe_percpu_s { */ struct fcoe_softc { struct list_head list; - struct net_device *real_dev; - struct net_device *phys_dev; /* device with ethtool_ops */ + struct net_device *netdev; struct fc_exch_mgr *oem; /* offload exchange manger */ struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; @@ -95,7 +94,7 @@ struct fcoe_softc { static inline struct net_device *fcoe_netdev( const struct fc_lport *lp) { - return ((struct fcoe_softc *)lport_priv(lp))->real_dev; + return ((struct fcoe_softc *)lport_priv(lp))->netdev; } #endif /* _FCOE_H_ */ From 534cc9c165017dd29ae0f4458832893dc8033c6f Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Wed, 29 Jul 2009 21:49:48 -0700 Subject: [PATCH 045/209] [SCSI] bnx2i : Fix command session number jump issue seen during cable pull test Without the fix bnx2i would fail tt->xmit_task() when link is down and libiscsi would have already incremented session->cmdsn before calling bnx2i's xmit_task() entry point and will just return the command to SCSI-ML when xmit_task() fails. libiscsi does not retract the session->cmdsn as the command was never sent on wire. It is generally good idea for LLD, bnx2i to accept the scsi cmnd/nopout and let upper layer timeout and go though normal session recovery process. When link is down, unsolicited nopout will not be accepted by bnx2i and connection will never enter recovery state. This fix is required for MPIO to work corectly Signed-off-by: Anil Veerabhadrappa Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 98148f3f3c63..40120a242eff 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1160,9 +1160,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task) struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; - if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) - return -ENOTCONN; - if (!bnx2i_conn->is_bound) return -ENOTCONN; From c19dcd011238118ad07d3ab7ed1bbc4916826324 Mon Sep 17 00:00:00 2001 From: Anil Veerabhadrappa Date: Wed, 29 Jul 2009 21:50:11 -0700 Subject: [PATCH 046/209] [SCSI] bnx2i : Fix "cid #n not valid" issue When bnx2i_adapter_ready() fails, connection handle(cid) = 0 is wrongly freed because 'cid' is not yet allocated for the endpoint. Fix is to initialize bnx2i_ep->ep_iscsi_cid to '-1' in bnx2i_alloc_ep() and not in bnx2i_ep_connect() to avoid releasing invalid 'cid'. 
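In code terms the fix is a one-line move of the sentinel initialization into the allocation path (sketch only; the internals of bnx2i_free_iscsi_cid() are not shown in this patch):

    /* in bnx2i_alloc_ep(): mark the endpoint as having no cid yet */
    bnx2i_ep->ep_iscsi_cid = (u16) -1;

    /* the free path's existing check for (u16) -1, noted below, then
     * skips endpoints that never received a real cid instead of
     * wrongly releasing cid 0 */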
There is already a check in bnx2i_free_iscsi_cid() not to free invalid iscsi connection handle (-1) Signed-off-by: Anil Veerabhadrappa Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 40120a242eff..9a7ba71f1af4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -387,6 +387,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) bnx2i_ep = ep->dd_data; INIT_LIST_HEAD(&bnx2i_ep->link); bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->ep_iscsi_cid = (u16) -1; bnx2i_ep->hba = hba; bnx2i_ep->hba_age = hba->age; hba->ofld_conns_active++; @@ -1681,8 +1682,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, goto net_if_down; } - bnx2i_ep->state = EP_STATE_IDLE; - bnx2i_ep->ep_iscsi_cid = (u16) -1; bnx2i_ep->num_active_cmds = 0; iscsi_cid = bnx2i_alloc_iscsi_cid(hba); if (iscsi_cid == -1) { From 0124ca9d8ee58b3cd028a23cef2fe225fcfee3b8 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 30 Jul 2009 10:58:51 -0500 Subject: [PATCH 047/209] [SCSI] ipr: fix buffer overflow ipr_cmd_label[] isn't big enough for an eight byte string plus terminator. Fix by shortening the string to seven bytes. Signed-off-by: Brian King Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/ipr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4b63dd6b1c81..163245a1c3e5 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1199,7 +1199,7 @@ struct ipr_ioa_cfg { struct ata_host ata_host; char ipr_cmd_label[8]; -#define IPR_CMD_LABEL "ipr_cmnd" +#define IPR_CMD_LABEL "ipr_cmd" struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; }; From 163f52b6cf3a639df6a72c7937e0eb88b20f1ef3 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sat, 1 Aug 2009 00:39:36 +0000 Subject: [PATCH 048/209] [SCSI] ses: fix hotplug with multiple devices and expanders In a situation either with expanders or with multiple enclosure devices, hot add doesn't always work. This is because we try to find a single enclosure device attached to the host. Fix this by looping over all enclosure devices attached to the host and also by making the find loop recognise that the enclosure devices may be expander remote (i.e. not parented by the host). Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/misc/enclosure.c | 44 ++++++++++++++++++++++++++++----------- drivers/scsi/ses.c | 10 +++++---- include/linux/enclosure.h | 3 ++- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 348443bdb23b..789d12128c24 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -33,24 +33,44 @@ static DEFINE_MUTEX(container_list_lock); static struct class enclosure_class; /** - * enclosure_find - find an enclosure given a device - * @dev: the device to find for + * enclosure_find - find an enclosure given a parent device + * @dev: the parent to match against + * @start: Optional enclosure device to start from (NULL if none) * - * Looks through the list of registered enclosures to see - * if it can find a match for a device. Returns NULL if no - * enclosure is found. Obtains a reference to the enclosure class - * device which must be released with device_put(). 
+ * Looks through the list of registered enclosures to find all those + * with @dev as a parent. Returns NULL if no enclosure is + * found. @start can be used as a starting point to obtain multiple + * enclosures per parent (should begin with NULL and then be set to + * each returned enclosure device). Obtains a reference to the + * enclosure class device which must be released with device_put(). + * If @start is not NULL, a reference must be taken on it which is + * released before returning (this allows a loop through all + * enclosures to exit with only the reference on the enclosure of + * interest held). Note that the @dev may correspond to the actual + * device housing the enclosure, in which case no iteration via @start + * is required. */ -struct enclosure_device *enclosure_find(struct device *dev) +struct enclosure_device *enclosure_find(struct device *dev, + struct enclosure_device *start) { struct enclosure_device *edev; mutex_lock(&container_list_lock); - list_for_each_entry(edev, &container_list, node) { - if (edev->edev.parent == dev) { - get_device(&edev->edev); - mutex_unlock(&container_list_lock); - return edev; + edev = list_prepare_entry(start, &container_list, node); + if (start) + put_device(&start->edev); + + list_for_each_entry_continue(edev, &container_list, node) { + struct device *parent = edev->edev.parent; + /* parent might not be immediate, so iterate up to + * the root of the tree if necessary */ + while (parent) { + if (parent == dev) { + get_device(&edev->edev); + mutex_unlock(&container_list_lock); + return edev; + } + parent = parent->parent; } } mutex_unlock(&container_list_lock); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 4f618f487356..e1b8c828f03a 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -413,10 +413,11 @@ static int ses_intf_add(struct device *cdev, if (!scsi_device_enclosure(sdev)) { /* not an enclosure, but might be in one */ - edev = enclosure_find(&sdev->host->shost_gendev); - if (edev) { + struct enclosure_device *prev = NULL; + + while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { ses_match_to_enclosure(edev, sdev); - put_device(&edev->edev); + prev = edev; } return -ENODEV; } @@ -625,7 +626,8 @@ static void ses_intf_remove(struct device *cdev, if (!scsi_device_enclosure(sdev)) return; - edev = enclosure_find(cdev->parent); + /* exact match to this enclosure */ + edev = enclosure_find(cdev->parent, NULL); if (!edev) return; diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 4332442b1b57..d77811e9ed84 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -123,7 +123,8 @@ enclosure_component_register(struct enclosure_device *, unsigned int, int enclosure_add_device(struct enclosure_device *enclosure, int component, struct device *dev); int enclosure_remove_device(struct enclosure_device *enclosure, int component); -struct enclosure_device *enclosure_find(struct device *dev); +struct enclosure_device *enclosure_find(struct device *dev, + struct enclosure_device *start); int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), void *data); From 43d8eb9cfd0aea93be32181c64e18191b69c211c Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sat, 1 Aug 2009 00:41:22 +0000 Subject: [PATCH 049/209] [SCSI] ses: add support for enclosure component hot removal Right at the moment, hot removal of a device within an enclosure does nothing (because the intf_remove only copes with enclosure removal not with component removal). 
Fix this by adding a function to remove the component. Also needed to fix the prototype of enclosure_remove_device, since we know the device we've removed but not the internal component number Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/misc/enclosure.c | 22 ++++++++++++++-------- drivers/scsi/ses.c | 33 ++++++++++++++++++++++++++------- include/linux/enclosure.h | 2 +- 3 files changed, 41 insertions(+), 16 deletions(-) diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 789d12128c24..850706a5e553 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -332,19 +332,25 @@ EXPORT_SYMBOL_GPL(enclosure_add_device); * Returns zero on success or an error. * */ -int enclosure_remove_device(struct enclosure_device *edev, int component) +int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) { struct enclosure_component *cdev; + int i; - if (!edev || component >= edev->components) + if (!edev || !dev) return -EINVAL; - cdev = &edev->component[component]; - - device_del(&cdev->cdev); - put_device(cdev->dev); - cdev->dev = NULL; - return device_add(&cdev->cdev); + for (i = 0; i < edev->components; i++) { + cdev = &edev->component[i]; + if (cdev->dev == dev) { + enclosure_remove_links(cdev); + device_del(&cdev->cdev); + put_device(dev); + cdev->dev = NULL; + return device_add(&cdev->cdev); + } + } + return -ENODEV; } EXPORT_SYMBOL_GPL(enclosure_remove_device); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index e1b8c828f03a..be593c8525b5 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -616,18 +616,26 @@ static int ses_remove(struct device *dev) return 0; } -static void ses_intf_remove(struct device *cdev, - struct class_interface *intf) +static void ses_intf_remove_component(struct scsi_device *sdev) +{ + struct enclosure_device *edev, *prev = NULL; + + while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { + prev = edev; + if (!enclosure_remove_device(edev, &sdev->sdev_gendev)) + break; + } + if (edev) + put_device(&edev->edev); +} + +static void ses_intf_remove_enclosure(struct scsi_device *sdev) { - struct scsi_device *sdev = to_scsi_device(cdev->parent); struct enclosure_device *edev; struct ses_device *ses_dev; - if (!scsi_device_enclosure(sdev)) - return; - /* exact match to this enclosure */ - edev = enclosure_find(cdev->parent, NULL); + edev = enclosure_find(&sdev->sdev_gendev, NULL); if (!edev) return; @@ -645,6 +653,17 @@ static void ses_intf_remove(struct device *cdev, enclosure_unregister(edev); } +static void ses_intf_remove(struct device *cdev, + struct class_interface *intf) +{ + struct scsi_device *sdev = to_scsi_device(cdev->parent); + + if (!scsi_device_enclosure(sdev)) + ses_intf_remove_component(sdev); + else + ses_intf_remove_enclosure(sdev); +} + static struct class_interface ses_interface = { .add_dev = ses_intf_add, .remove_dev = ses_intf_remove, diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index d77811e9ed84..90d1c2184112 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -122,7 +122,7 @@ enclosure_component_register(struct enclosure_device *, unsigned int, enum enclosure_component_type, const char *); int enclosure_add_device(struct enclosure_device *enclosure, int component, struct device *dev); -int enclosure_remove_device(struct enclosure_device *enclosure, int component); +int enclosure_remove_device(struct enclosure_device *, struct device *); struct enclosure_device *enclosure_find(struct device 
*dev, struct enclosure_device *start); int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), From 21fab1d0595eacf781705ec3509012a28f298245 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sat, 1 Aug 2009 00:43:59 +0000 Subject: [PATCH 050/209] [SCSI] ses: update enclosure data on hot add Now that hot add works correctly, if a new device is added, we're still operating on stale enclosure data, so fix that by updating the enclosure diagnostic pages when we get notified of a device hot add Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/misc/enclosure.c | 3 + drivers/scsi/ses.c | 168 ++++++++++++++++++++++----------------- 2 files changed, 100 insertions(+), 71 deletions(-) diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 850706a5e553..7b039306037f 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -315,6 +315,9 @@ int enclosure_add_device(struct enclosure_device *edev, int component, cdev = &edev->component[component]; + if (cdev->dev == dev) + return -EEXIST; + if (cdev->dev) enclosure_remove_links(cdev); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index be593c8525b5..55b034b72708 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -347,6 +347,97 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, return 0; } +#define INIT_ALLOC_SIZE 32 + +static void ses_enclosure_data_process(struct enclosure_device *edev, + struct scsi_device *sdev, + int create) +{ + u32 result; + unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; + int i, j, page7_len, len, components; + struct ses_device *ses_dev = edev->scratch; + int types = ses_dev->page1[10]; + unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); + + if (!hdr_buf) + goto simple_populate; + + /* re-read page 10 */ + if (ses_dev->page10) + ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len); + /* Page 7 for the descriptors is optional */ + result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); + if (result) + goto simple_populate; + + page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; + /* add 1 for trailing '\0' we'll use */ + buf = kzalloc(len + 1, GFP_KERNEL); + if (!buf) + goto simple_populate; + result = ses_recv_diag(sdev, 7, buf, len); + if (result) { + simple_populate: + kfree(buf); + buf = NULL; + desc_ptr = NULL; + len = 0; + page7_len = 0; + } else { + desc_ptr = buf + 8; + len = (desc_ptr[2] << 8) + desc_ptr[3]; + /* skip past overall descriptor */ + desc_ptr += len + 4; + if (ses_dev->page10) + addl_desc_ptr = ses_dev->page10 + 8; + } + type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; + components = 0; + for (i = 0; i < types; i++, type_ptr += 4) { + for (j = 0; j < type_ptr[1]; j++) { + char *name = NULL; + struct enclosure_component *ecomp; + + if (desc_ptr) { + if (desc_ptr >= buf + page7_len) { + desc_ptr = NULL; + } else { + len = (desc_ptr[2] << 8) + desc_ptr[3]; + desc_ptr += 4; + /* Add trailing zero - pushes into + * reserved space */ + desc_ptr[len] = '\0'; + name = desc_ptr; + } + } + if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { + + if (create) + ecomp = enclosure_component_register(edev, + components++, + type_ptr[0], + name); + else + ecomp = &edev->component[components++]; + + if (!IS_ERR(ecomp) && addl_desc_ptr) + ses_process_descriptor(ecomp, + addl_desc_ptr); + } + if (desc_ptr) + desc_ptr += len; + + if (addl_desc_ptr) + addl_desc_ptr += addl_desc_ptr[1] + 2; + + } + } + kfree(buf); 
+ kfree(hdr_buf); +} + static void ses_match_to_enclosure(struct enclosure_device *edev, struct scsi_device *sdev) { @@ -361,6 +452,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, if (!buf) return; + ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); + vpd_len = ((buf[2] << 8) | buf[3]) + 4; desc = buf + 4; @@ -395,18 +488,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, kfree(buf); } -#define INIT_ALLOC_SIZE 32 - static int ses_intf_add(struct device *cdev, struct class_interface *intf) { struct scsi_device *sdev = to_scsi_device(cdev->parent); struct scsi_device *tmp_sdev; - unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL, - *addl_desc_ptr = NULL; + unsigned char *buf = NULL, *hdr_buf, *type_ptr; struct ses_device *ses_dev; u32 result; - int i, j, types, len, page7_len = 0, components = 0; + int i, types, len, components = 0; int err = -ENOMEM; struct enclosure_device *edev; struct ses_component *scomp = NULL; @@ -501,6 +591,7 @@ static int ses_intf_add(struct device *cdev, ses_dev->page10_len = len; buf = NULL; } + kfree(hdr_buf); scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); if (!scomp) @@ -517,72 +608,7 @@ static int ses_intf_add(struct device *cdev, for (i = 0; i < components; i++) edev->component[i].scratch = scomp + i; - /* Page 7 for the descriptors is optional */ - result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); - if (result) - goto simple_populate; - - page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; - /* add 1 for trailing '\0' we'll use */ - buf = kzalloc(len + 1, GFP_KERNEL); - if (!buf) - goto simple_populate; - result = ses_recv_diag(sdev, 7, buf, len); - if (result) { - simple_populate: - kfree(buf); - buf = NULL; - desc_ptr = NULL; - addl_desc_ptr = NULL; - } else { - desc_ptr = buf + 8; - len = (desc_ptr[2] << 8) + desc_ptr[3]; - /* skip past overall descriptor */ - desc_ptr += len + 4; - if (ses_dev->page10) - addl_desc_ptr = ses_dev->page10 + 8; - } - type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; - components = 0; - for (i = 0; i < types; i++, type_ptr += 4) { - for (j = 0; j < type_ptr[1]; j++) { - char *name = NULL; - struct enclosure_component *ecomp; - - if (desc_ptr) { - if (desc_ptr >= buf + page7_len) { - desc_ptr = NULL; - } else { - len = (desc_ptr[2] << 8) + desc_ptr[3]; - desc_ptr += 4; - /* Add trailing zero - pushes into - * reserved space */ - desc_ptr[len] = '\0'; - name = desc_ptr; - } - } - if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || - type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { - - ecomp = enclosure_component_register(edev, - components++, - type_ptr[0], - name); - - if (!IS_ERR(ecomp) && addl_desc_ptr) - ses_process_descriptor(ecomp, - addl_desc_ptr); - } - if (desc_ptr) - desc_ptr += len; - - if (addl_desc_ptr) - addl_desc_ptr += addl_desc_ptr[1] + 2; - - } - } - kfree(buf); - kfree(hdr_buf); + ses_enclosure_data_process(edev, sdev, 1); /* see if there are any devices matching before * we found the enclosure */ From 18ee70c9d7b2dcd312a1f8c6536841e7c0fea5ca Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 3 Aug 2009 12:42:33 -0700 Subject: [PATCH 051/209] [SCSI] scsi_dh: add the interface scsi_dh_set_params() When we moved the device handler functionality from dm layer to SCSI layer we dropped the parameter functionality. This path adds an interface to scsi dh layer to set device handler parameters. 
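For orientation, a minimal caller sketch of the new entry point; the queue, the block device and the "emc" handler name are illustrative assumptions, and params holds the NUL-separated string whose format is described below.

    /* sketch: attach a device handler to the request queue, then hand
     * it the packed parameter string */
    struct request_queue *q = bdev_get_queue(bdev);    /* assumed context */
    int err = scsi_dh_attach(q, "emc");

    if (!err)
            err = scsi_dh_set_params(q, params);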
Basically, multipath layer need to create a string with all the parameters and call scsi_dh_set_params() after it called scsi_dh_attach() on a device. If a device handler provides such an interface it will handle the parameters as it expects them. Reported-by: Eddie Williams Signed-off-by: Chandra Seetharaman Tested-by: Eddie Williams Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh.c | 33 +++++++++++++++++++++++++++ include/scsi/scsi_device.h | 1 + include/scsi/scsi_dh.h | 5 ++++ 3 files changed, 39 insertions(+) diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 53a7385e1b4d..3ee1cbc89479 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -451,6 +451,39 @@ int scsi_dh_activate(struct request_queue *q) } EXPORT_SYMBOL_GPL(scsi_dh_activate); +/* + * scsi_dh_set_params - set the parameters for the device as per the + * string specified in params. + * @q - Request queue that is associated with the scsi_device for + * which the parameters to be set. + * @params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * for example, string for 2 parameters with value 10 and 21 + * is specified as "2\010\021\0". + */ +int scsi_dh_set_params(struct request_queue *q, const char *params) +{ + int err = -SCSI_DH_NOSYS; + unsigned long flags; + struct scsi_device *sdev; + struct scsi_device_handler *scsi_dh = NULL; + + spin_lock_irqsave(q->queue_lock, flags); + sdev = q->queuedata; + if (sdev && sdev->scsi_dh_data) + scsi_dh = sdev->scsi_dh_data->scsi_dh; + if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev)) + err = 0; + spin_unlock_irqrestore(q->queue_lock, flags); + + if (err) + return err; + err = scsi_dh->set_params(sdev, params); + put_device(&sdev->sdev_gendev); + return err; +} +EXPORT_SYMBOL_GPL(scsi_dh_set_params); + /* * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for * the given name. FALSE(0) otherwise. diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1f3a4c8044c0..9af48cbf0036 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -187,6 +187,7 @@ struct scsi_device_handler { void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *); int (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); }; struct scsi_dh_data { diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index 33efce20c26c..ff2407405b42 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -60,6 +60,7 @@ extern int scsi_dh_activate(struct request_queue *); extern int scsi_dh_handler_exist(const char *); extern int scsi_dh_attach(struct request_queue *, const char *); extern void scsi_dh_detach(struct request_queue *); +extern int scsi_dh_set_params(struct request_queue *, const char *); #else static inline int scsi_dh_activate(struct request_queue *req) { @@ -77,4 +78,8 @@ static inline void scsi_dh_detach(struct request_queue *q) { return; } +static inline int scsi_dh_set_params(struct request_queue *req, const char *params) +{ + return -SCSI_DH_NOSYS; +} #endif From 14d9cb5f760c86b3a8f686909465982231c06627 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 3 Aug 2009 12:42:39 -0700 Subject: [PATCH 052/209] [SCSI] scsi_dh: Provide set_params interface in emc device handler Handle the parameters provided by user thru multipath. 
This handler expects only 2 parameters and their value can either be 0 or 1. This code originates from the old dm-emc.c file. Appropriate changes have been made to make it work in the new design. Reported-by: Eddie Williams Signed-off-by: Chandra Seetharaman Tested-by: Eddie Williams Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_emc.c | 59 +++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 0e572d2c5b0a..0cffe84976fe 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -561,6 +561,61 @@ static int clariion_activate(struct scsi_device *sdev) return result; } +/* + * params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * for example, string for 2 parameters with value 10 and 21 + * is specified as "2\010\021\0". + */ +static int clariion_set_params(struct scsi_device *sdev, const char *params) +{ + struct clariion_dh_data *csdev = get_clariion_data(sdev); + unsigned int hr = 0, st = 0, argc; + const char *p = params; + int result = SCSI_DH_OK; + + if ((sscanf(params, "%u", &argc) != 1) || (argc != 2)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &st) != 1) || (st > 1)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &hr) != 1) || (hr > 1)) + return -EINVAL; + + if (st) + csdev->flags |= CLARIION_SHORT_TRESPASS; + else + csdev->flags &= ~CLARIION_SHORT_TRESPASS; + + if (hr) + csdev->flags |= CLARIION_HONOR_RESERVATIONS; + else + csdev->flags &= ~CLARIION_HONOR_RESERVATIONS; + + /* + * If this path is owned, we have to send a trespass command + * with the new parameters. If not, simply return. Next trespass + * command would use the parameters. + */ + if (csdev->lun_state != CLARIION_LUN_OWNED) + goto done; + + csdev->lun_state = CLARIION_LUN_UNINITIALIZED; + result = send_trespass_cmd(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + + /* Update status */ + result = clariion_send_inquiry(sdev, csdev); + +done: + return result; +} static const struct scsi_dh_devlist clariion_dev_list[] = { {"DGC", "RAID"}, @@ -581,11 +636,9 @@ static struct scsi_device_handler clariion_dh = { .check_sense = clariion_check_sense, .activate = clariion_activate, .prep_fn = clariion_prep_fn, + .set_params = clariion_set_params, }; -/* - * TODO: need some interface so we can set trespass values - */ static int clariion_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; From 2bfd2e1337f0d8bb6ff45ce12934c45b83d70ee0 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Mon, 3 Aug 2009 12:42:45 -0700 Subject: [PATCH 053/209] [SCSI] scsi_dh: Use scsi_dh_set_params() in multipath. Use scsi_dh_set_params() set parameters provided. Save the parameters in parse_hw_handler() and use it in parse_path(). 
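To make the packing concrete, a hedged example of what parse_hw_handler() builds for a hypothetical table fragment (the fragment itself is invented for illustration):

    /*
     * Hypothetical hardware-handler section of a multipath table:
     *
     *     3 emc 1 0
     *
     * hw_argc = 3, hw_handler_name = "emc", handler arguments = "1", "0".
     * parse_hw_handler() stores the argument count followed by each
     * argument, all NUL-separated, and parse_path() later passes the
     * result to scsi_dh_set_params():
     */
    static const char example_hw_handler_params[] = "2\0" "1\0" "0\0";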
Reported-by: Eddie Williams Signed-off-by: Chandra Seetharaman Tested-by: Eddie Williams Cc: Alasdair G Kergon Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/md/dm-mpath.c | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6f0d90d4a541..32d0b878eccc 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -64,6 +64,7 @@ struct multipath { spinlock_t lock; const char *hw_handler_name; + char *hw_handler_params; unsigned nr_priority_groups; struct list_head priority_groups; unsigned pg_init_required; /* pg_init needs calling? */ @@ -219,6 +220,7 @@ static void free_multipath(struct multipath *m) } kfree(m->hw_handler_name); + kfree(m->hw_handler_params); mempool_destroy(m->mpio_pool); kfree(m); } @@ -615,6 +617,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, dm_put_device(ti, p->path.dev); goto bad; } + + if (m->hw_handler_params) { + r = scsi_dh_set_params(q, m->hw_handler_params); + if (r < 0) { + ti->error = "unable to set hardware " + "handler parameters"; + scsi_dh_detach(q); + dm_put_device(ti, p->path.dev); + goto bad; + } + } } r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); @@ -705,6 +718,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as, static int parse_hw_handler(struct arg_set *as, struct multipath *m) { unsigned hw_argc; + int ret; struct dm_target *ti = m->ti; static struct param _params[] = { @@ -726,17 +740,33 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) request_module("scsi_dh_%s", m->hw_handler_name); if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; - kfree(m->hw_handler_name); - m->hw_handler_name = NULL; - return -EINVAL; + ret = -EINVAL; + goto fail; } - if (hw_argc > 1) - DMWARN("Ignoring user-specified arguments for " - "hardware handler \"%s\"", m->hw_handler_name); + if (hw_argc > 1) { + char *p; + int i, j, len = 4; + + for (i = 0; i <= hw_argc - 2; i++) + len += strlen(as->argv[i]) + 1; + p = m->hw_handler_params = kzalloc(len, GFP_KERNEL); + if (!p) { + ti->error = "memory allocation failed"; + ret = -ENOMEM; + goto fail; + } + j = sprintf(p, "%d", hw_argc - 1); + for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) + j = sprintf(p, "%s", as->argv[i]); + } consume(as, hw_argc - 1); return 0; +fail: + kfree(m->hw_handler_name); + m->hw_handler_name = NULL; + return ret; } static int parse_features(struct arg_set *as, struct multipath *m) From 8f1f3ece891e8fe6ee69fa27617b60cb26e37bec Mon Sep 17 00:00:00 2001 From: Ravi Anand Date: Fri, 31 Jul 2009 15:09:23 -0700 Subject: [PATCH 054/209] [SCSI] qla2xxx: Pass the command's data residual to upper-layer callers. 
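The single added line matters because upper layers only learn about an under-run through the residual the LLD reports; a hedged sketch of the consuming side (not code from this patch):

    /* an upper-layer completion path can now compute how much data was
     * actually transferred for a struct scsi_cmnd *cp */
    unsigned int good_bytes = scsi_bufflen(cp) - scsi_get_resid(cp);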
Signed-off-by: Ravi Anand Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 245e7afb4c4d..f966d540d2cd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1223,6 +1223,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); + scsi_set_resid(cp, resid); cp->result = DID_ERROR << 16; break; } From 9764ff8807a2455218e2ec5024e823cc09b01906 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Fri, 31 Jul 2009 15:09:24 -0700 Subject: [PATCH 055/209] [SCSI] qla2xxx: Correctly handle 'global port-unavailable' AEN. Treat a global port-unavailable PORT_UPDATE (8014h) AEN as a loop-down event. For this case, within the FCoE domain, the 'logical' interface has been terminated, but the driver will not receive the classic LOOP_DOWN AEN. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index f966d540d2cd..417a9b9fb19c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -599,9 +599,37 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) case MBA_PORT_UPDATE: /* Port database update */ /* Only handle SCNs for our Vport index. */ - if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) + if (mb[1] != 0xffff && + vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) break; + /* Global event -- port logout or port unavailable. */ + if (mb[1] == 0xffff && mb[2] == 0x7) { + DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", + vha->host_no)); + DEBUG(printk(KERN_INFO + "scsi(%ld): Port unavailable %04x %04x %04x.\n", + vha->host_no, mb[1], mb[2], mb[3])); + + if (atomic_read(&vha->loop_state) != LOOP_DOWN) { + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, + LOOP_DOWN_TIME); + vha->device_flags |= DFLG_NO_CABLE; + qla2x00_mark_all_devices_lost(vha, 1); + } + + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, + FC_VPORT_FAILED); + } + + vha->flags.management_server_logged_in = 0; + ha->link_data_rate = PORT_SPEED_UNKNOWN; + break; + } + /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. earlier indicating loop is down) then process From 55903b9d152e91d4df2ab242c33efbd33e03e499 Mon Sep 17 00:00:00 2001 From: Santosh Vernekar Date: Fri, 31 Jul 2009 15:09:25 -0700 Subject: [PATCH 056/209] [SCSI] qla2xxx: Skip RSCN processing on vha if event is global. The RSCN processing is skipped if the event received is global and vha is not recipient. 
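The new filtering can be restated as a hedged helper (illustrative only; the patch open-codes the test inside qla2x00_async_event(), as the hunk below shows):

    /* return true when this vha should ignore the update event */
    static bool example_skip_update(scsi_qla_host_t *vha, uint16_t *mb)
    {
            bool global = (mb[1] == 0xffff);
            uint8_t vp_idx = mb[3] & 0xff;

            if (global && vp_idx == 0xff)
                    return false;           /* addressed to all vports */
            return vha->vp_idx != vp_idx;   /* otherwise vp must match */
    }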
Signed-off-by: Santosh Vernekar Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 417a9b9fb19c..00f23d20a983 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -598,10 +598,26 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) break; case MBA_PORT_UPDATE: /* Port database update */ - /* Only handle SCNs for our Vport index. */ - if (mb[1] != 0xffff && - vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) - break; + /* + * Handle only global and vn-port update events + * + * Relevant inputs: + * mb[1] = N_Port handle of changed port + * OR 0xffff for global event + * mb[2] = New login state + * 7 = Port logged out + * mb[3] = LSB is vp_idx, 0xff = all vps + * + * Skip processing if: + * Event is global, vp_idx is NOT all vps, + * vp_idx does not match + * Event is not global, vp_idx does not match + */ + if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) + || (mb[1] != 0xffff)) { + if (vha->vp_idx != (mb[3] & 0xff)) + break; + } /* Global event -- port logout or port unavailable. */ if (mb[1] == 0xffff && mb[2] == 0x7) { From faadc5e71cb69cff9c4517f7a4add97f92820120 Mon Sep 17 00:00:00 2001 From: Santosh Vernekar Date: Fri, 31 Jul 2009 15:09:26 -0700 Subject: [PATCH 057/209] [SCSI] qla2xxx: Mark all devices lost on loss of fc port. Signed-off-by: Santosh Vernekar Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 00f23d20a983..63e7cdce244c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -639,6 +639,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); + qla2x00_mark_all_devices_lost(vha, 1); } vha->flags.management_server_logged_in = 0; From 4b785241ad6d07176a692631dbda3605f4917a18 Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Fri, 31 Jul 2009 15:09:27 -0700 Subject: [PATCH 058/209] [SCSI] qla2xxx: Process DPC requests within valid Fabric topologies. If vports are created and topology is changed to Loop only, the driver continuously gets a LIP reset occurred and keeps trying to enable the vport. Only manage requests during F_Port. Signed-off-by: Lalit Chandivade Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mid.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index cd78c501803a..6238be37d7bb 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -250,6 +250,9 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + if (!(ha->current_topology & ISP_CFG_F)) + return 0; + if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. 
complete port configuration */ if (atomic_read(&base_vha->loop_state) == LOOP_READY) { From a2d301e8d62ecd36dbfec69be13859ae3050f6fa Mon Sep 17 00:00:00 2001 From: Santosh Vernekar Date: Fri, 31 Jul 2009 15:09:29 -0700 Subject: [PATCH 059/209] [SCSI] qla2xxx: Handle RSCN's per master/slave vn-port basis. Signed-off-by: Santosh Vernekar Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 63e7cdce244c..40014f3407b9 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -685,7 +685,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) break; /* Only handle SCNs for our Vport index. */ - if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) + if (vha->vp_idx != (mb[3] & 0xff)) break; DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", vha->host_no)); From f9e899ebe502e547fca37f64ebc4b1399880d8f7 Mon Sep 17 00:00:00 2001 From: Shyam Sundar Date: Fri, 31 Jul 2009 15:09:30 -0700 Subject: [PATCH 060/209] [SCSI] qla2xxx: Pad IOCB structure for size requirements. One byte added to make the IOCB structure satisfy size requirements. Signed-off-by: Shyam Sundar Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_fw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index dfde2dd865cb..66a8da5d7d08 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1126,7 +1126,7 @@ struct vp_config_entry_24xx { uint16_t id; uint16_t reserved_4; uint16_t hopct; - uint8_t reserved_5; + uint8_t reserved_5[2]; }; #define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ From 80de7efa6d6a4eb72e917bb2d814f38332090d45 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Fri, 31 Jul 2009 15:09:33 -0700 Subject: [PATCH 061/209] [SCSI] qla2xxx: ISP21xx/22xx do not support FDMI registrations. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 917534b9f221..4647015eba63 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1674,6 +1674,10 @@ int qla2x00_fdmi_register(scsi_qla_host_t *vha) { int rval; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + return QLA_FUNCTION_FAILED; rval = qla2x00_mgmt_svr_login(vha); if (rval) From eb94114bfd894dce1955e2ece575db544cecd921 Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Tue, 4 Aug 2009 16:12:13 -0700 Subject: [PATCH 062/209] [SCSI] qla2xxx: Fix a bug that clears the interrupt status register for the base queue The interrupt handler clears the interrupt status register for response updates in the base queue while working in the multique mode. This could lead to missing interrupt for async events, mail box completions etc. as these are also handled in the base queue. The fix ensures that the interrupt bit is not cleared for response updates in the ISR when the driver is working in multiqueue mode. 
Signed-off-by: Anirban Chakraborty Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_isr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 40014f3407b9..74fa6f992041 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1769,8 +1769,10 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) vha = qla25xx_get_host(rsp); qla24xx_process_response_queue(vha, rsp); - WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); - + if (!ha->mqenable) { + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD_RELAXED(&reg->hccr); + } spin_unlock_irq(&ha->hardware_lock); return IRQ_HANDLED; From 09ccbcc628f71e7f57b4a96982ad1bb2084391d8 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 4 Aug 2009 23:38:21 -0700 Subject: [PATCH 063/209] [SCSI] qla2xxx: Correct display of vp_count in debug printk. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index fe69f3057671..8fcd99eeec00 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2746,7 +2746,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (rptid_entry->format == 0) { DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," " number of VPs acquired %d\n", __func__, vha->host_no, - MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count))); + MSB(le16_to_cpu(rptid_entry->vp_count)), + LSB(le16_to_cpu(rptid_entry->vp_count)))); DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0])); From 7163ea815170f8c5d56ead27d7e6fa3fa1f9844b Mon Sep 17 00:00:00 2001 From: Anirban Chakraborty Date: Wed, 5 Aug 2009 09:18:40 -0700 Subject: [PATCH 064/209] [SCSI] qla2xxx: Fix to ensure driver works in single queue mode if multiqueue fails When the multiqueue mode fails to work, the driver falls back on single queue mode. This ensures that the firmware is reinitialized with single queue options and all the resources are readjusted accordingly.
Signed-off-by: Anirban Chakraborty Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 4 ++-- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 5 ++--- drivers/scsi/qla2xxx/qla_iocb.c | 2 +- drivers/scsi/qla2xxx/qla_mbx.c | 4 ++-- drivers/scsi/qla2xxx/qla_os.c | 23 +++++++++++++++++++---- 6 files changed, 27 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 0f8796201504..5b0a222241bb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1670,7 +1670,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); - if (ql2xmultique_tag) { + if (ha->flags.cpu_affinity_enabled) { req = ha->req_q_map[1]; goto vport_queue; } else if (ql2xmaxqueues == 1 || !ha->npiv_info) @@ -1743,7 +1743,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->host_no, vha->vp_idx, vha)); } - if (vha->req->id && !ql2xmultique_tag) { + if (vha->req->id && !ha->flags.cpu_affinity_enabled) { if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, "Queue delete failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9fde8bfe7607..68ab28c8152b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2224,6 +2224,7 @@ struct qla_hw_data { uint32_t chip_reset_done :1; uint32_t port0 :1; uint32_t running_gold_fw :1; + uint32_t cpu_affinity_enabled :1; } flags; /* This spinlock is used to protect "io transactions", you must diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f2ce8e3cc91b..0cbe39e92506 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -987,7 +987,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->phy_version); if (rval != QLA_SUCCESS) goto failed; - ha->flags.npiv_supported = 0; if (IS_QLA2XXX_MIDTYPE(ha) && (ha->fw_attributes & BIT_2)) { @@ -3244,7 +3243,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) struct req_que *req; struct rsp_que *rsp; - if (ql2xmultique_tag) + if (vha->hw->flags.cpu_affinity_enabled) req = vha->hw->req_q_map[0]; else req = vha->req; @@ -4264,7 +4263,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) return -EINVAL; rval = qla2x00_fw_ready(base_vha); - if (ql2xmultique_tag) + if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; else req = vha->req; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 13396beae2ce..c0ba370b23cf 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -852,7 +852,7 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) struct qla_hw_data *ha = sp->fcport->vha->hw; int affinity = cmd->request->cpu; - if (ql2xmultique_tag && affinity >= 0 && + if (ha->flags.cpu_affinity_enabled && affinity >= 0 && affinity < ha->max_rsp_queues - 1) *rsp = ha->rsp_q_map[affinity + 1]; else diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 8fcd99eeec00..b6202fe118ac 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1507,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - if (ql2xmultique_tag) + if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; else req = vha->req; @@ -2324,7 +2324,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct 
fc_port *fcport, vha = fcport->vha; ha = vha->hw; req = vha->req; - if (ql2xmultique_tag) + if (ha->flags.cpu_affinity_enabled) rsp = ha->rsp_q_map[tag + 1]; else rsp = req->rsp; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f0396e79b6fa..d7b271339a5c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -287,9 +287,12 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) int ques, req, ret; struct qla_hw_data *ha = vha->hw; + if (!(ha->fw_attributes & BIT_6)) { + qla_printk(KERN_INFO, ha, + "Firmware is not multi-queue capable\n"); + goto fail; + } if (ql2xmultique_tag) { - /* CPU affinity mode */ - ha->wq = create_workqueue("qla2xxx_wq"); /* create a request queue for IO */ options |= BIT_7; req = qla25xx_create_req_que(ha, options, 0, 0, -1, @@ -299,6 +302,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) "Can't create request queue\n"); goto fail; } + ha->wq = create_workqueue("qla2xxx_wq"); vha->req = ha->req_q_map[req]; options |= BIT_1; for (ques = 1; ques < ha->max_rsp_queues; ques++) { @@ -309,6 +313,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) goto fail2; } } + ha->flags.cpu_affinity_enabled = 1; + DEBUG2(qla_printk(KERN_INFO, ha, "CPU affinity mode enabled, no. of response" " queues:%d, no. of request queues:%d\n", @@ -317,8 +323,13 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) return 0; fail2: qla25xx_delete_queues(vha); + destroy_workqueue(ha->wq); + ha->wq = NULL; fail: ha->mqenable = 0; + kfree(ha->req_q_map); + kfree(ha->rsp_q_map); + ha->max_req_queues = ha->max_rsp_queues = 1; return 1; } @@ -1923,6 +1934,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto probe_init_failed; /* Alloc arrays of request and response ring ptrs */ +que_init: if (!qla2x00_alloc_queues(ha)) { qla_printk(KERN_WARNING, ha, "[ERROR] Failed to allocate memory for queue" @@ -1959,11 +1971,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_failed; } - if (ha->mqenable) - if (qla25xx_setup_mode(base_vha)) + if (ha->mqenable) { + if (qla25xx_setup_mode(base_vha)) { qla_printk(KERN_WARNING, ha, "Can't create queues, falling back to single" " queue mode\n"); + goto que_init; + } + } if (ha->flags.running_gold_fw) goto skip_dpc; From d430ddc660d2f9875125b4f13a2b9279313db529 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 31 Jul 2009 15:09:34 -0700 Subject: [PATCH 065/209] [SCSI] qla2xxx: Update version number to 8.03.01-k5 Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 84369705a9ad..87a884e341a1 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.01-k4" +#define QLA2XXX_VERSION "8.03.01-k5" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 From d68866927ddd0ed3145cb3fba4a1c07ca5e4709d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 3 Aug 2009 13:54:47 -0700 Subject: [PATCH 066/209] [SCSI] ibmvscsi: avoid unnecessary use of kzalloc_pool The allocated struct is manually zeroed after allocation, so avoid using the (broken) kzalloc mempool (which does not re-zero previously used items when they are returned to the pool). 
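The distinction, as a hedged sketch of the allocation-site pattern (paraphrased; the gfp flag and variable names are assumptions, not quoted from ibmvfc):

    /* a mempool may hand back a previously used element untouched, so a
     * caller that needs zeroed memory must clear it itself, which this
     * driver already does, making a kmalloc-backed pool sufficient */
    tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
    if (!tgt)
            return -ENOMEM;
    memset(tgt, 0, sizeof(*tgt));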
Acked-by: Brian King Signed-off-by: Sage Weil Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 166d96450a0e..bb2c696c006a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4217,7 +4217,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) if (!vhost->trace) goto free_disc_buffer; - vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, + vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, sizeof(struct ibmvfc_target)); if (!vhost->tgt_pool) { From a2cf8a6306c89223d0ed35a7e9d40da99902e32a Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso A Date: Fri, 7 Aug 2009 16:42:21 -0400 Subject: [PATCH 067/209] [SCSI] ch: Check NULL for kmalloc() return Verify that ch->dt is not NULL before using it. Signed-off-by: Davidlohr Bueso Signed-off-by: James Bottomley --- drivers/scsi/ch.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 7b1633a8c15a..fe11c1d4b31d 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -353,6 +353,12 @@ ch_readconfig(scsi_changer *ch) /* look up the devices of the data transfer elements */ ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device), GFP_KERNEL); + + if (!ch->dt) { + kfree(buffer); + return -ENOMEM; + } + for (elem = 0; elem < ch->counts[CHET_DT]; elem++) { id = -1; lun = 0; From edced191e4512b7795380563634f4d44b21c684a Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sat, 8 Aug 2009 11:36:06 +0200 Subject: [PATCH 068/209] [SCSI] nsp_cs: fix buf overflow In nsp_cs_config there is a wrong struct nsp_cs_configdata allocation. It allocates only sizeof(pointer to nsp_cs_configdata) for a whole structure. Add a dereference to the sizeof to allocate sizeof(nsp_cs_configdata). Signed-off-by: Jiri Slaby Signed-off-by: James Bottomley --- drivers/scsi/pcmcia/nsp_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 70b60ade049e..e32c344d7ad8 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1713,7 +1713,7 @@ static int nsp_cs_config(struct pcmcia_device *link) nsp_dbg(NSP_DEBUG_INIT, "in"); - cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL); + cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL); if (!cfg_mem) return -ENOMEM; cfg_mem->data = data; From 5f91bb050ecc4ff1d8d3d07edbe550c8f431c5e1 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Mon, 10 Aug 2009 11:59:28 -0500 Subject: [PATCH 069/209] [SCSI] reservation conflict after timeout causes device to be taken offline An IBM tape drive failed to complete a PERSISTENT RESERVE IN within the scsi cmd timeout. Error recovery was initiated and it sequenced from abort through taking the tape drive offline. The device was taken offline because it repeatedly responded to the TUR command issued by error recovery with a RESERVATION CONFLICT status. The tape drive was reserved to another system. This is perfectly legitimate response to TUR, and is one that an escalation of recovery is unlikely to clear. Further, escalation of recovery can have undesirable side effects on the operation of tape drives shared with other initiators. Instead of escalating recovery, error recovery should treat the RESERVATION CONFLICT response to the TUR as a good status, giving the issuer of the command the opportunity to handle the timeout and reservation conflict. 
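A hedged sketch of what the issuer can now do with the conflict that is passed back up (the policy shown is illustrative, not part of this patch):

    /* the originator sees the SCSI status instead of an offlined device
     * and can apply its own reservation policy */
    if (status_byte(cmd->result) == RESERVATION_CONFLICT) {
            /* reserved by another initiator: retry later or report the
             * conflict to the caller */
            return -EBUSY;          /* illustrative choice */
    }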
Signed-off-by: Michael reed Signed-off-by: James Bottomley --- drivers/scsi/scsi_error.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a1689353d7fd..877204daf549 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -382,9 +382,13 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) * who knows? FIXME(eric) */ return SUCCESS; + case RESERVATION_CONFLICT: + /* + * let issuer deal with this, it could be just fine + */ + return SUCCESS; case BUSY: case QUEUE_FULL: - case RESERVATION_CONFLICT: default: return FAILED; } From 95a3639e275fb7aec5c9a2116c88a2cdd23e0b8b Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 11 Aug 2009 10:59:09 -0500 Subject: [PATCH 070/209] [SCSI] fix bugs in scsi_vpd_inquiry() Universally, SCSI functions assume the lengths fed in are those of the buffer to DMA data to, not the lengths of the data minus the header. scsi_vpd_inquiry() assumed the latter and got it wrong, so fix up all the functions to use the correct assumption (and fix a bug where INQUIRY in SCSI-2 dcannot go over 255). [jejb: Matthew posted an identical version of this at the same time I did] Signed-off-by: Matthew Wilcox Signed-off-by: James Bottomley --- drivers/scsi/scsi.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2de5f3ad640b..b6e03074cb8f 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -994,7 +994,7 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, * all the existing users tried this hard. */ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, - len + 4, NULL, 30 * HZ, 3, NULL); + len, NULL, 30 * HZ, 3, NULL); if (result) return result; @@ -1021,13 +1021,14 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page) { int i, result; unsigned int len; - unsigned char *buf = kmalloc(259, GFP_KERNEL); + const unsigned int init_vpd_len = 255; + unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL); if (!buf) return NULL; /* Ask for all the pages supported by this device */ - result = scsi_vpd_inquiry(sdev, buf, 0, 255); + result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len); if (result) goto fail; @@ -1050,12 +1051,12 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page) * Some pages are longer than 255 bytes. The actual length of * the page is returned in the header. 
*/ - len = (buf[2] << 8) | buf[3]; - if (len <= 255) + len = ((buf[2] << 8) | buf[3]) + 4; + if (len <= init_vpd_len) return buf; kfree(buf); - buf = kmalloc(len + 4, GFP_KERNEL); + buf = kmalloc(len, GFP_KERNEL); result = scsi_vpd_inquiry(sdev, buf, page, len); if (result) goto fail; From 4dbfb544ad5a28ac9e60634bdfbf09d2eb39cdb5 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 12 Aug 2009 12:43:20 -0700 Subject: [PATCH 071/209] [SCSI] scsi_dh: add two SUN devices to the list of devices supported by default Reported-by: Rice Brown Signed-Off-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index fd0544f7da81..bea92ef7e269 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -633,6 +633,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"DELL", "MD3000i"}, {"LSI", "INF-01-00"}, {"ENGENIO", "INF-01-00"}, + {"STK", "FLEXLINE 380"}, + {"SUN", "CSM100_R_FC"}, {NULL, NULL}, }; From 9d2e9d66a3f032667934144cd61c396ba49f090d Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:48:44 +0530 Subject: [PATCH 072/209] [SCSI] mptsas : Change DEFINED value of can queue for FC and SAS devices. Change DEFINED value of can queue for FC and SAS devices. Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 3 ++- drivers/message/fusion/mptsas.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 1c8514dc31ca..73810fa53456 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -157,8 +157,9 @@ /* * Try to keep these at 2^N-1 */ -#define MPT_FC_CAN_QUEUE 127 +#define MPT_FC_CAN_QUEUE 1024 #define MPT_SCSI_CAN_QUEUE 127 +#define MPT_SAS_CAN_QUEUE 127 /* * Set the MAX_SGE value based on user input. diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 55ff25244af4..c03968b9e7d2 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1891,7 +1891,7 @@ static struct scsi_host_template mptsas_driver_template = { .eh_bus_reset_handler = mptscsih_bus_reset, .eh_host_reset_handler = mptscsih_host_reset, .bios_param = mptscsih_bios_param, - .can_queue = MPT_FC_CAN_QUEUE, + .can_queue = MPT_SAS_CAN_QUEUE, .this_id = -1, .sg_tablesize = MPT_SCSI_SG_DEPTH, .max_sectors = 8192, From a247fa4521ccec7cb82a9f2d4e8544fce27ea109 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:50:02 +0530 Subject: [PATCH 073/209] [SCSI] mptsas : Removed mptscsih_timer_expired. Removed mptscsih_timer_expired. This timer is no more use. 
Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 14 --------- drivers/message/fusion/mptfc.c | 19 ------------ drivers/message/fusion/mptsas.c | 19 ------------ drivers/message/fusion/mptscsih.c | 48 ------------------------------- drivers/message/fusion/mptscsih.h | 1 - drivers/message/fusion/mptspi.c | 21 -------------- 6 files changed, 122 deletions(-) diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 73810fa53456..bf446d41cda2 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -880,23 +880,9 @@ typedef enum { typedef struct _MPT_SCSI_HOST { MPT_ADAPTER *ioc; - int port; - u32 pad0; - MPT_LOCAL_REPLY *pLocal; /* used for internal commands */ - struct timer_list timer; - /* Pool of memory for holding SCpnts before doing - * OS callbacks. freeQ is the free pool. - */ - u8 negoNvram; /* DV disabled, nego NVRAM */ - u8 pad1; - u8 rsvd[2]; - MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ - struct scsi_cmnd *abortSCpnt; - MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ ushort sel_timeout[MPT_MAX_FC_DEVICES]; char *info_kbuf; long last_queue_full; - u16 tm_iocstatus; u16 spi_pending; struct list_head target_reset_list; } MPT_SCSI_HOST; diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index e61df133a59e..ebf6ae024da4 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1288,25 +1288,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); - /* Clear the TM flags - */ - hd->abortSCpnt = NULL; - - /* Clear the pointer used to store - * single-threaded commands, i.e., those - * issued during a bus scan, dv and - * configuration pages. - */ - hd->cmdPtr = NULL; - - /* Initialize this SCSI Hosts' timers - * To use, set the timer expires field - * and add_timer - */ - init_timer(&hd->timer); - hd->timer.data = (unsigned long) hd; - hd->timer.function = mptscsih_timer_expired; - hd->last_queue_full = 0; sh->transportt = mptfc_transport_template; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index c03968b9e7d2..54fc73c380ee 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4821,25 +4821,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); - /* Clear the TM flags - */ - hd->abortSCpnt = NULL; - - /* Clear the pointer used to store - * single-threaded commands, i.e., those - * issued during a bus scan, dv and - * configuration pages. 
- */ - hd->cmdPtr = NULL; - - /* Initialize this SCSI Hosts' timers - * To use, set the timer expires field - * and add_timer - */ - init_timer(&hd->timer); - hd->timer.data = (unsigned long) hd; - hd->timer.function = mptscsih_timer_expired; - ioc->sas_data.ptClear = mpt_pt_clear; hd->last_queue_full = 0; diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 8440f78f6969..abdc727c4a51 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1729,9 +1729,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) */ mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx); ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext; - - hd->abortSCpnt = SCpnt; - retval = mptscsih_IssueTaskMgmt(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, vdevice->vtarget->channel, @@ -2627,50 +2624,6 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, return 1; } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* mptscsih_timer_expired - Call back for timer process. - * Used only for dv functionality. - * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long - * - */ -void -mptscsih_timer_expired(unsigned long data) -{ - MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data; - MPT_ADAPTER *ioc = hd->ioc; - - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", ioc->name, hd->cmdPtr)); - - if (hd->cmdPtr) { - MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr; - - if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) { - /* Desire to issue a task management request here. - * TM requests MUST be single threaded. - * If old eh code and no TM current, issue request. - * If new eh code, do nothing. Wait for OS cmd timeout - * for bus reset. - */ - } else { - /* Perform a FW reload */ - if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) { - printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); - } - } - } else { - /* This should NEVER happen */ - printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", ioc->name); - } - - /* No more processing. - * TM call will generate an interrupt for SCSI TM Management. - * The FW will reply to all outstanding commands, callback will finish cleanup. - * Hard reset clean-up will free all resources. 
- */ - ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", ioc->name)); - - return; -} /** * mptscsih_get_completion_code - @@ -3265,6 +3218,5 @@ EXPORT_SYMBOL(mptscsih_scandv_complete); EXPORT_SYMBOL(mptscsih_event_process); EXPORT_SYMBOL(mptscsih_ioc_reset); EXPORT_SYMBOL(mptscsih_change_queue_depth); -EXPORT_SYMBOL(mptscsih_timer_expired); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index eb3f677528ac..e0b33e04a33b 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -129,7 +129,6 @@ extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRA extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); -extern void mptscsih_timer_expired(unsigned long data); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index c5b808fd55ba..69f4257419b5 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -1472,28 +1472,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id) dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); - /* Clear the TM flags - */ - hd->abortSCpnt = NULL; - - /* Clear the pointer used to store - * single-threaded commands, i.e., those - * issued during a bus scan, dv and - * configuration pages. - */ - hd->cmdPtr = NULL; - - /* Initialize this SCSI Hosts' timers - * To use, set the timer expires field - * and add_timer - */ - init_timer(&hd->timer); - hd->timer.data = (unsigned long) hd; - hd->timer.function = mptscsih_timer_expired; - ioc->spi_data.Saf_Te = mpt_saf_te; - - hd->negoNvram = MPT_SCSICFG_USE_NVRAM; ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "saf_te %x\n", ioc->name, From d23321b4880fb7a95fa58277fafb98a4368b4c92 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:51:25 +0530 Subject: [PATCH 074/209] [SCSI] mptsas : Handle INSUFFICIENT resources status as similar to IOC BUSY status Handle insufficient resources status as similar to busy status. Signed-off-by: James Bottomley --- drivers/message/fusion/mptscsih.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index abdc727c4a51..1466a5aed5f8 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -689,6 +689,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) switch(status) { case MPI_IOCSTATUS_BUSY: /* 0x0002 */ + case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */ /* CHECKME! 
* Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry) * But not: DID_BUS_BUSY lest one risk @@ -872,7 +873,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */ case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */ case MPI_IOCSTATUS_RESERVED: /* 0x0005 */ - case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */ case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */ case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ From 4b97650b555b1cd09b547104d98da0ff700187d9 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:52:03 +0530 Subject: [PATCH 075/209] [SCSI] mptsas : Change config request timeout value to 30 seconds. Change config request timeout value to 30 seconds. Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 54fc73c380ee..936e35ae88d4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -72,6 +72,7 @@ */ #define MPTSAS_RAID_CHANNEL 1 +#define SAS_CONFIG_PAGE_TIMEOUT 30 MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); @@ -661,7 +662,7 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, cfg.pageAddr = starget->id; cfg.cfghdr.hdr = &hdr; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; if (mpt_config(ioc, &cfg) != 0) goto out; @@ -1318,7 +1319,7 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, cfg.pageAddr = form + form_specific; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; error = mpt_config(ioc, &cfg); if (error) @@ -1926,7 +1927,7 @@ static int mptsas_get_linkerrors(struct sas_phy *phy) cfg.pageAddr = phy->identify.phy_identifier; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; error = mpt_config(ioc, &cfg); if (error) @@ -2278,7 +2279,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) cfg.pageAddr = 0; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; error = mpt_config(ioc, &cfg); if (error) @@ -2349,7 +2350,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc) cfg.cfghdr.ehdr = &hdr; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED; cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION; @@ -2411,7 +2412,7 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, cfg.cfghdr.ehdr = &hdr; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; /* Get Phy Pg 0 for each Phy. 
*/ cfg.physAddr = -1; @@ -2479,7 +2480,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, cfg.physAddr = -1; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; memset(device_info, 0, sizeof(struct mptsas_devinfo)); error = mpt_config(ioc, &cfg); @@ -2554,7 +2555,7 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, cfg.pageAddr = form + form_specific; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; memset(port_info, 0, sizeof(struct mptsas_portinfo)); error = mpt_config(ioc, &cfg); @@ -2635,7 +2636,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, cfg.pageAddr = form + form_specific; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ - cfg.timeout = 10; + cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT; error = mpt_config(ioc, &cfg); if (error) From 79a3ec1ace2329d115ecd2445379b46aed3286b0 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:52:58 +0530 Subject: [PATCH 076/209] [SCSI] mptsas : set max_id to infinite value. Do not set max_id value received from FW. Once SAS transport layer is introduced max_id value is missleading to SCSI mid layer. Use max_id to infinite value. logic of can queue of scsi host is changed. Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 5 ++--- drivers/message/fusion/mptscsih.c | 5 ++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 936e35ae88d4..21bd78e4f306 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4761,10 +4761,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* set 16 byte cdb's */ sh->max_cmd_len = 16; - - sh->max_id = ioc->pfacts[0].PortSCSIID; + sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue); + sh->max_id = -1; sh->max_lun = max_lun; - sh->transportt = mptsas_transport_template; /* Required entry. diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 1466a5aed5f8..0e402eb95716 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2290,7 +2290,10 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth) else max_depth = MPT_SCSI_CMD_PER_DEV_LOW; } else - max_depth = MPT_SCSI_CMD_PER_DEV_HIGH; + max_depth = ioc->sh->can_queue; + + if (!sdev->tagged_supported) + max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; From d130691725e9c4a098a59fc64f5ac3dcb1e2aa3b Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:53:51 +0530 Subject: [PATCH 077/209] [SCSI] mptsas : Code cleanup of host page alloc and diag reset. Code cleanup of host page alloc and diag reset. 
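To make the cleanup in the hunks below easier to follow: the change collapses the open-coded reset-notification blocks into plain loops over the driver's handler table, calling each registered handler for the IOC and, where present, its peer IOC. A minimal standalone sketch of that pattern follows; the table size, handler signature and names are illustrative assumptions, not the driver's own.

#include <stdio.h>

#define MAX_PROTOCOL_DRIVERS 16

typedef void (*reset_handler_t)(void *ioc, int phase);

enum { PRE_RESET = 1, POST_RESET = 2 };

static reset_handler_t reset_handlers[MAX_PROTOCOL_DRIVERS];

static void notify_reset_handlers(void *ioc, void *alt_ioc, int phase)
{
        int idx;

        /* slot 0 is reserved, so walk the table downwards and stop at 1 */
        for (idx = MAX_PROTOCOL_DRIVERS - 1; idx; idx--) {
                if (!reset_handlers[idx])
                        continue;
                reset_handlers[idx](ioc, phase);
                if (alt_ioc)            /* mirror the call to the peer IOC */
                        reset_handlers[idx](alt_ioc, phase);
        }
}

static void demo_handler(void *ioc, int phase)
{
        printf("reset notification for %p, phase %d\n", ioc, phase);
}

int main(void)
{
        int primary = 0, secondary = 0;

        reset_handlers[3] = demo_handler;
        notify_reset_handlers(&primary, &secondary, PRE_RESET);
        notify_reset_handlers(&primary, NULL, POST_RESET);
        return 0;
}
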
Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 84 ++++++++++++-------------------- 1 file changed, 30 insertions(+), 54 deletions(-) diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 5d0ba4f5924c..8ab7b37ed70d 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1297,12 +1297,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init) psge = (char *)&ioc_init->HostPageBufferSGE; flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_END_OF_BUFFER; - if (sizeof(dma_addr_t) == sizeof(u64)) { - flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING; - } flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; flags_length |= ioc->HostPageBuffer_sz; ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma); @@ -2224,8 +2220,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) int hard; int rc=0; int ii; - u8 cb_idx; - int handlers; int ret = 0; int reset_alt_ioc_active = 0; int irq_allocated = 0; @@ -2548,34 +2542,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) mpt_get_manufacturing_pg_0(ioc); } - /* - * Call each currently registered protocol IOC reset handler - * with post-reset indication. - * NOTE: If we're doing _IOC_BRINGUP, there can be no - * MptResetHandlers[] registered yet. - */ - if (hard_reset_done) { - rc = handlers = 0; - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if ((ret == 0) && MptResetHandlers[cb_idx]) { - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Calling IOC post_reset handler #%d\n", - ioc->name, cb_idx)); - rc += mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET); - handlers++; - } - - if (alt_ioc_ready && MptResetHandlers[cb_idx]) { - drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Calling IOC post_reset handler #%d\n", - ioc->alt_ioc->name, cb_idx)); - rc += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_POST_RESET); - handlers++; - } - } - /* FIXME? Examine results here? */ - } - out: if ((ret != 0) && irq_allocated) { free_irq(ioc->pci_irq, ioc); @@ -3938,6 +3904,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) int count = 0; u32 diag1val = 0; MpiFwHeader_t *cached_fw; /* Pointer to FW */ + u8 cb_idx; /* Clear any existing interrupts */ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); @@ -3956,6 +3923,18 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) else mdelay(1); + /* + * Call each currently registered protocol IOC reset handler + * with pre-reset indication. + * NOTE: If we're doing _IOC_BRINGUP, there can be no + * MptResetHandlers[] registered yet. + */ + for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { + if (MptResetHandlers[cb_idx]) + (*(MptResetHandlers[cb_idx]))(ioc, + MPT_IOC_PRE_RESET); + } + for (count = 0; count < 60; count ++) { doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); doorbell &= MPI_IOC_STATE_MASK; @@ -4052,25 +4031,15 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) * NOTE: If we're doing _IOC_BRINGUP, there can be no * MptResetHandlers[] registered yet. 
*/ - { - u8 cb_idx; - int r = 0; - - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if (MptResetHandlers[cb_idx]) { - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Calling IOC pre_reset handler #%d\n", - ioc->name, cb_idx)); - r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET); - if (ioc->alt_ioc) { - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Calling alt-%s pre_reset handler #%d\n", - ioc->name, ioc->alt_ioc->name, cb_idx)); - r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_PRE_RESET); - } + for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { + if (MptResetHandlers[cb_idx]) { + mpt_signal_reset(cb_idx, + ioc, MPT_IOC_PRE_RESET); + if (ioc->alt_ioc) { + mpt_signal_reset(cb_idx, + ioc->alt_ioc, MPT_IOC_PRE_RESET); } } - /* FIXME? Examine results here? */ } if (ioc->cached_fw) @@ -6956,7 +6925,7 @@ EXPORT_SYMBOL(mpt_halt_firmware); int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { - int rc; + int rc; u8 cb_idx; unsigned long flags; unsigned long time_count; @@ -6982,8 +6951,6 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) ioc->alt_ioc->ioc_reset_in_progress = 1; spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - /* FIXME: If do_ioc_recovery fails, repeat.... - */ /* The SCSI driver needs to adjust timeouts on all current * commands prior to the diagnostic reset being issued. @@ -7020,6 +6987,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { + if (MptResetHandlers[cb_idx]) { + mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET); + if (ioc->alt_ioc) + mpt_signal_reset(cb_idx, + ioc->alt_ioc, MPT_IOC_POST_RESET); + } + } + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler: completed (%d seconds): %s\n", ioc->name, From 8ea0696e9251c2283d3d306c6b096e71cf6a60c0 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 5 Aug 2009 12:54:32 +0530 Subject: [PATCH 078/209] [SCSI] mptsas : Bump version to 3.04.11 Bump version to 3.04.11 Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index bf446d41cda2..79b17ceb714a 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.10" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09" +#define MPT_LINUX_VERSION_COMMON "3.04.11" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.11" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ From 60df75c169c5d42f4eef03d80e65c3fe223a1620 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 13 Aug 2009 19:23:12 +0000 Subject: [PATCH 079/209] [SCSI] update MAINTAINERS with new email Novell is now funding SCSI work, so the MAINTAINERS file should reflect this. Signed-off-by: James Bottomley --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 60299a9a7adb..b841e3c20e9b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4408,7 +4408,7 @@ F: drivers/scsi/sg.c F: include/scsi/sg.h SCSI SUBSYSTEM -M: "James E.J. Bottomley" +M: "James E.J. 
Bottomley" L: linux-scsi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git From cdf69bb91b0ef14f0e0a987a4430764e6c290644 Mon Sep 17 00:00:00 2001 From: "Yanqing_Liu@Dell.com" Date: Mon, 17 Aug 2009 14:31:42 -0500 Subject: [PATCH 080/209] [SCSI] scsi_dh_rdac: add support for next generation of Dell PV array This patch is to add DM support for next generation of Dell PowerVault storage array. Signed-off-by: Yanqing Liu Acked-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index bea92ef7e269..872292f659c3 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -631,6 +631,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"SUN", "LCSM100_F"}, {"DELL", "MD3000"}, {"DELL", "MD3000i"}, + {"DELL", "MD32xx"}, + {"DELL", "MD32xxi"}, {"LSI", "INF-01-00"}, {"ENGENIO", "INF-01-00"}, {"STK", "FLEXLINE 380"}, From a67417ab7eeff45bba55666c0e1083260f3624ee Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:06 +0200 Subject: [PATCH 081/209] [SCSI] zfcp: invalid usage after free of port resources In certain error scenarios ports, rports are getting attached, validated and removed from the systems environment. Depending on the layer this occurs asynchronously. This patch fixes the few races which existed and ensures all references and cross references are cleared at the time they're invalid. In addition fc transports actions are only scheduled when required. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 7 +++++-- drivers/s390/scsi/zfcp_scsi.c | 6 ++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 2ccbd185a5fb..fa2460b42298 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -672,12 +672,15 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, */ void zfcp_port_dequeue(struct zfcp_port *port) { - wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); write_lock_irq(&zfcp_data.config_lock); list_del(&port->list); write_unlock_irq(&zfcp_data.config_lock); - if (port->rport) + if (port->rport) { port->rport->dd_data = NULL; + port->rport = NULL; + } + wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); + cancel_work_sync(&port->rport_work); /* usually not necessary */ zfcp_adapter_put(port->adapter); sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); device_unregister(&port->sysfs_device); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 6925a1784682..54a7a7474aa5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -582,8 +582,10 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) zfcp_port_get(port); port->rport_task = RPORT_DEL; - if (!queue_work(zfcp_data.work_queue, &port->rport_work)) - zfcp_port_put(port); + if (port->rport && queue_work(zfcp_data.work_queue, &port->rport_work)) + return; + + zfcp_port_put(port); } void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) From d46f384a89c2378cb7858747faa1935db17e22a8 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 
Aug 2009 15:43:07 +0200 Subject: [PATCH 082/209] [SCSI] zfcp: Move debug data from zfcp_data to own data structure The struct zfcp_adapter includes everything related to the debug traces. This introduces dependences between the definitions in zfcp_def.h and zfcp_dbf.h. Move all debug related data structures to a new data structure to break those dependencies and manage the debug data in zfcp_dbf.[hc]. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 - drivers/s390/scsi/zfcp_dbf.c | 217 ++++++++++++++++++++--------------- drivers/s390/scsi/zfcp_dbf.h | 17 ++- drivers/s390/scsi/zfcp_def.h | 15 +-- drivers/s390/scsi/zfcp_fsf.h | 3 +- 5 files changed, 143 insertions(+), 113 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index fa2460b42298..870fe79bc7ea 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -502,10 +502,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->hba_dbf_lock); - spin_lock_init(&adapter->san_dbf_lock); - spin_lock_init(&adapter->scsi_dbf_lock); - spin_lock_init(&adapter->rec_dbf_lock); spin_lock_init(&adapter->req_q_lock); spin_lock_init(&adapter->qdio_stat_lock); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index b99b87ce5a39..995e6128b6c9 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -3,7 +3,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #define KMSG_COMPONENT "zfcp" @@ -11,6 +11,7 @@ #include #include +#include "zfcp_dbf.h" #include "zfcp_ext.h" static u32 dbfsize = 4; @@ -126,6 +127,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; + struct zfcp_dbf *dbf = adapter->dbf; struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = &qtcb->prefix.prot_status_qual; @@ -134,12 +136,12 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) struct zfcp_port *port; struct zfcp_unit *unit; struct zfcp_send_els *send_els; - struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; + struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; struct zfcp_hba_dbf_record_response *response = &rec->u.response; int level; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); @@ -224,7 +226,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) break; } - debug_event(adapter->hba_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); /* have fcp channel microcode fixed to use as little as possible */ if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { @@ -232,11 +234,11 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) char *buf = (char *)qtcb + qtcb->header.log_start; int len = qtcb->header.log_length; for (; len && !buf[len - 1]; len--); - zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level, - buf, len); + zfcp_dbf_hexdump(dbf->hba_dbf, rec, sizeof(*rec), level, buf, + len); } - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -248,10 +250,11 @@ void 
zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, struct fsf_status_read_buffer *status_buffer) { - struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); @@ -293,8 +296,8 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 2, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -308,17 +311,18 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int qdio_error, int sbal_index, int sbal_count) { - struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); r->u.qdio.qdio_error = qdio_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; - debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -329,17 +333,18 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req) { - struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; struct fsf_status_read_buffer *sr_buf = req->data; struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); - debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } static void zfcp_hba_dbf_view_response(char **p, struct zfcp_hba_dbf_record_response *r) @@ -554,7 +559,8 @@ static struct debug_view zfcp_rec_dbf_view = { */ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) { - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags = 0; struct list_head *entry; unsigned ready = 0, running = 0, total; @@ -565,15 +571,15 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) running++; total = adapter->erp_total_count; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_THREAD; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); r->u.thread.total = total; r->u.thread.ready = ready; r->u.thread.running = running; - 
debug_event(adapter->rec_dbf, 6, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 6, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -596,10 +602,11 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, atomic_t *status, atomic_t *erp_count, u64 wwpn, u32 d_id, u64 fcp_lun) { - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TARGET; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -609,8 +616,8 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, r->u.target.d_id = d_id; r->u.target.fcp_lun = fcp_lun; r->u.target.erp_count = atomic_read(erp_count); - debug_event(adapter->rec_dbf, 3, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 3, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -672,10 +679,11 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, void *action, struct zfcp_adapter *adapter, struct zfcp_port *port, struct zfcp_unit *unit) { - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TRIGGER; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -692,8 +700,8 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, r->u.trigger.us = atomic_read(&unit->status); r->u.trigger.fcp_lun = unit->fcp_lun; } - debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, action ? 
1 : 4, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -704,10 +712,11 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_ACTION; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -715,8 +724,8 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) r->u.action.status = erp_action->status; r->u.action.step = erp_action->step; r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; - debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 5, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -728,13 +737,14 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_dbf *dbf = adapter->dbf; struct ct_hdr *hdr = sg_virt(ct->req); - struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; + struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; int level = 3; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -749,10 +759,10 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) oct->max_res_size = hdr->max_res_size; oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(adapter->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san_dbf, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), oct->len); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } /** @@ -765,12 +775,13 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct ct_hdr *hdr = sg_virt(ct->resp); - struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; int level = 3; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -785,10 +796,10 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) rct->max_res_size = hdr->max_res_size; rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(adapter->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san_dbf, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san_dbf, r, 
sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), rct->len); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } static void zfcp_san_dbf_event_els(const char *tag, int level, @@ -797,10 +808,11 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, int buflen) { struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_san_dbf_record *rec = &dbf->san_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); rec->fsf_reqid = fsf_req->req_id; @@ -808,10 +820,10 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, rec->s_id = s_id; rec->d_id = d_id; rec->u.els.ls_code = ls_code; - debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); - zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, + debug_event(dbf->san_dbf, level, rec, sizeof(*rec)); + zfcp_dbf_hexdump(dbf->san_dbf, rec, sizeof(*rec), level, buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } /** @@ -915,14 +927,15 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) { - struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; struct fcp_rsp_iu *fcp_rsp; char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; int offset = 0, buflen = 0; - spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); + spin_lock_irqsave(&dbf->scsi_dbf_lock, flags); do { memset(rec, 0, sizeof(*rec)); if (offset == 0) { @@ -981,9 +994,9 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, memcpy(dump->data, fcp_sns_info + offset, dump->size); offset += dump->size; } - debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->scsi_dbf, level, rec, sizeof(*rec)); } while (offset < buflen); - spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); } /** @@ -1087,6 +1100,22 @@ static struct debug_view zfcp_scsi_dbf_view = { NULL }; +static debug_info_t *zfcp_dbf_reg(const char *name, int level, + struct debug_view *view, int size) +{ + struct debug_info *d; + + d = debug_register(name, dbfsize, level, size); + if (!d) + return NULL; + + debug_register_view(d, &debug_hex_ascii_view); + debug_register_view(d, view); + debug_set_level(d, level); + + return d; +} + /** * zfcp_adapter_debug_register - registers debug feature for an adapter * @adapter: pointer to adapter for which debug features should be registered @@ -1095,52 +1124,56 @@ static struct debug_view zfcp_scsi_dbf_view = { int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) { char dbf_name[DEBUG_MAX_NAME_LEN]; + struct zfcp_dbf *dbf; + + dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); + if (!dbf) + return -ENOMEM; + + spin_lock_init(&dbf->hba_dbf_lock); + spin_lock_init(&dbf->san_dbf_lock); + spin_lock_init(&dbf->scsi_dbf_lock); + spin_lock_init(&dbf->rec_dbf_lock); /* debug feature area which records recovery activity */ sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); - 
adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_rec_dbf_record)); - if (!adapter->rec_dbf) - goto failed; - debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view); - debug_set_level(adapter->rec_dbf, 3); + dbf->rec_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_rec_dbf_view, + sizeof(struct zfcp_rec_dbf_record)); + if (!dbf->rec_dbf) + goto fail_rec; /* debug feature area which records HBA (FSF and QDIO) conditions */ sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); - adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_hba_dbf_record)); - if (!adapter->hba_dbf) - goto failed; - debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view); - debug_set_level(adapter->hba_dbf, 3); + dbf->hba_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_hba_dbf_view, + sizeof(struct zfcp_hba_dbf_record)); + if (!dbf->hba_dbf) + goto fail_hba; /* debug feature area which records SAN command failures and recovery */ sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); - adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_san_dbf_record)); - if (!adapter->san_dbf) - goto failed; - debug_register_view(adapter->san_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view); - debug_set_level(adapter->san_dbf, 6); + dbf->san_dbf = zfcp_dbf_reg(dbf_name, 6, &zfcp_san_dbf_view, + sizeof(struct zfcp_san_dbf_record)); + if (!dbf->san_dbf) + goto fail_san; /* debug feature area which records SCSI command failures and recovery */ sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); - adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_scsi_dbf_record)); - if (!adapter->scsi_dbf) - goto failed; - debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view); - debug_set_level(adapter->scsi_dbf, 3); + dbf->scsi_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_scsi_dbf_view, + sizeof(struct zfcp_scsi_dbf_record)); + if (!dbf->scsi_dbf) + goto fail_scsi; + adapter->dbf = dbf; return 0; - failed: - zfcp_adapter_debug_unregister(adapter); - +fail_scsi: + debug_unregister(dbf->san_dbf); +fail_san: + debug_unregister(dbf->hba_dbf); +fail_hba: + debug_unregister(dbf->rec_dbf); +fail_rec: + kfree(dbf); return -ENOMEM; } @@ -1150,12 +1183,10 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) */ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) { - debug_unregister(adapter->scsi_dbf); - debug_unregister(adapter->san_dbf); - debug_unregister(adapter->hba_dbf); - debug_unregister(adapter->rec_dbf); - adapter->scsi_dbf = NULL; - adapter->san_dbf = NULL; - adapter->hba_dbf = NULL; - adapter->rec_dbf = NULL; + debug_unregister(adapter->dbf->scsi_dbf); + debug_unregister(adapter->dbf->san_dbf); + debug_unregister(adapter->dbf->hba_dbf); + debug_unregister(adapter->dbf->rec_dbf); + kfree(adapter->dbf); + adapter->dbf = NULL; } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index a573f7344dd6..4cfd68fe8a26 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * This file is part of the zfcp device driver for * FCP adapters for IBM System z9 and zSeries. * - * Copyright IBM Corp. 2008, 2008 + * Copyright IBM Corp. 
2008, 2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -222,4 +222,19 @@ struct zfcp_scsi_dbf_record { u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; } __attribute__ ((packed)); +struct zfcp_dbf { + debug_info_t *rec_dbf; + debug_info_t *hba_dbf; + debug_info_t *san_dbf; + debug_info_t *scsi_dbf; + spinlock_t rec_dbf_lock; + spinlock_t hba_dbf_lock; + spinlock_t san_dbf_lock; + spinlock_t scsi_dbf_lock; + struct zfcp_rec_dbf_record rec_dbf_buf; + struct zfcp_hba_dbf_record hba_dbf_buf; + struct zfcp_san_dbf_record san_dbf_buf; + struct zfcp_scsi_dbf_record scsi_dbf_buf; +}; + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 49d0532bca1c..601f5a575def 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -37,10 +37,8 @@ #include #include #include -#include "zfcp_dbf.h" #include "zfcp_fsf.h" - /********************* GENERAL DEFINES *********************************/ #define REQUEST_LIST_SIZE 128 @@ -468,18 +466,7 @@ struct zfcp_adapter { u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ struct zfcp_wka_ports *gs; /* generic services */ - debug_info_t *rec_dbf; - debug_info_t *hba_dbf; - debug_info_t *san_dbf; /* debug feature areas */ - debug_info_t *scsi_dbf; - spinlock_t rec_dbf_lock; - spinlock_t hba_dbf_lock; - spinlock_t san_dbf_lock; - spinlock_t scsi_dbf_lock; - struct zfcp_rec_dbf_record rec_dbf_buf; - struct zfcp_hba_dbf_record hba_dbf_buf; - struct zfcp_san_dbf_record san_dbf_buf; - struct zfcp_scsi_dbf_record scsi_dbf_buf; + struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ struct qdio_initialize qdio_init_data; /* for qdio_establish */ struct fc_host_statistics *fc_stats; diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index df7f232faba8..dcc7c1dbcf58 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,13 +3,14 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #ifndef FSF_H #define FSF_H #include +#include #define FSF_QTCB_CURRENT_VERSION 0x00000001 From dcd20e2316cdc333dfdee09649dbe3642eb30e75 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:08 +0200 Subject: [PATCH 083/209] [SCSI] zfcp: Only collect SCSI debug data for matching trace levels The default trace level is to only trace failed SCSI commands. Thus it is not necessary to collect trace data for most SCSI commands since it will be thrown away later. Restructure the SCSI trace infrastructure to first check the trace level in a inline function and only do the expensive data collection for matching trace levels. 
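A minimal standalone sketch of the split described above, assuming a plain integer level per trace area (the names and level scale are illustrative, not the zfcp dbf API): the cheap gate is an inline check at every call site, and the expensive data collection only runs when the event level matches.

#include <stdio.h>

struct trace_area {
        int level;                      /* highest level this area records */
};

/* stand-in for the expensive part: copying CDB, sense data, FCP response */
static void collect_scsi_trace(struct trace_area *area, const char *tag,
                               int level, const char *cmnd)
{
        printf("recorded [%s] level=%d/%d cmnd=%s\n",
               tag, level, area->level, cmnd);
}

/* cheap gate, meant to be inlined at every call site */
static inline void scsi_trace_event(struct trace_area *area, const char *tag,
                                    int level, const char *cmnd)
{
        if (level <= area->level)
                collect_scsi_trace(area, tag, level, cmnd);
}

int main(void)
{
        struct trace_area scsi_area = { .level = 1 };   /* failures only */

        scsi_trace_event(&scsi_area, "rslt/fail", 1, "READ(10)");  /* kept */
        scsi_trace_event(&scsi_area, "rslt/good", 6, "READ(10)");  /* skipped */
        return 0;
}
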
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 58 +++----------------------------- drivers/s390/scsi/zfcp_dbf.h | 63 +++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_ext.h | 11 ++---- drivers/s390/scsi/zfcp_fsf.c | 1 + drivers/s390/scsi/zfcp_scsi.c | 3 +- 5 files changed, 73 insertions(+), 63 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 995e6128b6c9..61776d490d13 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -921,13 +921,11 @@ static struct debug_view zfcp_san_dbf_view = { NULL }; -static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req, - unsigned long old_req_id) +void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, + struct zfcp_fsf_req *fsf_req, + unsigned long old_req_id) { - struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; @@ -999,54 +997,6 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); } -/** - * zfcp_scsi_dbf_event_result - trace event for SCSI command completion - * @tag: tag indicating success or failure of SCSI command - * @level: trace level applicable for this event - * @adapter: adapter that has been used to issue the SCSI command - * @scsi_cmnd: SCSI command pointer - * @fsf_req: request used to issue SCSI command (might be NULL) - */ -void zfcp_scsi_dbf_event_result(const char *tag, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req) -{ - zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0); -} - -/** - * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort - * @tag: tag indicating success or failure of abort operation - * @adapter: adapter thas has been used to issue SCSI command to be aborted - * @scsi_cmnd: SCSI command to be aborted - * @new_fsf_req: request containing abort (might be NULL) - * @old_req_id: identifier of request containg SCSI command to be aborted - */ -void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *new_fsf_req, - unsigned long old_req_id) -{ - zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req, - old_req_id); -} - -/** - * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset - * @tag: tag indicating success or failure of reset operation - * @flag: indicates type of reset (Target Reset, Logical Unit Reset) - * @unit: unit that needs reset - * @scsi_cmnd: SCSI command which caused this error recovery - */ -void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, - struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) -{ - zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? 
"trst" : "lrst", tag, 1, - unit->port->adapter, scsi_cmnd, NULL, 0); -} - static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 4cfd68fe8a26..c2d5ef18b73a 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -22,7 +22,9 @@ #ifndef ZFCP_DBF_H #define ZFCP_DBF_H +#include "zfcp_ext.h" #include "zfcp_fsf.h" +#include "zfcp_def.h" #define ZFCP_DBF_TAG_SIZE 4 #define ZFCP_DBF_ID_SIZE 7 @@ -237,4 +239,65 @@ struct zfcp_dbf { struct zfcp_scsi_dbf_record scsi_dbf_buf; }; +static inline +void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, + struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *req, unsigned long old_id) +{ + struct zfcp_dbf *dbf = adapter->dbf; + + if (level <= dbf->scsi_dbf->level) + _zfcp_scsi_dbf_event(tag, tag2, level, dbf, scmd, req, old_id); +} + +/** + * zfcp_scsi_dbf_event_result - trace event for SCSI command completion + * @tag: tag indicating success or failure of SCSI command + * @level: trace level applicable for this event + * @adapter: adapter that has been used to issue the SCSI command + * @scmd: SCSI command pointer + * @fsf_req: request used to issue SCSI command (might be NULL) + */ +static inline +void zfcp_scsi_dbf_event_result(const char *tag, int level, + struct zfcp_adapter *adapter, + struct scsi_cmnd *scmd, + struct zfcp_fsf_req *fsf_req) +{ + zfcp_scsi_dbf_event("rslt", tag, level, adapter, scmd, fsf_req, 0); +} + +/** + * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort + * @tag: tag indicating success or failure of abort operation + * @adapter: adapter thas has been used to issue SCSI command to be aborted + * @scmd: SCSI command to be aborted + * @new_req: request containing abort (might be NULL) + * @old_id: identifier of request containg SCSI command to be aborted + */ +static inline +void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, + struct scsi_cmnd *scmd, + struct zfcp_fsf_req *new_req, + unsigned long old_id) +{ + zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scmd, new_req, old_id); +} + +/** + * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset + * @tag: tag indicating success or failure of reset operation + * @flag: indicates type of reset (Target Reset, Logical Unit Reset) + * @unit: unit that needs reset + * @scsi_cmnd: SCSI command which caused this error recovery + */ +static inline +void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, + struct zfcp_unit *unit, + struct scsi_cmnd *scsi_cmnd) +{ + zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? 
"trst" : "lrst", tag, 1, + unit->port->adapter, scsi_cmnd, NULL, 0); +} + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3044c6010306..28e76f5be1af 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -57,14 +57,9 @@ extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); -extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, - struct scsi_cmnd *, - struct zfcp_fsf_req *); -extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, - struct scsi_cmnd *, struct zfcp_fsf_req *, - unsigned long); -extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, - struct scsi_cmnd *); +extern void _zfcp_scsi_dbf_event(const char *, const char *, int, + struct zfcp_dbf *, struct scsi_cmnd *, + struct zfcp_fsf_req *, unsigned long); /* zfcp_erp.c */ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 47795fbf081f..2635216bfd93 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -11,6 +11,7 @@ #include #include "zfcp_ext.h" +#include "zfcp_dbf.h" #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 #define ZFCP_REQ_NO_QTCB 0x00000008 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 54a7a7474aa5..0bd80a90426a 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -9,8 +9,9 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include "zfcp_ext.h" #include +#include "zfcp_ext.h" +#include "zfcp_dbf.h" static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); From 2e261af84cdb6a6008a9c361443e35ea646ec683 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:09 +0200 Subject: [PATCH 084/209] [SCSI] zfcp: Only collect FSF/HBA debug data for matching trace levels The default trace level is to only trace failed FSF commands. Thus it is not necessary to collect trace data for most FSF commands, since it will be thrown away later. Restructure the FSF/HBA trace infrastructure to first check the trace level in a inline function and only do the expensive data collection for matching trace levels. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 44 ++++++------------------------ drivers/s390/scsi/zfcp_dbf.h | 53 ++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_ext.h | 9 ++++-- 3 files changed, 67 insertions(+), 39 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 61776d490d13..5568440ec2fc 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -120,14 +120,10 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, return p - out_buf; } -/** - * zfcp_hba_dbf_event_fsf_response - trace event for request completion - * @fsf_req: request that has been completed - */ -void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) +void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, + struct zfcp_fsf_req *fsf_req, + struct zfcp_dbf *dbf) { - struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_dbf *dbf = adapter->dbf; struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = &qtcb->prefix.prot_status_qual; @@ -138,31 +134,12 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) struct zfcp_send_els *send_els; struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; struct zfcp_hba_dbf_record_response *response = &rec->u.response; - int level; unsigned long flags; spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); - - if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && - (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE); - level = 1; - } else if (qtcb->header.fsf_status != FSF_GOOD) { - strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE); - level = 1; - } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || - (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) { - strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE); - level = 4; - } else if (qtcb->header.log_length) { - strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE); - level = 5; - } else { - strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE); - level = 6; - } + strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); response->fsf_command = fsf_req->fsf_command; response->fsf_reqid = fsf_req->req_id; @@ -241,14 +218,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } -/** - * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer - * @tag: tag indicating which kind of unsolicited status has been received - * @adapter: adapter that has issued the unsolicited status buffer - * @status_buffer: buffer containing payload of unsolicited status - */ -void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *status_buffer) +void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, + struct zfcp_adapter *adapter, + struct fsf_status_read_buffer *status_buffer) { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; @@ -296,7 +268,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(dbf->hba_dbf, 2, rec, sizeof(*rec)); + debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 
c2d5ef18b73a..bceaff449033 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -239,6 +239,59 @@ struct zfcp_dbf { struct zfcp_scsi_dbf_record scsi_dbf_buf; }; +static inline +void zfcp_hba_dbf_event_fsf_resp(const char *tag2, int level, + struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) +{ + if (level <= dbf->hba_dbf->level) + _zfcp_hba_dbf_event_fsf_response(tag2, level, req, dbf); +} + +/** + * zfcp_hba_dbf_event_fsf_response - trace event for request completion + * @fsf_req: request that has been completed + */ +static inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *req) +{ + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_qtcb *qtcb = req->qtcb; + + if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && + (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { + zfcp_hba_dbf_event_fsf_resp("perr", 1, req, dbf); + + } else if (qtcb->header.fsf_status != FSF_GOOD) { + zfcp_hba_dbf_event_fsf_resp("ferr", 1, req, dbf); + + } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || + (req->fsf_command == FSF_QTCB_OPEN_LUN)) { + zfcp_hba_dbf_event_fsf_resp("open", 4, req, dbf); + + } else if (qtcb->header.log_length) { + zfcp_hba_dbf_event_fsf_resp("qtcb", 5, req, dbf); + + } else { + zfcp_hba_dbf_event_fsf_resp("norm", 6, req, dbf); + } + } + +/** + * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer + * @tag: tag indicating which kind of unsolicited status has been received + * @adapter: adapter that has issued the unsolicited status buffer + * @status_buffer: buffer containing payload of unsolicited status + */ +static inline +void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, + struct fsf_status_read_buffer *buf) +{ + struct zfcp_dbf *dbf = adapter->dbf; + int level = 2; + + if (level <= dbf->hba_dbf->level) + _zfcp_hba_dbf_event_fsf_unsol(tag, level, adapter, buf); +} + static inline void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 28e76f5be1af..eeed322f32d0 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -45,9 +45,12 @@ extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); -extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); -extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, - struct fsf_status_read_buffer *); +extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, + struct zfcp_fsf_req *, + struct zfcp_dbf *dbf); +extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, + struct zfcp_adapter *, + struct fsf_status_read_buffer *); extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, int); extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, From 44f09f73766a97d9c1ff8bf787cfe6b932eabc2c Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:10 +0200 Subject: [PATCH 085/209] [SCSI] zfcp: Remove useless assignment Using a bitwise OR to not set anything at all is pointless so remove the useless statement. 
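A small standalone illustration of why the statement can simply be dropped, assuming (as the message above implies) that the "status" type flag encodes as zero; the macro value is an assumption for this sketch, not taken from the qdio headers.

#include <stdio.h>

#define TYPE_STATUS_FLAG 0x00u          /* assumed value; OR-ing it sets nothing */

/* fails to compile if the expression is ever non-zero again */
#define ASSERT_ZERO(e) ((void)sizeof(char[1 - 2 * !!(e)]))

int main(void)
{
        unsigned int flags = 0x40u;     /* some pre-existing SBAL flag bits */

        ASSERT_ZERO(TYPE_STATUS_FLAG);  /* documents the invariant */

        flags |= TYPE_STATUS_FLAG;      /* provably a no-op ...   */
        printf("flags = %#x\n", flags); /* ... still prints 0x40  */
        return 0;
}
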
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 2635216bfd93..c023db864dc5 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -832,7 +832,6 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } sbale = zfcp_qdio_sbale_req(req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->sbale_curr = 2; From 14e242ea55a8b807dc1fb7654941caf68a20cd81 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:11 +0200 Subject: [PATCH 086/209] [SCSI] zfcp: Only issue one test link command per port When the FCP channel returns a series of commands with the error status "test link", zfcp will send a series of ELS ADISC commands. This is technically no problem, but it is enough to only issue one test command per remote port. So, track whether a ELS ADISC command is already pending, and do not send a new one if there is already a pending command. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_fc.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 601f5a575def..c1becfc1e7fe 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -232,6 +232,7 @@ struct zfcp_ls_adisc { /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 +#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 /* well known address (WKA) port status*/ enum zfcp_wka_status { diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 47daebfa7e59..94c13bd32b5f 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -404,6 +404,7 @@ static void zfcp_fc_adisc_handler(unsigned long data) /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: + atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); zfcp_port_put(port); kfree(adisc); } @@ -450,13 +451,21 @@ void zfcp_fc_link_test_work(struct work_struct *work) port->rport_task = RPORT_DEL; zfcp_scsi_rport_work(&port->rport_work); + /* only issue one test command at one time per port */ + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) + goto out; + + atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + retval = zfcp_fc_adisc(port); if (retval == 0) return; /* send of ADISC was not possible */ + atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); +out: zfcp_port_put(port); } From 55c770fa11d21456e02dc7afb9a37404da9c7b4c Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:12 +0200 Subject: [PATCH 087/209] [SCSI] zfcp: Implicitly close all wka ports An adapter shutdown implicitly closes all open ports. Make sure to mark all WKA ports as offline, not only the directory server. Also make sure that no pending wka port work is running when the adapter is being removed. 
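A rough standalone sketch of the "force every well-known-address port offline" helper introduced in the hunks below: cancel any pending offline work first, then mark the port offline, and do it for all WKA ports rather than only the directory service. The types are simplified stand-ins and the cancel/locking steps are only mimicked in comments.

#include <stdio.h>

enum wka_status { WKA_PORT_OFFLINE, WKA_PORT_ONLINE };

struct wka_port {
        const char *name;
        enum wka_status status;
        int offline_work_pending;       /* stands in for the delayed work item */
};

static void wka_port_force_offline(struct wka_port *wka)
{
        wka->offline_work_pending = 0;  /* cancel_delayed_work_sync() equivalent */
        wka->status = WKA_PORT_OFFLINE; /* done under the port mutex in the driver */
        printf("%s forced offline\n", wka->name);
}

/* cover every well-known-address port, not just the directory service */
static void wka_ports_force_offline(struct wka_port *ports, int count)
{
        int i;

        for (i = 0; i < count; i++)
                wka_port_force_offline(&ports[i]);
}

int main(void)
{
        struct wka_port gs[] = {
                { "management service", WKA_PORT_ONLINE, 1 },
                { "time service",       WKA_PORT_ONLINE, 0 },
                { "directory service",  WKA_PORT_ONLINE, 1 },
                { "alias service",      WKA_PORT_ONLINE, 0 },
                { "key distribution",   WKA_PORT_ONLINE, 0 },
        };

        wka_ports_force_offline(gs, (int)(sizeof(gs) / sizeof(gs[0])));
        return 0;
}
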
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 1 + drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_ext.h | 4 +--- drivers/s390/scsi/zfcp_fc.c | 11 ++++++++++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 870fe79bc7ea..7aba6840243f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -555,6 +555,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) cancel_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); + zfcp_fc_wka_ports_force_offline(adapter->gs); zfcp_adapter_scsi_unregister(adapter); sysfs_remove_group(&adapter->ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c75d6f35cb5f..39e4dd15453f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -714,7 +714,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_qdio_close(adapter); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; - zfcp_fc_wka_port_force_offline(&adapter->gs->ds); + zfcp_fc_wka_ports_force_offline(adapter->gs); /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index eeed322f32d0..1a66695f11a2 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -104,12 +104,10 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); -extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); +extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); -extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); - /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 94c13bd32b5f..6d0fef92567b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -132,7 +132,7 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); } -void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) +static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) { cancel_delayed_work_sync(&wka->work); mutex_lock(&wka->mutex); @@ -140,6 +140,15 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) mutex_unlock(&wka->mutex); } +void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) +{ + zfcp_fc_wka_port_force_offline(&gs->ms); + zfcp_fc_wka_port_force_offline(&gs->ts); + zfcp_fc_wka_port_force_offline(&gs->ds); + zfcp_fc_wka_port_force_offline(&gs->as); + zfcp_fc_wka_port_force_offline(&gs->ks); +} + void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) { struct zfcp_wka_ports *gs = adapter->gs; From bd63eaf4b8d783e6033930e377e516169abcadc4 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:13 +0200 Subject: [PATCH 088/209] [SCSI] zfcp: fix layering oddities between zfcp_fsf and 
zfcp_qdio There is no need for the QDIO layer to have knowledge or do things which are done better by the FSF layer and vice versa. Straighten a few things to improve clarity. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ext.h | 2 +- drivers/s390/scsi/zfcp_fsf.c | 100 ++++++++++++++++++++++++---------- drivers/s390/scsi/zfcp_qdio.c | 45 +-------------- 3 files changed, 73 insertions(+), 74 deletions(-) diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 1a66695f11a2..6a3727bdb386 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -133,11 +133,11 @@ extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, extern int zfcp_fsf_send_els(struct zfcp_send_els *); extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, struct scsi_cmnd *); -extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, struct zfcp_unit *); +extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); /* zfcp_qdio.c */ extern int zfcp_qdio_allocate(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c023db864dc5..7ca2995aaf68 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -122,35 +122,6 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) } } -/** - * zfcp_fsf_req_dismiss_all - dismiss all fsf requests - * @adapter: pointer to struct zfcp_adapter - * - * Never ever call this without shutting down the adapter first. - * Otherwise the adapter would continue using and corrupting s390 storage. - * Included BUG_ON() call to ensure this is done. - * ERP is supposed to be the only user of this function. - */ -void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) -{ - struct zfcp_fsf_req *req, *tmp; - unsigned long flags; - LIST_HEAD(remove_queue); - unsigned int i; - - BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); - spin_lock_irqsave(&adapter->req_list_lock, flags); - for (i = 0; i < REQUEST_LIST_SIZE; i++) - list_splice_init(&adapter->req_list[i], &remove_queue); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - - list_for_each_entry_safe(req, tmp, &remove_queue, list) { - list_del(&req->list); - req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(req); - } -} - static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) { struct fsf_status_read_buffer *sr_buf = req->data; @@ -459,7 +430,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) * is called to process the completion status and trigger further * events related to the FSF request. */ -void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) +static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) { if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { zfcp_fsf_status_read_handler(req); @@ -492,6 +463,35 @@ void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) wake_up(&req->completion_wq); } +/** + * zfcp_fsf_req_dismiss_all - dismiss all fsf requests + * @adapter: pointer to struct zfcp_adapter + * + * Never ever call this without shutting down the adapter first. + * Otherwise the adapter would continue using and corrupting s390 storage. + * Included BUG_ON() call to ensure this is done. + * ERP is supposed to be the only user of this function.
+ */ +void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *req, *tmp; + unsigned long flags; + LIST_HEAD(remove_queue); + unsigned int i; + + BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); + spin_lock_irqsave(&adapter->req_list_lock, flags); + for (i = 0; i < REQUEST_LIST_SIZE; i++) + list_splice_init(&adapter->req_list[i], &remove_queue); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + list_for_each_entry_safe(req, tmp, &remove_queue, list) { + list_del(&req->list); + req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(req); + } +} + static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) { struct fsf_qtcb_bottom_config *bottom; @@ -2578,3 +2578,43 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, } return ERR_PTR(retval); } + +/** + * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO + * @adapter: pointer to struct zfcp_adapter + * @sbal_idx: response queue index of SBAL to be processed + */ +void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) +{ + struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx]; + struct qdio_buffer_element *sbale; + struct zfcp_fsf_req *fsf_req; + unsigned long flags, req_id; + int idx; + + for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { + + sbale = &sbal->element[idx]; + req_id = (unsigned long) sbale->addr; + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req = zfcp_reqlist_find(adapter, req_id); + + if (!fsf_req) + /* + * Unknown request means that we have potentially memory + * corruption and must stop the machine immediately. + */ + panic("error: unknown req_id (%lx) on adapter %s.\n", + req_id, dev_name(&adapter->ccw_device->dev)); + + list_del(&fsf_req->list); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + fsf_req->sbal_response = sbal_idx; + fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); + zfcp_fsf_req_complete(fsf_req); + + if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) + break; + } +} diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e0a215309df0..2e9b3a9cebd9 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -112,31 +112,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, wake_up(&adapter->request_wq); } -static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, - unsigned long req_id, int sbal_idx) -{ - struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - spin_lock_irqsave(&adapter->req_list_lock, flags); - fsf_req = zfcp_reqlist_find(adapter, req_id); - - if (!fsf_req) - /* - * Unknown request means that we have potentially memory - * corruption and must stop the machine immediatly. 
- */ - panic("error: unknown request id (%lx) on adapter %s.\n", - req_id, dev_name(&adapter->ccw_device->dev)); - - zfcp_reqlist_remove(adapter, fsf_req); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - - fsf_req->sbal_response = sbal_idx; - fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); - zfcp_fsf_req_complete(fsf_req); -} - static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) { struct zfcp_qdio_queue *queue = &adapter->resp_q; @@ -163,9 +138,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, unsigned long parm) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; - struct zfcp_qdio_queue *queue = &adapter->resp_q; - struct qdio_buffer_element *sbale; - int sbal_idx, sbale_idx, sbal_no; + int sbal_idx, sbal_no; if (unlikely(qdio_err)) { zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); @@ -179,22 +152,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, */ for (sbal_no = 0; sbal_no < count; sbal_no++) { sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; - /* go through all SBALEs of SBAL */ - for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; - sbale_idx++) { - sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); - zfcp_qdio_reqid_check(adapter, - (unsigned long) sbale->addr, - sbal_idx); - if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) - break; - }; - - if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) - dev_warn(&adapter->ccw_device->dev, - "A QDIO protocol error occurred, " - "operations continue\n"); + zfcp_fsf_reqid_check(adapter, sbal_idx); } /* From 058b8647892ed49ba6a0d2c0966a72e20e2e69ff Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:14 +0200 Subject: [PATCH 089/209] [SCSI] zfcp: Replace fsf_req wait_queue with completion The combination wait_queue/wakeup in conjunction with the flag ZFCP_STATUS_FSFREQ_COMPLETED to signal the completion of an fsfreq was not race-safe and can be better solved by a completion. 
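As background for the change below, a minimal sketch of the completion pattern the driver switches to (the request type here is illustrative, not struct zfcp_fsf_req): complete() performs the state change and the wake-up as one operation, so a waiter cannot miss the event the way a separately set COMPLETED flag plus wake_up() can.

#include <linux/completion.h>

struct demo_req {
	struct completion done;
	int result;
};

static void demo_req_init(struct demo_req *req)
{
	init_completion(&req->done);
}

/* response path: replaces "set the COMPLETED flag, then wake_up()" */
static void demo_req_finish(struct demo_req *req, int result)
{
	req->result = result;
	complete(&req->done);
}

/* submitter path: replaces wait_event(wq, status & COMPLETED) */
static int demo_req_wait(struct demo_req *req)
{
	wait_for_completion(&req->done);
	return req->result;
}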
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 3 +-- drivers/s390/scsi/zfcp_erp.c | 3 +-- drivers/s390/scsi/zfcp_fsf.c | 26 ++++++-------------------- drivers/s390/scsi/zfcp_scsi.c | 6 ++---- 4 files changed, 10 insertions(+), 28 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index c1becfc1e7fe..944f67754eed 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -248,7 +248,6 @@ enum zfcp_wka_status { /* FSF request status (this does not have a common part) */ #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 -#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 @@ -532,7 +531,7 @@ struct zfcp_fsf_req { u8 sbale_curr; /* current SBALE during creation of request */ u8 sbal_response; /* SBAL used in interrupt */ - wait_queue_head_t completion_wq; /* can be used by a routine + struct completion completion; /* can be used by a routine to wait for completion */ u32 status; /* status of this request */ u32 fsf_command; /* FSF Command copy */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 39e4dd15453f..a377e2f91251 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -485,8 +485,7 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_rec_dbf_event_action("erscf_2", act); - if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | - ZFCP_STATUS_FSFREQ_DISMISSED)) + if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) act->fsf_req = NULL; } else act->fsf_req = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 7ca2995aaf68..ed06a1d17b73 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -444,23 +444,11 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) if (req->erp_action) zfcp_erp_notify(req->erp_action, 0); - req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) zfcp_fsf_req_free(req); else - /* notify initiator waiting for the requests completion */ - /* - * FIXME: Race! We must not access fsf_req here as it might have been - * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED - * flag. It's an improbable case. But, we have the same paranoia for - * the cleanup flag already. - * Might better be handled using complete()? 
- * (setting the flag and doing wakeup ought to be atomic - * with regard to checking the flag as long as waitqueue is - * part of the to be released structure) - */ - wake_up(&req->completion_wq); + complete(&req->completion); } /** @@ -733,7 +721,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, INIT_LIST_HEAD(&req->list); init_timer(&req->timer); - init_waitqueue_head(&req->completion_wq); + init_completion(&req->completion); req->adapter = adapter; req->fsf_command = fsf_cmd; @@ -1309,8 +1297,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, retval = zfcp_fsf_req_send(req); spin_unlock_bh(&adapter->req_q_lock); if (!retval) - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); zfcp_fsf_req_free(req); return retval; @@ -1405,8 +1392,8 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, spin_unlock_bh(&adapter->req_q_lock); if (!retval) - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); + zfcp_fsf_req_free(req); return retval; @@ -2572,8 +2559,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, spin_unlock_bh(&adapter->req_q_lock); if (!retval) { - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); return req; } return ERR_PTR(retval); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0bd80a90426a..0de059161b35 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -206,8 +206,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (!abrt_req) return FAILED; - wait_event(abrt_req->completion_wq, - abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&abrt_req->completion); if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) dbf_tag = "okay"; @@ -246,8 +245,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (!fsf_req) return FAILED; - wait_event(fsf_req->completion_wq, - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); From a4623c467ff76f9258555d44d68371e10c5406c2 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:15 +0200 Subject: [PATCH 090/209] [SCSI] zfcp: Improve request allocation through mempools Remove the special case for NO_QTCB requests and optimize the mempool and cache processing for fsfreqs. Especially use seperate mempools for the zfcp_fsf_req and zfcp_qtcb structs. 
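A rough sketch of the resulting allocation scheme (type names, sizes and pool depths are illustrative, not the zfcp values): the QTCBs come from a hardware-aligned slab cache fronted by a small mempool, the request heads from a kmalloc-backed mempool, so each structure keeps an emergency reserve of exactly its own size instead of sharing one oversized combined object.

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_qtcb { char payload[512]; };	/* stand-in for struct fsf_qtcb */
struct demo_req { struct demo_qtcb *qtcb; };	/* stand-in for struct zfcp_fsf_req */

static struct kmem_cache *demo_qtcb_cache;
static mempool_t *demo_qtcb_pool, *demo_req_pool;

static int demo_pools_init(void)
{
	size_t size = sizeof(struct demo_qtcb);

	/* hardware-aligned cache, like the zfcp_cache_hw_align() helper below */
	demo_qtcb_cache = kmem_cache_create("demo_qtcb", size,
					    roundup_pow_of_two(size), 0, NULL);
	if (!demo_qtcb_cache)
		return -ENOMEM;

	demo_qtcb_pool = mempool_create_slab_pool(3, demo_qtcb_cache);
	demo_req_pool = mempool_create_kmalloc_pool(1, sizeof(struct demo_req));
	if (!demo_qtcb_pool || !demo_req_pool)
		return -ENOMEM;
	return 0;
}

static struct demo_req *demo_req_alloc(void)
{
	struct demo_req *req = mempool_alloc(demo_req_pool, GFP_ATOMIC);

	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));
	req->qtcb = mempool_alloc(demo_qtcb_pool, GFP_ATOMIC);
	if (!req->qtcb) {
		mempool_free(req, demo_req_pool);
		return NULL;
	}
	return req;
}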
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 98 ++++++++++++++++++++---------------- drivers/s390/scsi/zfcp_def.h | 31 +++++++----- drivers/s390/scsi/zfcp_fc.c | 19 ++----- drivers/s390/scsi/zfcp_fsf.c | 92 +++++++++++++++++---------------- 4 files changed, 127 insertions(+), 113 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 7aba6840243f..de623292277b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -42,6 +42,12 @@ static char *init_device; module_param_named(device, init_device, charp, 0400); MODULE_PARM_DESC(device, "specify initial device"); +static struct kmem_cache *zfcp_cache_hw_align(const char *name, + unsigned long size) +{ + return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); +} + static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) { int idx; @@ -110,14 +116,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) return; } -static struct kmem_cache *zfcp_cache_create(int size, char *name) -{ - int align = 1; - while ((size - align) > 0) - align <<= 1; - return kmem_cache_create(name , size, align, 0, NULL); -} - static void __init zfcp_init_device_setup(char *devstr) { char *token; @@ -158,18 +156,23 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create( - sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf"); - if (!zfcp_data.fsf_req_qtcb_cache) + zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", + sizeof(struct ct_iu_gpn_ft_req)); + if (!zfcp_data.gpn_ft_cache) goto out; - zfcp_data.sr_buffer_cache = zfcp_cache_create( - sizeof(struct fsf_status_read_buffer), "zfcp_sr"); + zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", + sizeof(struct fsf_qtcb)); + if (!zfcp_data.qtcb_cache) + goto out_qtcb_cache; + + zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", + sizeof(struct fsf_status_read_buffer)); if (!zfcp_data.sr_buffer_cache) goto out_sr_cache; - zfcp_data.gid_pn_cache = zfcp_cache_create( - sizeof(struct zfcp_gid_pn_data), "zfcp_gid"); + zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", + sizeof(struct zfcp_gid_pn_data)); if (!zfcp_data.gid_pn_cache) goto out_gid_cache; @@ -209,7 +212,9 @@ static int __init zfcp_module_init(void) out_gid_cache: kmem_cache_destroy(zfcp_data.sr_buffer_cache); out_sr_cache: - kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); + kmem_cache_destroy(zfcp_data.qtcb_cache); +out_qtcb_cache: + kmem_cache_destroy(zfcp_data.gpn_ft_cache); out: return retval; } @@ -354,36 +359,41 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit) static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { /* must only be called with zfcp_data.config_sema taken */ - adapter->pool.fsf_req_erp = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_erp) + adapter->pool.erp_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.erp_req) return -ENOMEM; - adapter->pool.fsf_req_scsi = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_scsi) + adapter->pool.scsi_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.scsi_req) return -ENOMEM; - adapter->pool.fsf_req_abort = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_abort) + adapter->pool.scsi_abort = + mempool_create_kmalloc_pool(1, 
sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.scsi_abort) return -ENOMEM; - adapter->pool.fsf_req_status_read = + adapter->pool.status_read_req = mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, sizeof(struct zfcp_fsf_req)); - if (!adapter->pool.fsf_req_status_read) + if (!adapter->pool.status_read_req) return -ENOMEM; - adapter->pool.data_status_read = + adapter->pool.qtcb_pool = + mempool_create_slab_pool(3, zfcp_data.qtcb_cache); + if (!adapter->pool.qtcb_pool) + return -ENOMEM; + + adapter->pool.status_read_data = mempool_create_slab_pool(FSF_STATUS_READS_RECOM, zfcp_data.sr_buffer_cache); - if (!adapter->pool.data_status_read) + if (!adapter->pool.status_read_data) return -ENOMEM; - adapter->pool.data_gid_pn = + adapter->pool.gid_pn_data = mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); - if (!adapter->pool.data_gid_pn) + if (!adapter->pool.gid_pn_data) return -ENOMEM; return 0; @@ -392,18 +402,20 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { /* zfcp_data.config_sema must be held */ - if (adapter->pool.fsf_req_erp) - mempool_destroy(adapter->pool.fsf_req_erp); - if (adapter->pool.fsf_req_scsi) - mempool_destroy(adapter->pool.fsf_req_scsi); - if (adapter->pool.fsf_req_abort) - mempool_destroy(adapter->pool.fsf_req_abort); - if (adapter->pool.fsf_req_status_read) - mempool_destroy(adapter->pool.fsf_req_status_read); - if (adapter->pool.data_status_read) - mempool_destroy(adapter->pool.data_status_read); - if (adapter->pool.data_gid_pn) - mempool_destroy(adapter->pool.data_gid_pn); + if (adapter->pool.erp_req) + mempool_destroy(adapter->pool.erp_req); + if (adapter->pool.scsi_req) + mempool_destroy(adapter->pool.scsi_req); + if (adapter->pool.scsi_abort) + mempool_destroy(adapter->pool.scsi_abort); + if (adapter->pool.qtcb_pool) + mempool_destroy(adapter->pool.qtcb_pool); + if (adapter->pool.status_read_req) + mempool_destroy(adapter->pool.status_read_req); + if (adapter->pool.status_read_data) + mempool_destroy(adapter->pool.status_read_data); + if (adapter->pool.gid_pn_data) + mempool_destroy(adapter->pool.gid_pn_data); } /** diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 944f67754eed..1e27ed5d90e0 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -264,12 +264,13 @@ struct zfcp_fsf_req; /* holds various memory pools of an adapter */ struct zfcp_adapter_mempool { - mempool_t *fsf_req_erp; - mempool_t *fsf_req_scsi; - mempool_t *fsf_req_abort; - mempool_t *fsf_req_status_read; - mempool_t *data_status_read; - mempool_t *data_gid_pn; + mempool_t *erp_req; + mempool_t *scsi_req; + mempool_t *scsi_abort; + mempool_t *status_read_req; + mempool_t *status_read_data; + mempool_t *gid_pn_data; + mempool_t *qtcb_pool; }; /* @@ -303,6 +304,15 @@ struct ct_iu_gid_pn_resp { u32 d_id; } __attribute__ ((packed)); +struct ct_iu_gpn_ft_req { + struct ct_hdr header; + u8 flags; + u8 domain_id_scope; + u8 area_id_scope; + u8 fc4_type; +} __attribute__ ((packed)); + + /** * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct * @wka_port: port where the request is sent to @@ -559,18 +569,13 @@ struct zfcp_data { lists */ struct semaphore config_sema; /* serialises configuration changes */ - struct kmem_cache *fsf_req_qtcb_cache; + struct kmem_cache *gpn_ft_cache; + struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; struct workqueue_struct *work_queue; }; -/* struct 
used by memory pools for fsf_requests */ -struct zfcp_fsf_req_qtcb { - struct zfcp_fsf_req fsf_req; - struct fsf_qtcb qtcb; -}; - /********************** ZFCP SPECIFIC DEFINES ********************************/ #define ZFCP_SET 0x00000100 diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6d0fef92567b..acadcd3c276a 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -25,14 +25,6 @@ static u32 rscn_range_mask[] = { [RSCN_FABRIC_ADDRESS] = 0x000000, }; -struct ct_iu_gpn_ft_req { - struct ct_hdr header; - u8 flags; - u8 domain_id_scope; - u8 area_id_scope; - u8 fc4_type; -} __attribute__ ((packed)); - struct gpn_ft_resp_acc { u8 control; u8 port_id[3]; @@ -322,8 +314,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, init_completion(&compl_rec.done); compl_rec.handler = zfcp_fc_ns_gid_pn_eval; compl_rec.handler_data = (unsigned long) gid_pn; - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, - erp_action); + ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.erp_req, erp_action); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -340,7 +331,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) struct zfcp_gid_pn_data *gid_pn; struct zfcp_adapter *adapter = erp_action->adapter; - gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); + gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); if (!gid_pn) return -ENOMEM; @@ -354,7 +345,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) zfcp_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.data_gid_pn); + mempool_free(gid_pn, adapter->pool.gid_pn_data); return ret; } @@ -497,7 +488,7 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) { struct scatterlist *sg = &gpn_ft->sg_req; - kfree(sg_virt(sg)); /* free request buffer */ + kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); kfree(gpn_ft); @@ -512,7 +503,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) if (!gpn_ft) return NULL; - req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL); + req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); if (!req) { kfree(gpn_ft); gpn_ft = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index ed06a1d17b73..96c580ee7509 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -14,7 +14,6 @@ #include "zfcp_dbf.h" #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 -#define ZFCP_REQ_NO_QTCB 0x00000008 static void zfcp_fsf_request_timeout_handler(unsigned long data) { @@ -112,14 +111,15 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) void zfcp_fsf_req_free(struct zfcp_fsf_req *req) { if (likely(req->pool)) { + if (likely(req->qtcb)) + mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); mempool_free(req, req->pool); return; } - if (req->qtcb) { - kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); - return; - } + if (likely(req->qtcb)) + kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); + kfree(req); } static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) @@ -251,7 +251,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); return; } @@ -303,7 +303,7 @@ static void 
zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) break; } - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); @@ -669,34 +669,37 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) return -EIO; } -static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) +static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) { struct zfcp_fsf_req *req; - req = mempool_alloc(pool, GFP_ATOMIC); - if (!req) + + if (likely(pool)) + req = mempool_alloc(pool, GFP_ATOMIC); + else + req = kmalloc(sizeof(*req), GFP_ATOMIC); + + if (unlikely(!req)) return NULL; + memset(req, 0, sizeof(*req)); req->pool = pool; return req; } -static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) +static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) { - struct zfcp_fsf_req_qtcb *qtcb; + struct fsf_qtcb *qtcb; if (likely(pool)) qtcb = mempool_alloc(pool, GFP_ATOMIC); else - qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, - GFP_ATOMIC); + qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); + if (unlikely(!qtcb)) return NULL; memset(qtcb, 0, sizeof(*qtcb)); - qtcb->fsf_req.qtcb = &qtcb->qtcb; - qtcb->fsf_req.pool = pool; - - return &qtcb->fsf_req; + return qtcb; } static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, @@ -704,14 +707,8 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, mempool_t *pool) { struct qdio_buffer_element *sbale; - - struct zfcp_fsf_req *req; struct zfcp_qdio_queue *req_q = &adapter->req_q; - - if (req_flags & ZFCP_REQ_NO_QTCB) - req = zfcp_fsf_alloc_noqtcb(pool); - else - req = zfcp_fsf_alloc_qtcb(pool); + struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); if (unlikely(!req)) return ERR_PTR(-ENOMEM); @@ -735,7 +732,17 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; - if (likely(req->qtcb)) { + if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { + if (likely(pool)) + req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); + else + req->qtcb = zfcp_qtcb_alloc(NULL); + + if (unlikely(!req->qtcb)) { + zfcp_fsf_req_free(req); + return ERR_PTR(-ENOMEM); + } + req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; req->qtcb->prefix.req_id = req->req_id; req->qtcb->prefix.ulp_info = 26; @@ -811,9 +818,8 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, - ZFCP_REQ_NO_QTCB, - adapter->pool.fsf_req_status_read); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 0, + adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -823,7 +829,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->sbale_curr = 2; - sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); + sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); if (!sr_buf) { retval = -ENOMEM; goto failed_buf; @@ -841,7 +847,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) goto out; failed_req_send: - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); failed_buf: zfcp_fsf_req_free(req); zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); @@ -919,7 +925,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long 
old_req_id, if (zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - 0, adapter->pool.fsf_req_abort); + 0, adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -1231,7 +1237,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1327,7 +1333,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1497,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1566,7 +1572,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1643,7 +1649,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; @@ -1697,7 +1703,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; @@ -1788,7 +1794,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1960,7 +1966,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2045,7 +2051,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2349,7 +2355,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, } req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_scsi); + adapter->pool.scsi_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2460,7 +2466,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) if (zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0, - adapter->pool.fsf_req_scsi); + adapter->pool.scsi_req); if (IS_ERR(req)) { req = NULL; goto out; From 09a46c6e34ba152169b7400d266d2efb4c391a43 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:16 +0200 Subject: [PATCH 091/209] [SCSI] zfcp: Remove the useless ZFCP_REQ_AUTO_CLEANUP flag The flag 
ZFCP_REQ_AUTO_CLEANUP was useless as the ZFCP_STATUS_FSFREQ_CLEANUP flag is there for exactly the same purpose. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 80 ++++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 96c580ee7509..70a978a14f2a 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -13,8 +13,6 @@ #include "zfcp_ext.h" #include "zfcp_dbf.h" -#define ZFCP_REQ_AUTO_CLEANUP 0x00000002 - static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; @@ -703,8 +701,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) } static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, - u32 fsf_cmd, int req_flags, - mempool_t *pool) + u32 fsf_cmd, mempool_t *pool) { struct qdio_buffer_element *sbale; struct zfcp_qdio_queue *req_q = &adapter->req_q; @@ -761,9 +758,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, return ERR_PTR(-EIO); } - if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) - req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - return req; } @@ -818,7 +812,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 0, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -925,7 +919,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, if (zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - 0, adapter->pool.scsi_abort); + adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -1081,13 +1075,14 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, - ZFCP_REQ_AUTO_CLEANUP, pool); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool); + if (IS_ERR(req)) { ret = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, FSF_MAX_SBALS_PER_REQ); if (ret) @@ -1189,13 +1184,15 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, - ZFCP_REQ_AUTO_CLEANUP, NULL); + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL); + if (IS_ERR(req)) { ret = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); if (ret) @@ -1234,15 +1231,16 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_EXCHANGE_CONFIG_DATA, - ZFCP_REQ_AUTO_CLEANUP, + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1278,8 +1276,8 @@ int zfcp_fsf_exchange_config_data_sync(struct 
zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, - 0, NULL); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; @@ -1331,14 +1329,16 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1378,8 +1378,8 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, - NULL); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; @@ -1500,15 +1500,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_OPEN_PORT_WITH_DID, - ZFCP_REQ_AUTO_CLEANUP, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1571,13 +1571,14 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1646,15 +1647,15 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_OPEN_PORT_WITH_DID, - ZFCP_REQ_AUTO_CLEANUP, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, adapter->pool.erp_req); + if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1702,13 +1703,14 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1793,13 +1795,14 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1965,13 +1968,14 @@ int zfcp_fsf_open_unit(struct 
zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2049,14 +2053,16 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2353,14 +2359,16 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, atomic_inc(&adapter->qdio_outb_full); goto out; } + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.scsi_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_unit_get(unit); req->unit = unit; req->data = scsi_cmnd; @@ -2465,8 +2473,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0, + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, adapter->pool.scsi_req); + if (IS_ERR(req)) { req = NULL; goto out; @@ -2537,7 +2547,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); + req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL); if (IS_ERR(req)) { retval = -EPERM; goto out; From 4544683a4b1d4e65ccca8c736bac56a195a5206b Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:17 +0200 Subject: [PATCH 092/209] [SCSI] zfcp: Move workqueue to adapter struct Remove the global driver work queue and replace it with a workqueue local to the adapter. The usage of this workqueue makes this the correct place for the structure. In addition multiple adapters won't block each other due to the serialization of the queued work. 
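A hedged sketch of the per-device workqueue pattern the patch introduces (the adapter structure and names here are illustrative): every device creates its own single-threaded queue, named after the device for easy identification, and queues its work there, so a long-running job for one adapter no longer delays work queued by another.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct device *dev;
	struct workqueue_struct *work_queue;
	struct work_struct scan_work;
};

static int demo_adapter_wq_setup(struct demo_adapter *a)
{
	char name[32];

	snprintf(name, sizeof(name), "demo_q_%s", dev_name(a->dev));
	a->work_queue = create_singlethread_workqueue(name);
	return a->work_queue ? 0 : -ENOMEM;
}

static void demo_adapter_wq_destroy(struct demo_adapter *a)
{
	if (a->work_queue)
		destroy_workqueue(a->work_queue);
	a->work_queue = NULL;
}

/* queued work is now serialized per adapter instead of driver-wide */
static void demo_adapter_trigger_scan(struct demo_adapter *a)
{
	queue_work(a->work_queue, &a->scan_work);
}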
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 29 +++++++++++++++++++++++++++-- drivers/s390/scsi/zfcp_def.h | 2 +- drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 5 +++-- 6 files changed, 34 insertions(+), 8 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index de623292277b..f785cbc7520d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -176,8 +176,6 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq"); - sema_init(&zfcp_data.config_sema, 1); rwlock_init(&zfcp_data.config_lock); @@ -458,6 +456,27 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) adapter->fsf_lic_version); } +static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) +{ + char name[TASK_COMM_LEN]; + + snprintf(name, sizeof(name), "zfcp_q_%s", + dev_name(&adapter->ccw_device->dev)); + adapter->work_queue = create_singlethread_workqueue(name); + + if (adapter->work_queue) + return 0; + return -ENOMEM; +} + +static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) +{ + if (adapter->work_queue) + destroy_workqueue(adapter->work_queue); + adapter->work_queue = NULL; + +} + /** * zfcp_adapter_enqueue - enqueue a new adapter to the list * @ccw_device: pointer to the struct cc_device @@ -504,6 +523,9 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (zfcp_adapter_debug_register(adapter)) goto debug_register_failed; + if (zfcp_setup_adapter_work_queue(adapter)) + goto work_queue_failed; + init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_thread_wqh); init_waitqueue_head(&adapter->erp_done_wqh); @@ -543,6 +565,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return 0; sysfs_failed: + zfcp_destroy_adapter_work_queue(adapter); +work_queue_failed: zfcp_adapter_debug_unregister(adapter); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); @@ -579,6 +603,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) if (!retval) return; + zfcp_destroy_adapter_work_queue(adapter); zfcp_adapter_debug_unregister(adapter); zfcp_qdio_free(adapter); zfcp_free_low_mem_buffers(adapter); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 1e27ed5d90e0..2715a103e5a8 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -485,6 +485,7 @@ struct zfcp_adapter { struct work_struct scan_work; struct service_level service_level; atomic_t qdio_outb_full; /* queue full incidents */ + struct workqueue_struct *work_queue; }; struct zfcp_port { @@ -573,7 +574,6 @@ struct zfcp_data { struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; - struct workqueue_struct *work_queue; }; /********************** ZFCP SPECIFIC DEFINES ********************************/ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index a377e2f91251..50e5fbe2252a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -875,7 +875,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) return zfcp_erp_open_ptp_port(act); if (!port->d_id) { zfcp_port_get(port); - if (!queue_work(zfcp_data.work_queue, + if (!queue_work(adapter->work_queue, &port->gid_pn_work)) 
zfcp_port_put(port); return ZFCP_ERP_CONTINUES; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index acadcd3c276a..8921e16fdab7 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -480,7 +480,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) void zfcp_test_link(struct zfcp_port *port) { zfcp_port_get(port); - if (!queue_work(zfcp_data.work_queue, &port->test_link_work)) + if (!queue_work(port->adapter->work_queue, &port->test_link_work)) zfcp_port_put(port); } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 70a978a14f2a..5b73f989a629 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -305,7 +305,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); - queue_work(zfcp_data.work_queue, &adapter->stat_work); + queue_work(adapter->work_queue, &adapter->stat_work); } static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0de059161b35..2e13d41269a4 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -572,7 +572,7 @@ void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) zfcp_port_get(port); port->rport_task = RPORT_ADD; - if (!queue_work(zfcp_data.work_queue, &port->rport_work)) + if (!queue_work(port->adapter->work_queue, &port->rport_work)) zfcp_port_put(port); } @@ -581,7 +581,8 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) zfcp_port_get(port); port->rport_task = RPORT_DEL; - if (port->rport && queue_work(zfcp_data.work_queue, &port->rport_work)) + if (port->rport && queue_work(port->adapter->work_queue, + &port->rport_work)) return; zfcp_port_put(port); From 42428f747a8a0db9c6de03e105932316defad65d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:18 +0200 Subject: [PATCH 093/209] [SCSI] zfcp: Separate qdio attributes from zfcp_fsf_req Split all qdio related attributes out of zfcp_fsf_req and put it in new structure. 
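For illustration, a stand-alone sketch of the refactoring (field names follow the patch, everything else is simplified): the queue bookkeeping moves into an embedded structure, so qdio helpers can take just that sub-structure instead of the whole request, which is what the changed zfcp_qdio_* signatures in the hunks below rely on.

#include <stdio.h>

struct demo_queue_req {			/* queue-only bookkeeping split out of the request */
	unsigned char sbal_first;
	unsigned char sbal_last;
	unsigned char sbale_curr;
};

struct demo_req {			/* the request embeds it: req.queue_req.sbal_first */
	unsigned long req_id;
	struct demo_queue_req queue_req;
};

/* a qdio-side helper now only needs the queue part */
static void demo_qdio_advance(struct demo_queue_req *q)
{
	q->sbale_curr++;
}

int main(void)
{
	struct demo_req req = { .req_id = 42, .queue_req = { .sbale_curr = 1 } };

	demo_qdio_advance(&req.queue_req);
	printf("req %lu now at SBALE %u\n", req.req_id, req.queue_req.sbale_curr);
	return 0;
}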
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 6 +-- drivers/s390/scsi/zfcp_def.h | 84 ++++++++++++++++++++----------- drivers/s390/scsi/zfcp_ext.h | 11 ++-- drivers/s390/scsi/zfcp_fsf.c | 79 +++++++++++++++-------------- drivers/s390/scsi/zfcp_qdio.c | 95 +++++++++++++++++++---------------- 5 files changed, 160 insertions(+), 115 deletions(-) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 5568440ec2fc..fc7f3d66fe37 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -152,9 +152,9 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, memcpy(response->fsf_status_qual, fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); response->fsf_req_status = fsf_req->status; - response->sbal_first = fsf_req->sbal_first; - response->sbal_last = fsf_req->sbal_last; - response->sbal_response = fsf_req->sbal_response; + response->sbal_first = fsf_req->queue_req.sbal_first; + response->sbal_last = fsf_req->queue_req.sbal_last; + response->sbal_response = fsf_req->queue_req.sbal_response; response->pool = fsf_req->pool != NULL; response->erp_action = (unsigned long)fsf_req->erp_action; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2715a103e5a8..a04bdfd4d2f6 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -529,36 +529,64 @@ struct zfcp_unit { struct work_struct scsi_work; }; -/* FSF request */ +/** + * struct zfcp_queue_req - queue related values for a request + * @sbal_number: number of free SBALs + * @sbal_first: first SBAL for this request + * @sbal_last: last SBAL for this request + * @sbal_limit: last possible SBAL for this request + * @sbale_curr: current SBALE at creation of this request + * @sbal_response: SBAL used in interrupt + * @qdio_outb_usage: usage of outbound queue + * @qdio_inb_usage: usage of inbound queue + */ +struct zfcp_queue_req { + u8 sbal_number; + u8 sbal_first; + u8 sbal_last; + u8 sbal_limit; + u8 sbale_curr; + u8 sbal_response; + u16 qdio_outb_usage; + u16 qdio_inb_usage; +}; + +/** + * struct zfcp_fsf_req - basic FSF request structure + * @list: list of FSF requests + * @req_id: unique request ID + * @adapter: adapter this request belongs to + * @queue_req: queue related values + * @completion: used to signal the completion of the request + * @status: status of the request + * @fsf_command: FSF command issued + * @qtcb: associated QTCB + * @seq_no: sequence number of this request + * @data: private data + * @timer: timer data of this request + * @erp_action: reference to erp action if request issued on behalf of ERP + * @pool: reference to memory pool if used for this request + * @issued: time when request was send (STCK) + * @unit: reference to unit if this request is a SCSI request + * @handler: handler which should be called to process response + */ struct zfcp_fsf_req { - struct list_head list; /* list of FSF requests */ - unsigned long req_id; /* unique request ID */ - struct zfcp_adapter *adapter; /* adapter request belongs to */ - u8 sbal_number; /* nr of SBALs free for use */ - u8 sbal_first; /* first SBAL for this request */ - u8 sbal_last; /* last SBAL for this request */ - u8 sbal_limit; /* last possible SBAL for - this reuest */ - u8 sbale_curr; /* current SBALE during creation - of request */ - u8 sbal_response; /* SBAL used in interrupt */ - struct completion completion; /* can be used by a routine - to wait for completion */ - u32 status; /* status of 
this request */ - u32 fsf_command; /* FSF Command copy */ - struct fsf_qtcb *qtcb; /* address of associated QTCB */ - u32 seq_no; /* Sequence number of request */ - void *data; /* private data of request */ - struct timer_list timer; /* used for erp or scsi er */ - struct zfcp_erp_action *erp_action; /* used if this request is - issued on behalf of erp */ - mempool_t *pool; /* used if request was alloacted - from emergency pool */ - unsigned long long issued; /* request sent time (STCK) */ - struct zfcp_unit *unit; + struct list_head list; + unsigned long req_id; + struct zfcp_adapter *adapter; + struct zfcp_queue_req queue_req; + struct completion completion; + u32 status; + u32 fsf_command; + struct fsf_qtcb *qtcb; + u32 seq_no; + void *data; + struct timer_list timer; + struct zfcp_erp_action *erp_action; + mempool_t *pool; + unsigned long long issued; + struct zfcp_unit *unit; void (*handler)(struct zfcp_fsf_req *); - u16 qdio_outb_usage;/* usage of outbound queue */ - u16 qdio_inb_usage; /* usage of inbound queue */ }; /* driver data */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6a3727bdb386..d11c0f44dad3 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -142,10 +142,13 @@ extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); /* zfcp_qdio.c */ extern int zfcp_qdio_allocate(struct zfcp_adapter *); extern void zfcp_qdio_free(struct zfcp_adapter *); -extern int zfcp_qdio_send(struct zfcp_fsf_req *); -extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *); -extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *); -extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, +extern int zfcp_qdio_send(struct zfcp_adapter *, struct zfcp_queue_req *); +extern struct qdio_buffer_element + *zfcp_qdio_sbale_req(struct zfcp_adapter *, struct zfcp_queue_req *); +extern struct qdio_buffer_element + *zfcp_qdio_sbale_curr(struct zfcp_adapter *, struct zfcp_queue_req *); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *, + struct zfcp_queue_req *, unsigned long, struct scatterlist *, int); extern int zfcp_qdio_open(struct zfcp_adapter *); extern void zfcp_qdio_close(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 5b73f989a629..e88b7804780b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -720,12 +720,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, req->adapter = adapter; req->fsf_command = fsf_cmd; req->req_id = adapter->req_no; - req->sbal_number = 1; - req->sbal_first = req_q->first; - req->sbal_last = req_q->first; - req->sbale_curr = 1; + req->queue_req.sbal_number = 1; + req->queue_req.sbal_first = req_q->first; + req->queue_req.sbal_last = req_q->first; + req->queue_req.sbale_curr = 1; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; @@ -774,9 +774,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) list_add_tail(&req->list, &adapter->req_list[idx]); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - req->qdio_outb_usage = atomic_read(&adapter->req_q.count); + req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count); req->issued = get_clock(); - if (zfcp_qdio_send(req)) { + if (zfcp_qdio_send(adapter, &req->queue_req)) { del_timer(&req->timer); spin_lock_irqsave(&adapter->req_list_lock, 
flags); /* lookup request again, list might have changed */ @@ -819,9 +819,9 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) goto out; } - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; - req->sbale_curr = 2; + req->queue_req.sbale_curr = 2; sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); if (!sr_buf) { @@ -830,7 +830,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; - sbale = zfcp_qdio_sbale_curr(req); + sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req); sbale->addr = (void *) sr_buf; sbale->length = sizeof(*sr_buf); @@ -929,7 +929,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1023,8 +1023,10 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_resp, int max_sbals) { - struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req); - u32 feat = req->adapter->adapter_features; + struct zfcp_adapter *adapter = req->adapter; + struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter, + &req->queue_req); + u32 feat = adapter->adapter_features; int bytes; if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { @@ -1041,14 +1043,16 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, return 0; } - bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + SBAL_FLAGS0_TYPE_WRITE_READ, sg_req, max_sbals); if (bytes <= 0) return -EIO; req->qtcb->bottom.support.req_buf_length = bytes; - req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; + req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + SBAL_FLAGS0_TYPE_WRITE_READ, sg_resp, max_sbals); if (bytes <= 0) return -EIO; @@ -1241,7 +1245,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1283,7 +1287,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, goto out_unlock; } - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_config_data_handler; @@ -1339,7 +1343,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1388,7 +1392,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (data) req->data = data; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1509,7 +1513,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) } req->status |= 
ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1579,7 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1656,7 +1660,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1711,7 +1715,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1803,7 +1807,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1976,7 +1980,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2063,7 +2067,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2140,8 +2144,8 @@ static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) } if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) trace.flags |= ZFCP_BLK_REQ_ERROR; - trace.inb_usage = fsf_req->qdio_inb_usage; - trace.outb_usage = fsf_req->qdio_outb_usage; + trace.inb_usage = fsf_req->queue_req.qdio_inb_usage; + trace.outb_usage = fsf_req->queue_req.qdio_outb_usage; blk_add_driver_data(req->q, req, &trace, sizeof(trace)); } @@ -2420,11 +2424,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); - real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, + real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { - if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) { + if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { dev_err(&adapter->ccw_device->dev, "Oversize data package, unit 0x%016Lx " "on port 0x%016Lx closed\n", @@ -2492,7 +2496,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + sizeof(u32); - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2555,15 +2559,15 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, req->handler = 
zfcp_fsf_control_file_handler; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= direction; bottom = &req->qtcb->bottom.support; bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, - FSF_MAX_SBALS_PER_REQ); + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction, + fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); if (bytes != ZFCP_CFDC_MAX_SIZE) { zfcp_fsf_req_free(req); goto out; @@ -2612,8 +2616,9 @@ void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) list_del(&fsf_req->list); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - fsf_req->sbal_response = sbal_idx; - fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); + fsf_req->queue_req.sbal_response = sbal_idx; + fsf_req->queue_req.qdio_inb_usage = + atomic_read(&adapter->resp_q.count); zfcp_fsf_req_complete(fsf_req); if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 2e9b3a9cebd9..e118874976f0 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -3,7 +3,7 @@ * * Setup and helper functions to access QDIO. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #define KMSG_COMPONENT "zfcp" @@ -165,12 +165,14 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, /** * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req - * @fsf_req: pointer to struct fsf_req + * @adapter: pointer to struct zfcp_adapter + * @q_rec: pointer to struct zfcp_queue_rec * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) +struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); + return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, 0); } /** @@ -178,74 +180,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) * @fsf_req: pointer to struct fsf_req * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) +struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, - req->sbale_curr); + return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, + q_req->sbale_curr); } -static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) +static void zfcp_qdio_sbal_limit(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, int max_sbals) { - int count = atomic_read(&fsf_req->adapter->req_q.count); + int count = atomic_read(&adapter->req_q.count); count = min(count, max_sbals); - fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) + q_req->sbal_limit = (q_req->sbal_first + count - 1) % QDIO_MAX_BUFFERS_PER_Q; } static struct qdio_buffer_element * -zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) +zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, + unsigned long sbtype) { struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); sbale->flags |= 
SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ - if (fsf_req->sbal_last == fsf_req->sbal_limit) + if (q_req->sbal_last == q_req->sbal_limit) return NULL; /* set chaining flag in first SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_req(fsf_req); + sbale = zfcp_qdio_sbale_req(adapter, q_req); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ - fsf_req->sbal_last++; - fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; + q_req->sbal_last++; + q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; /* keep this requests number of SBALs up-to-date */ - fsf_req->sbal_number++; + q_req->sbal_number++; /* start at first SBALE of new SBAL */ - fsf_req->sbale_curr = 0; + q_req->sbale_curr = 0; /* set storage-block type for new SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); sbale->flags |= sbtype; return sbale; } static struct qdio_buffer_element * -zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) +zfcp_qdio_sbale_next(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, + unsigned int sbtype) { - if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) - return zfcp_qdio_sbal_chain(fsf_req, sbtype); - fsf_req->sbale_curr++; - return zfcp_qdio_sbale_curr(fsf_req); + if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) + return zfcp_qdio_sbal_chain(adapter, q_req, sbtype); + q_req->sbale_curr++; + return zfcp_qdio_sbale_curr(adapter, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) +static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; - int first = fsf_req->sbal_first; - int last = fsf_req->sbal_last; + struct qdio_buffer **sbal = adapter->req_q.sbal; + int first = q_req->sbal_first; + int last = q_req->sbal_last; int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % QDIO_MAX_BUFFERS_PER_Q + 1; zfcp_qdio_zero_sbals(sbal, first, count); } -static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, +static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, unsigned int sbtype, void *start_addr, unsigned int total_length) { @@ -256,10 +264,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, /* split segment up */ for (addr = start_addr, remaining = total_length; remaining > 0; addr += length, remaining -= length) { - sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); + sbale = zfcp_qdio_sbale_next(adapter, q_req, sbtype); if (!sbale) { - atomic_inc(&fsf_req->adapter->qdio_outb_full); - zfcp_qdio_undo_sbals(fsf_req); + atomic_inc(&adapter->qdio_outb_full); + zfcp_qdio_undo_sbals(adapter, q_req); return -EINVAL; } @@ -281,29 +289,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, * @max_sbals: upper bound for number of SBALs to be used * Returns: number of bytes, or error (negativ) */ -int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, - struct scatterlist *sg, int max_sbals) +int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, + unsigned long sbtype, struct scatterlist *sg, + int max_sbals) { struct qdio_buffer_element *sbale; int retval, bytes = 0; /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(fsf_req, max_sbals); + zfcp_qdio_sbal_limit(adapter, q_req, max_sbals); /* set storage-block type for this request */ - sbale = zfcp_qdio_sbale_req(fsf_req); + sbale = zfcp_qdio_sbale_req(adapter, q_req); sbale->flags |= sbtype; for (; sg; sg = 
sg_next(sg)) { - retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), - sg->length); + retval = zfcp_qdio_fill_sbals(adapter, q_req, sbtype, + sg_virt(sg), sg->length); if (retval < 0) return retval; bytes += sg->length; } /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; return bytes; @@ -314,12 +324,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @fsf_req: pointer to struct zfcp_fsf_req * Returns: 0 on success, error otherwise */ -int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) +int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) { - struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_qdio_queue *req_q = &adapter->req_q; - int first = fsf_req->sbal_first; - int count = fsf_req->sbal_number; + int first = q_req->sbal_first; + int count = q_req->sbal_number; int retval; unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; From 564e1c86c810f9ccfe4300afa402815e3db4886d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:19 +0200 Subject: [PATCH 094/209] [SCSI] zfcp: Move qdio related data out of zfcp_adapter The zfcp_adapter structure was growing over time to a size of almost one memory page. To reduce the size of the data structure and to separate different layers, put all qdio related data in the new zfcp_qdio data structure. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 20 ++- drivers/s390/scsi/zfcp_dbf.c | 6 +- drivers/s390/scsi/zfcp_def.h | 34 ++-- drivers/s390/scsi/zfcp_erp.c | 8 +- drivers/s390/scsi/zfcp_ext.h | 26 +-- drivers/s390/scsi/zfcp_fsf.c | 299 +++++++++++++++++---------------- drivers/s390/scsi/zfcp_qdio.c | 222 ++++++++++++------------ drivers/s390/scsi/zfcp_scsi.c | 6 +- drivers/s390/scsi/zfcp_sysfs.c | 16 +- 9 files changed, 342 insertions(+), 295 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index f785cbc7520d..ed7211ef04eb 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -428,7 +428,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) int zfcp_status_read_refill(struct zfcp_adapter *adapter) { while (atomic_read(&adapter->stat_miss) > 0) - if (zfcp_fsf_status_read(adapter)) { + if (zfcp_fsf_status_read(adapter->qdio)) { if (atomic_read(&adapter->stat_miss) >= 16) { zfcp_erp_adapter_reopen(adapter, 0, "axsref1", NULL); @@ -507,11 +507,16 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return -ENOMEM; } + adapter->qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); + if (!adapter->qdio) + goto qdio_mem_failed; + + adapter->qdio->adapter = adapter; ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); - if (zfcp_qdio_allocate(adapter)) + if (zfcp_qdio_allocate(adapter->qdio, ccw_device)) goto qdio_allocate_failed; if (zfcp_allocate_low_mem_buffers(adapter)) @@ -536,8 +541,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->req_q_lock); - spin_lock_init(&adapter->qdio_stat_lock); + spin_lock_init(&adapter->qdio->req_q_lock); + spin_lock_init(&adapter->qdio->stat_lock); rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); @@ -574,7 +579,9 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) failed_low_mem_buffers:
zfcp_free_low_mem_buffers(adapter); qdio_allocate_failed: - zfcp_qdio_free(adapter); + zfcp_qdio_free(adapter->qdio); + kfree(adapter->qdio); +qdio_mem_failed: kfree(adapter); return -ENOMEM; } @@ -605,12 +612,13 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_destroy_adapter_work_queue(adapter); zfcp_adapter_debug_unregister(adapter); - zfcp_qdio_free(adapter); + zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); kfree(adapter->gs); + kfree(adapter->qdio); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index fc7f3d66fe37..3179b08bda6a 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -274,16 +274,16 @@ void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, /** * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure - * @adapter: adapter affected by this QDIO related event + * @qdio: qdio structure affected by this QDIO related event * @qdio_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, +void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *qdio, unsigned int qdio_error, int sbal_index, int sbal_count) { - struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf *dbf = qdio->adapter->dbf; struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; unsigned long flags; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index a04bdfd4d2f6..bac5c497eab5 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -428,6 +428,29 @@ struct zfcp_latencies { spinlock_t lock; }; +/** struct zfcp_qdio - basic QDIO data structure + * @resp_q: response queue + * @req_q: request queue + * @stat_lock: lock to protect req_q_util and req_q_time + * @req_q_lock; lock to serialize access to request queue + * @req_q_time: time of last fill level change + * @req_q_util: used for accounting + * @req_q_full: queue full incidents + * @req_q_wq: used to wait for SBAL availability + * @adapter: adapter used in conjunction with this QDIO structure + */ +struct zfcp_qdio { + struct zfcp_qdio_queue resp_q; + struct zfcp_qdio_queue req_q; + spinlock_t stat_lock; + spinlock_t req_q_lock; + ktime_t req_q_time; + u64 req_q_util; + atomic_t req_q_full; + wait_queue_head_t req_q_wq; + struct zfcp_adapter *adapter; +}; + struct zfcp_adapter { atomic_t refcount; /* reference count */ wait_queue_head_t remove_wq; /* can be used to wait for @@ -436,6 +459,7 @@ struct zfcp_adapter { u64 peer_wwpn; /* P2P peer WWPN */ u32 peer_d_id; /* P2P peer D_ID */ struct ccw_device *ccw_device; /* S/390 ccw device */ + struct zfcp_qdio *qdio; u32 hydra_version; /* Hydra version */ u32 fsf_lic_version; u32 adapter_features; /* FCP channel features */ @@ -447,15 +471,7 @@ struct zfcp_adapter { unsigned long req_no; /* unique FSF req number */ struct list_head *req_list; /* list of pending reqs */ spinlock_t req_list_lock; /* request list lock */ - struct zfcp_qdio_queue req_q; /* request queue */ - spinlock_t req_q_lock; /* for operations on queue */ - ktime_t req_q_time; /* time of last fill level change */ - u64 req_q_util; /* for accounting */ - spinlock_t qdio_stat_lock; u32 fsf_req_seq_no; /* FSF cmnd seq number */ - wait_queue_head_t request_wq; /* can be used to wait for - more avaliable SBALs */ - struct 
zfcp_qdio_queue resp_q; /* response queue */ rwlock_t abort_lock; /* Protects against SCSI stack abort/command completion races */ @@ -478,13 +494,11 @@ struct zfcp_adapter { struct zfcp_wka_ports *gs; /* generic services */ struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ - struct qdio_initialize qdio_init_data; /* for qdio_establish */ struct fc_host_statistics *fc_stats; struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; struct service_level service_level; - atomic_t qdio_outb_full; /* queue full incidents */ struct workqueue_struct *work_queue; }; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 50e5fbe2252a..feda1db56b23 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -603,9 +603,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) { - if (zfcp_qdio_open(act->adapter)) + struct zfcp_qdio *qdio = act->adapter->qdio; + + if (zfcp_qdio_open(qdio)) return ZFCP_ERP_FAILED; - init_waitqueue_head(&act->adapter->request_wq); + init_waitqueue_head(&qdio->req_q_wq); atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); return ZFCP_ERP_SUCCEEDED; } @@ -710,7 +712,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; /* close queues to ensure that buffers are not accessed by adapter */ - zfcp_qdio_close(adapter); + zfcp_qdio_close(adapter->qdio); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; zfcp_fc_wka_ports_force_offline(adapter->gs); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d11c0f44dad3..e97947d2f2ed 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -51,7 +51,7 @@ extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, struct zfcp_adapter *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, +extern void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *, unsigned int, int, int); extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, struct zfcp_fsf_req *); @@ -118,15 +118,15 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, +extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_config *); extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, +extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_port *); extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_cfdc *); extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); -extern int zfcp_fsf_status_read(struct zfcp_adapter *); +extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, struct zfcp_erp_action *); @@ -137,21 +137,21 @@ extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); extern struct zfcp_fsf_req 
*zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, struct zfcp_unit *); -extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); +extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ -extern int zfcp_qdio_allocate(struct zfcp_adapter *); -extern void zfcp_qdio_free(struct zfcp_adapter *); -extern int zfcp_qdio_send(struct zfcp_adapter *, struct zfcp_queue_req *); +extern int zfcp_qdio_allocate(struct zfcp_qdio *, struct ccw_device *); +extern void zfcp_qdio_free(struct zfcp_qdio *); +extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element - *zfcp_qdio_sbale_req(struct zfcp_adapter *, struct zfcp_queue_req *); + *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element - *zfcp_qdio_sbale_curr(struct zfcp_adapter *, struct zfcp_queue_req *); -extern int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *, + *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_queue_req *, unsigned long, struct scatterlist *, int); -extern int zfcp_qdio_open(struct zfcp_adapter *); -extern void zfcp_qdio_close(struct zfcp_adapter *); +extern int zfcp_qdio_open(struct zfcp_qdio *); +extern void zfcp_qdio_close(struct zfcp_qdio *); /* zfcp_scsi.c */ extern struct zfcp_data zfcp_data; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e88b7804780b..b9a16e4b48b4 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -637,33 +637,34 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) } } -static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) +static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio) { - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; - spin_lock_bh(&adapter->req_q_lock); + spin_lock_bh(&qdio->req_q_lock); if (atomic_read(&req_q->count)) return 1; - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return 0; } -static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) +static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio) { + struct zfcp_adapter *adapter = qdio->adapter; long ret; - spin_unlock_bh(&adapter->req_q_lock); - ret = wait_event_interruptible_timeout(adapter->request_wq, - zfcp_fsf_sbal_check(adapter), 5 * HZ); + spin_unlock_bh(&qdio->req_q_lock); + ret = wait_event_interruptible_timeout(qdio->req_q_wq, + zfcp_fsf_sbal_check(qdio), 5 * HZ); if (ret > 0) return 0; if (!ret) { - atomic_inc(&adapter->qdio_outb_full); + atomic_inc(&qdio->req_q_full); /* assume hanging outbound queue, try queue recovery */ zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); } - spin_lock_bh(&adapter->req_q_lock); + spin_lock_bh(&qdio->req_q_lock); return -EIO; } @@ -700,11 +701,12 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) return qtcb; } -static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, +static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, u32 fsf_cmd, mempool_t *pool) { struct qdio_buffer_element *sbale; - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; + struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); if (unlikely(!req)) @@ -725,7 +727,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, 
req->queue_req.sbal_last = req_q->first; req->queue_req.sbale_curr = 1; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; @@ -740,7 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, return ERR_PTR(-ENOMEM); } - req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; + req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; req->qtcb->prefix.req_id = req->req_id; req->qtcb->prefix.ulp_info = 26; req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; @@ -764,6 +766,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; + struct zfcp_qdio *qdio = adapter->qdio; unsigned long flags; int idx; int with_qtcb = (req->qtcb != NULL); @@ -774,9 +777,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) list_add_tail(&req->list, &adapter->req_list[idx]); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count); + req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); req->issued = get_clock(); - if (zfcp_qdio_send(adapter, &req->queue_req)) { + if (zfcp_qdio_send(qdio, &req->queue_req)) { del_timer(&req->timer); spin_lock_irqsave(&adapter->req_list_lock, flags); /* lookup request again, list might have changed */ @@ -801,25 +804,26 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) * @req_flags: request flags * Returns: 0 on success, ERROR otherwise */ -int zfcp_fsf_status_read(struct zfcp_adapter *adapter) +int zfcp_fsf_status_read(struct zfcp_qdio *qdio) { + struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req; struct fsf_status_read_buffer *sr_buf; struct qdio_buffer_element *sbale; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->queue_req.sbale_curr = 2; @@ -830,7 +834,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; - sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); sbale->addr = (void *) sr_buf; sbale->length = sizeof(*sr_buf); @@ -846,7 +850,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) zfcp_fsf_req_free(req); zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -913,13 +917,13 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = unit->port->adapter->qdio; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 
- adapter->pool.scsi_abort); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, + qdio->adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -929,7 +933,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -947,7 +951,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return req; } @@ -1024,7 +1028,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, int max_sbals) { struct zfcp_adapter *adapter = req->adapter; - struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter, + struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, &req->queue_req); u32 feat = adapter->adapter_features; int bytes; @@ -1043,7 +1047,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, return 0; } - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_req, max_sbals); if (bytes <= 0) @@ -1051,7 +1055,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, req->qtcb->bottom.support.req_buf_length = bytes; req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_resp, max_sbals); if (bytes <= 0) @@ -1071,15 +1075,15 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, struct zfcp_erp_action *erp_action) { struct zfcp_wka_port *wka_port = ct->wka_port; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int ret = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1118,7 +1122,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, if (erp_action) erp_action->fsf_req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return ret; } @@ -1181,15 +1185,15 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) int zfcp_fsf_send_els(struct zfcp_send_els *els) { struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = els->adapter; + struct zfcp_qdio *qdio = els->adapter->qdio; struct fsf_qtcb_bottom_support *bottom; int ret = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1221,7 +1225,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return ret; } @@ -1229,15 +1233,15 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action 
*erp_action) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1245,7 +1249,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1265,29 +1269,29 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } -int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, +int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_config *data) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; } - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_config_data_handler; @@ -1303,7 +1307,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1311,7 +1315,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, return retval; out_unlock: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1322,20 +1326,20 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, */ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) { + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = erp_action->adapter; int retval = -EIO; - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) + if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1343,7 +1347,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) } req->status |= 
ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1358,31 +1362,31 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } /** * zfcp_fsf_exchange_port_data_sync - request information about local port - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * @data: pointer to struct fsf_qtcb_bottom_port * Returns: 0 on success, error otherwise */ -int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, +int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_port *data) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) + if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1392,14 +1396,14 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (data) req->data = data; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1409,7 +1413,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, return retval; out_unlock: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1495,17 +1499,17 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_fsf_req *req; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_port *port = erp_action->port; + struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1513,7 +1517,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1532,7 +1536,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) zfcp_port_put(port); } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); 
return retval; } @@ -1566,16 +1570,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1583,7 +1587,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1600,7 +1604,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1643,16 +1647,16 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + qdio->adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); @@ -1660,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1673,7 +1677,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1698,16 +1702,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + qdio->adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); @@ -1715,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= 
SBAL_FLAGS_LAST_ENTRY; @@ -1728,7 +1732,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1790,16 +1794,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1807,7 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1824,7 +1828,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1964,14 +1968,15 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1980,7 +1985,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2001,7 +2006,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -2050,16 +2055,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -2067,7 +2072,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = 
zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2085,7 +2090,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -2353,18 +2358,19 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; int real_bytes, retval = -EIO; struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = adapter->qdio; if (unlikely(!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; - spin_lock(&adapter->req_q_lock); - if (atomic_read(&adapter->req_q.count) <= 0) { - atomic_inc(&adapter->qdio_outb_full); + spin_lock(&qdio->req_q_lock); + if (atomic_read(&qdio->req_q.count) <= 0) { + atomic_inc(&qdio->req_q_full); goto out; } - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, adapter->pool.scsi_req); if (IS_ERR(req)) { @@ -2424,7 +2430,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); - real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype, + real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { @@ -2453,7 +2459,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: - spin_unlock(&adapter->req_q_lock); + spin_unlock(&qdio->req_q_lock); return retval; } @@ -2468,18 +2474,18 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; struct fcp_cmnd_iu *fcp_cmnd_iu; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = unit->port->adapter->qdio; if (unlikely(!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return NULL; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, - adapter->pool.scsi_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, + qdio->adapter->pool.scsi_req); if (IS_ERR(req)) { req = NULL; @@ -2496,7 +2502,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + sizeof(u32); - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2511,7 +2517,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return req; } @@ -2529,6 +2535,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, struct zfcp_fsf_cfdc *fsf_cfdc) { struct qdio_buffer_element *sbale; + struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req = NULL; struct fsf_qtcb_bottom_support *bottom; int direction, retval = -EIO, bytes; @@ -2547,11 +2554,11 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, return ERR_PTR(-EINVAL); } - 
spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL); + req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL); if (IS_ERR(req)) { retval = -EPERM; goto out; @@ -2559,15 +2566,16 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, req->handler = zfcp_fsf_control_file_handler; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= direction; bottom = &req->qtcb->bottom.support; bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction, - fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); + bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, + direction, fsf_cfdc->sg, + FSF_MAX_SBALS_PER_REQ); if (bytes != ZFCP_CFDC_MAX_SIZE) { zfcp_fsf_req_free(req); goto out; @@ -2576,7 +2584,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) { wait_for_completion(&req->completion); @@ -2590,9 +2598,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, * @adapter: pointer to struct zfcp_adapter * @sbal_idx: response queue index of SBAL to be processed */ -void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) +void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) { - struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx]; + struct zfcp_adapter *adapter = qdio->adapter; + struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req; unsigned long flags, req_id; @@ -2618,7 +2627,7 @@ void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) fsf_req->queue_req.sbal_response = sbal_idx; fsf_req->queue_req.qdio_inb_usage = - atomic_read(&adapter->resp_q.count); + atomic_read(&qdio->resp_q.count); zfcp_fsf_req_complete(fsf_req); if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e118874976f0..0b3f634509bf 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -36,18 +36,18 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) /** * zfcp_qdio_free - free memory used by request- and resposne queue - * @adapter: pointer to the zfcp_adapter structure + * @qdio: pointer to the zfcp_qdio structure */ -void zfcp_qdio_free(struct zfcp_adapter *adapter) +void zfcp_qdio_free(struct zfcp_qdio *qdio) { struct qdio_buffer **sbal_req, **sbal_resp; int p; - if (adapter->ccw_device) - qdio_free(adapter->ccw_device); + if (qdio->adapter->ccw_device) + qdio_free(qdio->adapter->ccw_device); - sbal_req = adapter->req_q.sbal; - sbal_resp = adapter->resp_q.sbal; + sbal_req = qdio->req_q.sbal; + sbal_resp = qdio->resp_q.sbal; for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { free_page((unsigned long) sbal_req[p]); @@ -55,8 +55,10 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter) } } -static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id) +static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) { + struct zfcp_adapter *adapter = qdio->adapter; + dev_warn(&adapter->ccw_device->dev, "A 
QDIO problem occurred\n"); zfcp_erp_adapter_reopen(adapter, @@ -75,47 +77,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) } /* this needs to be called prior to updating the queue fill level */ -static void zfcp_qdio_account(struct zfcp_adapter *adapter) +static void zfcp_qdio_account(struct zfcp_qdio *qdio) { ktime_t now; s64 span; int free, used; - spin_lock(&adapter->qdio_stat_lock); + spin_lock(&qdio->stat_lock); now = ktime_get(); - span = ktime_us_delta(now, adapter->req_q_time); - free = max(0, atomic_read(&adapter->req_q.count)); + span = ktime_us_delta(now, qdio->req_q_time); + free = max(0, atomic_read(&qdio->req_q.count)); used = QDIO_MAX_BUFFERS_PER_Q - free; - adapter->req_q_util += used * span; - adapter->req_q_time = now; - spin_unlock(&adapter->qdio_stat_lock); + qdio->req_q_util += used * span; + qdio->req_q_time = now; + spin_unlock(&qdio->stat_lock); } static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, int queue_no, int first, int count, unsigned long parm) { - struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; - struct zfcp_qdio_queue *queue = &adapter->req_q; + struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; + struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); - zfcp_qdio_handler_error(adapter, "qdireq1"); + zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_qdio_handler_error(qdio, "qdireq1"); return; } /* cleanup all SBALs being program-owned now */ zfcp_qdio_zero_sbals(queue->sbal, first, count); - zfcp_qdio_account(adapter); + zfcp_qdio_account(qdio); atomic_add(count, &queue->count); - wake_up(&adapter->request_wq); + wake_up(&qdio->req_q_wq); } -static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) +static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) { - struct zfcp_qdio_queue *queue = &adapter->resp_q; - struct ccw_device *cdev = adapter->ccw_device; + struct zfcp_qdio_queue *queue = &qdio->resp_q; + struct ccw_device *cdev = qdio->adapter->ccw_device; u8 count, start = queue->first; unsigned int retval; @@ -137,12 +139,12 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, int queue_no, int first, int count, unsigned long parm) { - struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; + struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; int sbal_idx, sbal_no; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); - zfcp_qdio_handler_error(adapter, "qdires1"); + zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_qdio_handler_error(qdio, "qdires1"); return; } @@ -153,26 +155,26 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, for (sbal_no = 0; sbal_no < count; sbal_no++) { sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */ - zfcp_fsf_reqid_check(adapter, sbal_idx); + zfcp_fsf_reqid_check(qdio, sbal_idx); } /* * put range of SBALs back to response queue * (including SBALs which have already been free before) */ - zfcp_qdio_resp_put_back(adapter, count); + zfcp_qdio_resp_put_back(qdio, count); } /** * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * @q_rec: pointer to struct zfcp_queue_rec * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element 
*zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, +struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, 0); + return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); } /** @@ -180,30 +182,30 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, * @fsf_req: pointer to struct fsf_req * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_adapter *adapter, +struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, + return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, q_req->sbale_curr); } -static void zfcp_qdio_sbal_limit(struct zfcp_adapter *adapter, +static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, int max_sbals) { - int count = atomic_read(&adapter->req_q.count); + int count = atomic_read(&qdio->req_q.count); count = min(count, max_sbals); q_req->sbal_limit = (q_req->sbal_first + count - 1) % QDIO_MAX_BUFFERS_PER_Q; } static struct qdio_buffer_element * -zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, +zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned long sbtype) { struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ @@ -211,7 +213,7 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, return NULL; /* set chaining flag in first SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_req(adapter, q_req); + sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ @@ -225,26 +227,26 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, q_req->sbale_curr = 0; /* set storage-block type for new SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= sbtype; return sbale; } static struct qdio_buffer_element * -zfcp_qdio_sbale_next(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, +zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned int sbtype) { if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) - return zfcp_qdio_sbal_chain(adapter, q_req, sbtype); + return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); q_req->sbale_curr++; - return zfcp_qdio_sbale_curr(adapter, q_req); + return zfcp_qdio_sbale_curr(qdio, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, +static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - struct qdio_buffer **sbal = adapter->req_q.sbal; + struct qdio_buffer **sbal = qdio->req_q.sbal; int first = q_req->sbal_first; int last = q_req->sbal_last; int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % @@ -252,7 +254,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, zfcp_qdio_zero_sbals(sbal, first, count); } -static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, +static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned int sbtype, void *start_addr, unsigned int total_length) @@ -264,10 +266,10 @@ static int 
zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, /* split segment up */ for (addr = start_addr, remaining = total_length; remaining > 0; addr += length, remaining -= length) { - sbale = zfcp_qdio_sbale_next(adapter, q_req, sbtype); + sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); if (!sbale) { - atomic_inc(&adapter->qdio_outb_full); - zfcp_qdio_undo_sbals(adapter, q_req); + atomic_inc(&qdio->req_q_full); + zfcp_qdio_undo_sbals(qdio, q_req); return -EINVAL; } @@ -289,7 +291,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, * @max_sbals: upper bound for number of SBALs to be used * Returns: number of bytes, or error (negativ) */ -int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, +int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned long sbtype, struct scatterlist *sg, int max_sbals) @@ -298,14 +300,14 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, int retval, bytes = 0; /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(adapter, q_req, max_sbals); + zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); /* set storage-block type for this request */ - sbale = zfcp_qdio_sbale_req(adapter, q_req); + sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->flags |= sbtype; for (; sg; sg = sg_next(sg)) { - retval = zfcp_qdio_fill_sbals(adapter, q_req, sbtype, + retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, sg_virt(sg), sg->length); if (retval < 0) return retval; @@ -313,7 +315,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, } /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; return bytes; @@ -321,20 +323,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, /** * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO - * @fsf_req: pointer to struct zfcp_fsf_req + * @qdio: pointer to struct zfcp_qdio + * @q_req: pointer to struct zfcp_queue_req * Returns: 0 on success, error otherwise */ -int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) +int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; int first = q_req->sbal_first; int count = q_req->sbal_number; int retval; unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; - zfcp_qdio_account(adapter); + zfcp_qdio_account(qdio); - retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); + retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, + count); if (unlikely(retval)) { zfcp_qdio_zero_sbals(req_q->sbal, first, count); return retval; @@ -347,63 +351,69 @@ int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) return 0; } + +static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, + struct zfcp_qdio *qdio) +{ + + id->cdev = qdio->adapter->ccw_device; + id->q_format = QDIO_ZFCP_QFMT; + memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); + ASCEBC(id->adapter_name, 8); + id->qib_param_field_format = 0; + id->qib_param_field = NULL; + id->input_slib_elements = NULL; + id->output_slib_elements = NULL; + id->no_input_qs = 1; + id->no_output_qs = 1; + id->input_handler = zfcp_qdio_int_resp; + id->output_handler = zfcp_qdio_int_req; + id->int_parm = (unsigned long) qdio; + id->flags = QDIO_INBOUND_0COPY_SBALS | + QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; + id->input_sbal_addr_array = (void 
**) (qdio->resp_q.sbal); + id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); + +} /** * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data * @adapter: pointer to struct zfcp_adapter * Returns: -ENOMEM on memory allocation error or return value from * qdio_allocate */ -int zfcp_qdio_allocate(struct zfcp_adapter *adapter) +int zfcp_qdio_allocate(struct zfcp_qdio *qdio, struct ccw_device *ccw_dev) { - struct qdio_initialize *init_data; + struct qdio_initialize init_data; - if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || - zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) + if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || + zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) return -ENOMEM; - init_data = &adapter->qdio_init_data; + zfcp_qdio_setup_init_data(&init_data, qdio); - init_data->cdev = adapter->ccw_device; - init_data->q_format = QDIO_ZFCP_QFMT; - memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8); - ASCEBC(init_data->adapter_name, 8); - init_data->qib_param_field_format = 0; - init_data->qib_param_field = NULL; - init_data->input_slib_elements = NULL; - init_data->output_slib_elements = NULL; - init_data->no_input_qs = 1; - init_data->no_output_qs = 1; - init_data->input_handler = zfcp_qdio_int_resp; - init_data->output_handler = zfcp_qdio_int_req; - init_data->int_parm = (unsigned long) adapter; - init_data->flags = QDIO_INBOUND_0COPY_SBALS | - QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; - init_data->input_sbal_addr_array = - (void **) (adapter->resp_q.sbal); - init_data->output_sbal_addr_array = - (void **) (adapter->req_q.sbal); - - return qdio_allocate(init_data); + return qdio_allocate(&init_data); } /** * zfcp_close_qdio - close qdio queues for an adapter + * @qdio: pointer to structure zfcp_qdio */ -void zfcp_qdio_close(struct zfcp_adapter *adapter) +void zfcp_qdio_close(struct zfcp_qdio *qdio) { struct zfcp_qdio_queue *req_q; int first, count; - if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return; /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - req_q = &adapter->req_q; - spin_lock_bh(&adapter->req_q_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); - spin_unlock_bh(&adapter->req_q_lock); + req_q = &qdio->req_q; + spin_lock_bh(&qdio->req_q_lock); + atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + spin_unlock_bh(&qdio->req_q_lock); - qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + qdio_shutdown(qdio->adapter->ccw_device, + QDIO_FLAG_CLEANUP_USING_CLEAR); /* cleanup used outbound sbals */ count = atomic_read(&req_q->count); @@ -414,50 +424,54 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) } req_q->first = 0; atomic_set(&req_q->count, 0); - adapter->resp_q.first = 0; - atomic_set(&adapter->resp_q.count, 0); + qdio->resp_q.first = 0; + atomic_set(&qdio->resp_q.count, 0); } /** * zfcp_qdio_open - prepare and initialize response queue - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * Returns: 0 on success, otherwise -EIO */ -int zfcp_qdio_open(struct zfcp_adapter *adapter) +int zfcp_qdio_open(struct zfcp_qdio *qdio) { struct qdio_buffer_element *sbale; + struct qdio_initialize init_data; + struct ccw_device *cdev = qdio->adapter->ccw_device; int cc; - if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) + if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) return 
-EIO; - if (qdio_establish(&adapter->qdio_init_data)) + zfcp_qdio_setup_init_data(&init_data, qdio); + + if (qdio_establish(&init_data)) goto failed_establish; - if (qdio_activate(adapter->ccw_device)) + if (qdio_activate(cdev)) goto failed_qdio; for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { - sbale = &(adapter->resp_q.sbal[cc]->element[0]); + sbale = &(qdio->resp_q.sbal[cc]->element[0]); sbale->length = 0; sbale->flags = SBAL_FLAGS_LAST_ENTRY; sbale->addr = NULL; } - if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio; /* set index of first avalable SBALS / number of available SBALS */ - adapter->req_q.first = 0; - atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); + qdio->req_q.first = 0; + atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); return 0; failed_qdio: - qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); failed_establish: - dev_err(&adapter->ccw_device->dev, + dev_err(&cdev->dev, "Setting up the QDIO connection to the FCP adapter failed\n"); return -EIO; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 2e13d41269a4..4414720c87f6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { struct zfcp_unit *unit = scpnt->device->hostdata; struct zfcp_adapter *adapter = unit->port->adapter; - struct zfcp_fsf_req *fsf_req; + struct zfcp_fsf_req *fsf_req = NULL; int retval = SUCCESS; int retry = 3; @@ -429,7 +429,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) if (!data) return NULL; - ret = zfcp_fsf_exchange_port_data_sync(adapter, data); + ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); if (ret) { kfree(data); return NULL; @@ -458,7 +458,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) if (!data) return; - ret = zfcp_fsf_exchange_port_data_sync(adapter, data); + ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); if (ret) kfree(data); else { diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 0fe5cce818cb..a6cf62636834 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, if (!qtcb_port) return -ENOMEM; - retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); + retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); if (!retval) retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, qtcb_port->cb_util, qtcb_port->a_util); @@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev, if (!qtcb_config) return -ENOMEM; - retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); + retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); if (!retval) *stat_inf = qtcb_config->stat_info; @@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, char *buf) { struct Scsi_Host *scsi_host = class_to_shost(dev); - struct zfcp_adapter *adapter = - (struct zfcp_adapter *) scsi_host->hostdata[0]; + struct zfcp_qdio *qdio = + ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio; u64 util; - spin_lock_bh(&adapter->qdio_stat_lock); - util = adapter->req_q_util; - spin_unlock_bh(&adapter->qdio_stat_lock); + spin_lock_bh(&qdio->stat_lock); + util = 
qdio->req_q_util; + spin_unlock_bh(&qdio->stat_lock); - return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), + return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), (unsigned long long)util); } static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); From 799b76d09aeee558d18c1f5b93e63f58f1d1fc11 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:20 +0200 Subject: [PATCH 095/209] [SCSI] zfcp: Decouple gid_pn requests from erp Don't let the erp wait for gid_pn requests to complete. Instead, queue the gid_pn work, exit erp and let the finished gid_pn work trigger a new port reopen. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 9 ++++++-- drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_erp.c | 28 +--------------------- drivers/s390/scsi/zfcp_ext.h | 6 ++--- drivers/s390/scsi/zfcp_fc.c | 45 +++++++++++++++++++++++++++--------- drivers/s390/scsi/zfcp_fsf.c | 14 ++--------- 6 files changed, 47 insertions(+), 56 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index ed7211ef04eb..572dcd67e713 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -362,6 +362,11 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) if (!adapter->pool.erp_req) return -ENOMEM; + adapter->pool.gid_pn_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.gid_pn_req) + return -ENOMEM; + adapter->pool.scsi_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.scsi_req) @@ -379,7 +384,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.qtcb_pool = - mempool_create_slab_pool(3, zfcp_data.qtcb_cache); + mempool_create_slab_pool(4, zfcp_data.qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; @@ -652,7 +657,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, init_waitqueue_head(&port->remove_wq); INIT_LIST_HEAD(&port->unit_list_head); - INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup); + INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index bac5c497eab5..9a8ff0553421 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -265,6 +265,7 @@ struct zfcp_fsf_req; /* holds various memory pools of an adapter */ struct zfcp_adapter_mempool { mempool_t *erp_req; + mempool_t *gid_pn_req; mempool_t *scsi_req; mempool_t *scsi_abort; mempool_t *status_read_req; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index feda1db56b23..67297d2744fb 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -26,7 +26,6 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, - ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, ZFCP_ERP_STEP_PORT_OPENING = 0x0800, ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, @@ -842,27 +841,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) return zfcp_erp_port_strategy_open_port(act); } -void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) -{ - int retval; - struct zfcp_port *port = container_of(work, struct 
zfcp_port, - gid_pn_work); - - retval = zfcp_fc_ns_gid_pn(&port->erp_action); - if (!retval) { - port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; - goto out; - } - if (retval == -ENOMEM) { - zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); - goto out; - } - /* all other error condtions */ - zfcp_erp_notify(&port->erp_action, 0); -out: - zfcp_port_put(port); -} - static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) { struct zfcp_adapter *adapter = act->adapter; @@ -880,12 +858,8 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) if (!queue_work(adapter->work_queue, &port->gid_pn_work)) zfcp_port_put(port); - return ZFCP_ERP_CONTINUES; + return ZFCP_ERP_EXIT; } - /* fall through */ - case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: - if (!port->d_id) - return ZFCP_ERP_FAILED; return zfcp_erp_port_strategy_open_port(act); case ZFCP_ERP_STEP_PORT_OPENING: diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index e97947d2f2ed..7650cec7f71f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -94,13 +94,12 @@ extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, void *); extern void zfcp_erp_timeout_handler(unsigned long); -extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *); /* zfcp_fc.c */ extern int zfcp_scan_ports(struct zfcp_adapter *); extern void _zfcp_scan_ports_later(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); -extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); +extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); @@ -128,8 +127,7 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); -extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, - struct zfcp_erp_action *); +extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); extern int zfcp_fsf_send_els(struct zfcp_send_els *); extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, struct scsi_cmnd *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8921e16fdab7..bc0c9f54d0d8 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -282,15 +282,15 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data) port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; } -int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, +static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, struct zfcp_gid_pn_data *gid_pn) { - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_adapter *adapter = port->adapter; struct zfcp_fc_ns_handler_data compl_rec; int ret; /* setup parameters for send generic command */ - gid_pn->port = erp_action->port; + gid_pn->port = port; gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; @@ -309,12 +309,12 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 
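
The decoupling described in the commit message hinges on the new work function zfcp_fc_port_did_lookup(), added further down in this patch: the GID_PN nameserver query now runs from the adapter work queue, and its outcome selects the next recovery step instead of the ERP engine waiting for the lookup inline. The following is a minimal userspace sketch of that decision flow only; the function name, enum names and the sample d_id are made up for illustration and are not part of the driver.

#include <stdio.h>

/* Illustrative model of the post-lookup decision in zfcp_fc_port_did_lookup().
 * All names here are hypothetical; only the control flow mirrors the patch. */
enum next_step { REOPEN_ADAPTER, FAIL_PORT, REOPEN_PORT };

/* lookup_failed: the GID_PN request could not be issued or sent
 * d_id:          destination ID returned by the nameserver (0 = unknown WWPN) */
static enum next_step after_gid_pn(int lookup_failed, unsigned int d_id)
{
	if (lookup_failed)
		return REOPEN_ADAPTER;	/* "fcgpn_1": retry via adapter recovery */
	if (!d_id)
		return FAIL_PORT;	/* "fcgpn_2": nameserver does not know the port */
	return REOPEN_PORT;		/* "fcgpn_3": continue with a port reopen */
}

int main(void)
{
	/* the non-zero sample d_id is arbitrary */
	printf("%d %d %d\n", after_gid_pn(1, 0), after_gid_pn(0, 0),
	       after_gid_pn(0, 0x010203));
	return 0;
}

In the driver the first two branches feed back into ERP via zfcp_erp_adapter_reopen() and zfcp_erp_port_failed(), while the third re-queues a normal port reopen, which is why zfcp_erp_port_strategy_open_common() can now return ZFCP_ERP_EXIT immediately after queueing the gid_pn work.
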
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; - gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; + gid_pn->ct_iu_req.wwpn = port->wwpn; init_completion(&compl_rec.done); compl_rec.handler = zfcp_fc_ns_gid_pn_eval; compl_rec.handler_data = (unsigned long) gid_pn; - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.erp_req, erp_action); + ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -322,14 +322,14 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, /** * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request - * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed + * @port: port where GID_PN request is needed * return: -ENOMEM on error, 0 otherwise */ -int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) +static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; struct zfcp_gid_pn_data *gid_pn; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_adapter *adapter = port->adapter; gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); if (!gid_pn) @@ -341,7 +341,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) if (ret) goto out; - ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); + ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); zfcp_wka_port_put(&adapter->gs->ds); out: @@ -349,6 +349,29 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) return ret; } +void zfcp_fc_port_did_lookup(struct work_struct *work) +{ + int ret; + struct zfcp_port *port = container_of(work, struct zfcp_port, + gid_pn_work); + + ret = zfcp_fc_ns_gid_pn(port); + if (ret) { + /* could not issue gid_pn for some reason */ + zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); + goto out; + } + + if (!port->d_id) { + zfcp_erp_port_failed(port, "fcgpn_2", NULL); + goto out; + } + + zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); +out: + zfcp_port_put(port); +} + /** * zfcp_fc_plogi_evaluate - evaluate PLOGI playload * @port: zfcp_port structure @@ -551,7 +574,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, init_completion(&compl_rec.done); compl_rec.handler = NULL; - ret = zfcp_fsf_send_ct(ct, NULL, NULL); + ret = zfcp_fsf_send_ct(ct, NULL); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -840,7 +863,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ct_fc_job->ct.completion = NULL; ct_fc_job->job = job; - ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); + ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); if (ret) { kfree(ct_fc_job); zfcp_wka_port_put(ct_fc_job->ct.wka_port); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b9a16e4b48b4..048f1a848f34 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1069,10 +1069,8 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) * @ct: pointer to struct zfcp_send_ct with data for request * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req - * @erp_action: if non-null the Generic Service request sent within ERP */ -int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, - struct zfcp_erp_action *erp_action) +int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) { struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_qdio *qdio = wka_port->adapter->qdio; @@ -1103,13 +1101,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, req->data = 
ct; zfcp_san_dbf_event_ct_request(req); - - if (erp_action) { - erp_action->fsf_req = req; - req->erp_action = erp_action; - zfcp_fsf_start_erp_timer(req); - } else - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) @@ -1119,8 +1111,6 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, failed_send: zfcp_fsf_req_free(req); - if (erp_action) - erp_action->fsf_req = NULL; out: spin_unlock_bh(&qdio->req_q_lock); return ret; From 5771710bd5edfafcb8656f49b93690a6fae5a4d2 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:21 +0200 Subject: [PATCH 096/209] [SCSI] zfcp: Update dbf calls Change the dbf data and functions to use the zfcp_dbf prefix throughout the code. Also change the calls to dbf to use zfcp_dbf instead of zfcp_adapter. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 7 +- drivers/s390/scsi/zfcp_dbf.c | 408 ++++++++++++++++------------------ drivers/s390/scsi/zfcp_dbf.h | 152 ++++++------- drivers/s390/scsi/zfcp_erp.c | 49 ++-- drivers/s390/scsi/zfcp_ext.h | 52 ++--- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 24 +- drivers/s390/scsi/zfcp_qdio.c | 6 +- drivers/s390/scsi/zfcp_scsi.c | 27 ++- 9 files changed, 347 insertions(+), 380 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 572dcd67e713..7a50f64c36bd 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -517,6 +517,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto qdio_mem_failed; adapter->qdio->adapter = adapter; + ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); @@ -530,7 +531,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (zfcp_reqlist_alloc(adapter)) goto failed_low_mem_buffers; - if (zfcp_adapter_debug_register(adapter)) + if (zfcp_dbf_adapter_register(adapter)) goto debug_register_failed; if (zfcp_setup_adapter_work_queue(adapter)) @@ -577,7 +578,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) sysfs_failed: zfcp_destroy_adapter_work_queue(adapter); work_queue_failed: - zfcp_adapter_debug_unregister(adapter); + zfcp_dbf_adapter_unregister(adapter->dbf); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); kfree(adapter->req_list); @@ -616,7 +617,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) return; zfcp_destroy_adapter_work_queue(adapter); - zfcp_adapter_debug_unregister(adapter); + zfcp_dbf_adapter_unregister(adapter->dbf); zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); kfree(adapter->req_list); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 3179b08bda6a..c066428b2878 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -120,9 +120,9 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, return p - out_buf; } -void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, - struct zfcp_fsf_req *fsf_req, - struct zfcp_dbf *dbf) +void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, + struct zfcp_fsf_req *fsf_req, + struct zfcp_dbf *dbf) { struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = @@ -132,11 +132,11 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, struct zfcp_port *port; struct zfcp_unit *unit; struct zfcp_send_els *send_els; - struct 
zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; - struct zfcp_hba_dbf_record_response *response = &rec->u.response; + struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; + struct zfcp_dbf_hba_record_response *response = &rec->u.response; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); @@ -203,7 +203,7 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, break; } - debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); /* have fcp channel microcode fixed to use as little as possible */ if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { @@ -211,27 +211,25 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, char *buf = (char *)qtcb + qtcb->header.log_start; int len = qtcb->header.log_length; for (; len && !buf[len - 1]; len--); - zfcp_dbf_hexdump(dbf->hba_dbf, rec, sizeof(*rec), level, buf, + zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, len); } - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } -void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, - struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *status_buffer) +void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf, + struct fsf_status_read_buffer *status_buffer) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); - rec->u.status.failed = atomic_read(&adapter->stat_miss); + rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss); if (status_buffer != NULL) { rec->u.status.status_type = status_buffer->status_type; rec->u.status.status_subtype = status_buffer->status_subtype; @@ -268,58 +266,54 @@ void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure + * zfcp_dbf_hba_qdio - trace event for QDIO related failure * @qdio: qdio structure affected by this QDIO related event * @qdio_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *qdio, - unsigned int qdio_error, int sbal_index, - int sbal_count) +void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, + int sbal_index, int sbal_count) { - struct zfcp_dbf *dbf = qdio->adapter->dbf; - struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *r = &dbf->hba_buf; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); r->u.qdio.qdio_error = qdio_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; - 
debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_hba_dbf_event_berr - trace event for bit error threshold - * @adapter: adapter affected by this QDIO related event + * zfcp_dbf_hba_berr - trace event for bit error threshold + * @dbf: dbf structure affected by this QDIO related event * @req: fsf request */ -void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter, - struct zfcp_fsf_req *req) +void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *r = &dbf->hba_buf; struct fsf_status_read_buffer *sr_buf = req->data; struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); - debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } -static void zfcp_hba_dbf_view_response(char **p, - struct zfcp_hba_dbf_record_response *r) +static void zfcp_dbf_hba_view_response(char **p, + struct zfcp_dbf_hba_record_response *r) { struct timespec t; @@ -380,8 +374,8 @@ static void zfcp_hba_dbf_view_response(char **p, } } -static void zfcp_hba_dbf_view_status(char **p, - struct zfcp_hba_dbf_record_status *r) +static void zfcp_dbf_hba_view_status(char **p, + struct zfcp_dbf_hba_record_status *r) { zfcp_dbf_out(p, "failed", "0x%02x", r->failed); zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); @@ -393,14 +387,14 @@ static void zfcp_hba_dbf_view_status(char **p, r->payload_size); } -static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) +static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r) { zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); } -static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) +static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) { zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); @@ -424,10 +418,10 @@ static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) r->current_transmit_b2b_credit); } -static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf; + struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf; char *p = out_buf; if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) @@ -438,45 +432,42 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_tag(&p, "tag2", r->tag2); if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_response(&p, &r->u.response); + zfcp_dbf_hba_view_response(&p, &r->u.response); else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_status(&p, &r->u.status); + 
zfcp_dbf_hba_view_status(&p, &r->u.status); else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); + zfcp_dbf_hba_view_qdio(&p, &r->u.qdio); else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_berr(&p, &r->u.berr); + zfcp_dbf_hba_view_berr(&p, &r->u.berr); if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) p += sprintf(p, "\n"); return p - out_buf; } -static struct debug_view zfcp_hba_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_hba_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_hba_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_hba_view_format, }; -static const char *zfcp_rec_dbf_tags[] = { +static const char *zfcp_dbf_rec_tags[] = { [ZFCP_REC_DBF_ID_THREAD] = "thread", [ZFCP_REC_DBF_ID_TARGET] = "target", [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", [ZFCP_REC_DBF_ID_ACTION] = "action", }; -static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, char *buf, const char *_rec) { - struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec; + struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; char *p = buf; char hint[ZFCP_DBF_ID_SIZE + 1]; memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); hint[ZFCP_DBF_ID_SIZE] = 0; - zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]); + zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); zfcp_dbf_outs(&p, "hint", hint); switch (r->id) { case ZFCP_REC_DBF_ID_THREAD: @@ -514,25 +505,22 @@ static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, return p - buf; } -static struct debug_view zfcp_rec_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_rec_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_rec_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_rec_view_format, }; /** - * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation + * zfcp_dbf_rec_thread - trace event related to recovery thread operation * @id2: identifier for event - * @adapter: adapter + * @dbf: reference to dbf structure * This function assumes that the caller is holding erp_lock. 
*/ -void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) +void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_adapter *adapter = dbf->adapter; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags = 0; struct list_head *entry; unsigned ready = 0, running = 0, total; @@ -543,42 +531,41 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) running++; total = adapter->erp_total_count; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_THREAD; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); r->u.thread.total = total; r->u.thread.ready = ready; r->u.thread.running = running; - debug_event(dbf->rec_dbf, 6, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 6, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation + * zfcp_dbf_rec_thread - trace event related to recovery thread operation * @id2: identifier for event * @adapter: adapter * This function assumes that the caller does not hold erp_lock. */ -void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter) +void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf) { + struct zfcp_adapter *adapter = dbf->adapter; unsigned long flags; read_lock_irqsave(&adapter->erp_lock, flags); - zfcp_rec_dbf_event_thread(id2, adapter); + zfcp_dbf_rec_thread(id2, dbf); read_unlock_irqrestore(&adapter->erp_lock, flags); } -static void zfcp_rec_dbf_event_target(char *id2, void *ref, - struct zfcp_adapter *adapter, - atomic_t *status, atomic_t *erp_count, - u64 wwpn, u32 d_id, u64 fcp_lun) +static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf, + atomic_t *status, atomic_t *erp_count, u64 wwpn, + u32 d_id, u64 fcp_lun) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TARGET; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -588,56 +575,57 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, r->u.target.d_id = d_id; r->u.target.fcp_lun = fcp_lun; r->u.target.erp_count = atomic_read(erp_count); - debug_event(dbf->rec_dbf, 3, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 3, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_adapter - trace event for adapter state change + * zfcp_dbf_rec_adapter - trace event for adapter state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. 
request) - * @adapter: adapter + * @dbf: reference to dbf structure */ -void zfcp_rec_dbf_event_adapter(char *id, void *ref, - struct zfcp_adapter *adapter) +void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) { - zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status, + struct zfcp_adapter *adapter = dbf->adapter; + + zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, &adapter->erp_counter, 0, 0, 0); } /** - * zfcp_rec_dbf_event_port - trace event for port state change + * zfcp_dbf_rec_port - trace event for port state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. request) * @port: port */ -void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port) +void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) { - struct zfcp_adapter *adapter = port->adapter; + struct zfcp_dbf *dbf = port->adapter->dbf; - zfcp_rec_dbf_event_target(id, ref, adapter, &port->status, + zfcp_dbf_rec_target(id, ref, dbf, &port->status, &port->erp_counter, port->wwpn, port->d_id, 0); } /** - * zfcp_rec_dbf_event_unit - trace event for unit state change + * zfcp_dbf_rec_unit - trace event for unit state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. request) * @unit: unit */ -void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) +void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) { struct zfcp_port *port = unit->port; - struct zfcp_adapter *adapter = port->adapter; + struct zfcp_dbf *dbf = port->adapter->dbf; - zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status, + zfcp_dbf_rec_target(id, ref, dbf, &unit->status, &unit->erp_counter, port->wwpn, port->d_id, unit->fcp_lun); } /** - * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery + * zfcp_dbf_rec_trigger - trace event for triggered error recovery * @id2: identifier for error recovery trigger * @ref: additional reference (e.g. request) * @want: originally requested error recovery action @@ -647,15 +635,15 @@ void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) * @port: port * @unit: unit */ -void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, - void *action, struct zfcp_adapter *adapter, - struct zfcp_port *port, struct zfcp_unit *unit) +void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, + struct zfcp_adapter *adapter, struct zfcp_port *port, + struct zfcp_unit *unit) { struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TRIGGER; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -672,23 +660,22 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, r->u.trigger.us = atomic_read(&unit->status); r->u.trigger.fcp_lun = unit->fcp_lun; } - debug_event(dbf->rec_dbf, action ? 1 : 4, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, action ? 
1 : 4, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_action - trace event showing progress of recovery action + * zfcp_dbf_rec_action - trace event showing progress of recovery action * @id2: identifier * @erp_action: error recovery action struct pointer */ -void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) +void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) { - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf *dbf = erp_action->adapter->dbf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_ACTION; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -696,27 +683,27 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) r->u.action.status = erp_action->status; r->u.action.step = erp_action->step; r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; - debug_event(dbf->rec_dbf, 5, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 5, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_san_dbf_event_ct_request - trace event for issued CT request + * zfcp_dbf_san_ct_request - trace event for issued CT request * @fsf_req: request containing issued CT data */ -void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct zfcp_dbf *dbf = adapter->dbf; struct ct_hdr *hdr = sg_virt(ct->req); - struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; - struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; + struct zfcp_dbf_san_record *r = &dbf->san_buf; + struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; int level = 3; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -731,29 +718,29 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) oct->max_res_size = hdr->max_res_size; oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), oct->len); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } /** - * zfcp_san_dbf_event_ct_response - trace event for completion of CT request + * zfcp_dbf_san_ct_response - trace event for completion of CT request * @fsf_req: request containing CT response */ -void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct ct_hdr *hdr = sg_virt(ct->resp); struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; - 
struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; + struct zfcp_dbf_san_record *r = &dbf->san_buf; + struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; int level = 3; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -768,23 +755,22 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) rct->max_res_size = hdr->max_res_size; rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), rct->len); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } -static void zfcp_san_dbf_event_els(const char *tag, int level, - struct zfcp_fsf_req *fsf_req, u32 s_id, - u32 d_id, u8 ls_code, void *buffer, - int buflen) +static void zfcp_dbf_san_els(const char *tag, int level, + struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, + u8 ls_code, void *buffer, int buflen) { struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_san_dbf_record *rec = &dbf->san_dbf_buf; + struct zfcp_dbf_san_record *rec = &dbf->san_buf; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); rec->fsf_reqid = fsf_req->req_id; @@ -792,45 +778,45 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, rec->s_id = s_id; rec->d_id = d_id; rec->u.els.ls_code = ls_code; - debug_event(dbf->san_dbf, level, rec, sizeof(*rec)); - zfcp_dbf_hexdump(dbf->san_dbf, rec, sizeof(*rec), level, + debug_event(dbf->san, level, rec, sizeof(*rec)); + zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } /** - * zfcp_san_dbf_event_els_request - trace event for issued ELS + * zfcp_dbf_san_els_request - trace event for issued ELS * @fsf_req: request containing issued ELS */ -void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; - zfcp_san_dbf_event_els("oels", 2, fsf_req, + zfcp_dbf_san_els("oels", 2, fsf_req, fc_host_port_id(els->adapter->scsi_host), els->d_id, *(u8 *) sg_virt(els->req), sg_virt(els->req), els->req->length); } /** - * zfcp_san_dbf_event_els_response - trace event for completed ELS + * zfcp_dbf_san_els_response - trace event for completed ELS * @fsf_req: request containing ELS response */ -void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; - zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, + zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, fc_host_port_id(els->adapter->scsi_host), *(u8 *)sg_virt(els->req), sg_virt(els->resp), els->resp->length); } /** - * zfcp_san_dbf_event_incoming_els - trace event for incomig ELS + * zfcp_dbf_san_incoming_els - trace event for incomig ELS * @fsf_req: request containing 
unsolicited status buffer with incoming ELS */ -void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_status_read_buffer *buf = @@ -838,16 +824,16 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) int length = (int)buf->length - (int)((void *)&buf->payload - (void *)buf); - zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, + zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, fc_host_port_id(adapter->scsi_host), buf->payload.data[0], (void *)buf->payload.data, length); } -static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; + struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf; char *p = out_buf; if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) @@ -860,7 +846,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req; + struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); @@ -868,7 +854,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "options", "0x%02x", ct->options); zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; + struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp; zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); @@ -878,34 +864,30 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_els *els = &r->u.els; + struct zfcp_dbf_san_record_els *els = &r->u.els; zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); } return p - out_buf; } -static struct debug_view zfcp_san_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_san_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_san_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_san_view_format, }; -void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req, - unsigned long old_req_id) +void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, + struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) { - struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; + struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; struct fcp_rsp_iu *fcp_rsp; char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; int offset = 0, buflen = 0; - spin_lock_irqsave(&dbf->scsi_dbf_lock, 
flags); + spin_lock_irqsave(&dbf->scsi_lock, flags); do { memset(rec, 0, sizeof(*rec)); if (offset == 0) { @@ -959,20 +941,20 @@ void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, dump->offset = offset; dump->size = min(buflen - offset, (int)sizeof(struct - zfcp_scsi_dbf_record) - + zfcp_dbf_scsi_record) - (int)sizeof(struct zfcp_dbf_dump)); memcpy(dump->data, fcp_sns_info + offset, dump->size); offset += dump->size; } - debug_event(dbf->scsi_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->scsi, level, rec, sizeof(*rec)); } while (offset < buflen); - spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); } -static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf; + struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; struct timespec t; char *p = out_buf; @@ -1013,13 +995,10 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, return p - out_buf; } -static struct debug_view zfcp_scsi_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_scsi_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_scsi_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_scsi_view_format, }; static debug_info_t *zfcp_dbf_reg(const char *name, int level, @@ -1043,7 +1022,7 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level, * @adapter: pointer to adapter for which debug features should be registered * return: -ENOMEM on error, 0 otherwise */ -int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) +int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) { char dbf_name[DEBUG_MAX_NAME_LEN]; struct zfcp_dbf *dbf; @@ -1052,63 +1031,60 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) if (!dbf) return -ENOMEM; - spin_lock_init(&dbf->hba_dbf_lock); - spin_lock_init(&dbf->san_dbf_lock); - spin_lock_init(&dbf->scsi_dbf_lock); - spin_lock_init(&dbf->rec_dbf_lock); + dbf->adapter = adapter; + + spin_lock_init(&dbf->hba_lock); + spin_lock_init(&dbf->san_lock); + spin_lock_init(&dbf->scsi_lock); + spin_lock_init(&dbf->rec_lock); /* debug feature area which records recovery activity */ sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); - dbf->rec_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_rec_dbf_view, - sizeof(struct zfcp_rec_dbf_record)); - if (!dbf->rec_dbf) - goto fail_rec; + dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, + sizeof(struct zfcp_dbf_rec_record)); + if (!dbf->rec) + goto err_out; /* debug feature area which records HBA (FSF and QDIO) conditions */ sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); - dbf->hba_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_hba_dbf_view, - sizeof(struct zfcp_hba_dbf_record)); - if (!dbf->hba_dbf) - goto fail_hba; + dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, + sizeof(struct zfcp_dbf_hba_record)); + if (!dbf->hba) + goto err_out; /* debug feature area which records SAN command failures and recovery */ sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); - dbf->san_dbf = zfcp_dbf_reg(dbf_name, 6, &zfcp_san_dbf_view, - sizeof(struct zfcp_san_dbf_record)); - if (!dbf->san_dbf) - goto fail_san; + dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, + sizeof(struct 
zfcp_dbf_san_record)); + if (!dbf->san) + goto err_out; /* debug feature area which records SCSI command failures and recovery */ sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); - dbf->scsi_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_scsi_dbf_view, - sizeof(struct zfcp_scsi_dbf_record)); - if (!dbf->scsi_dbf) - goto fail_scsi; + dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, + sizeof(struct zfcp_dbf_scsi_record)); + if (!dbf->scsi) + goto err_out; adapter->dbf = dbf; return 0; -fail_scsi: - debug_unregister(dbf->san_dbf); -fail_san: - debug_unregister(dbf->hba_dbf); -fail_hba: - debug_unregister(dbf->rec_dbf); -fail_rec: - kfree(dbf); +err_out: + zfcp_dbf_adapter_unregister(dbf); return -ENOMEM; } /** * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter - * @adapter: pointer to adapter for which debug features should be unregistered + * @dbf: pointer to dbf for which debug features should be unregistered */ -void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) +void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) { - debug_unregister(adapter->dbf->scsi_dbf); - debug_unregister(adapter->dbf->san_dbf); - debug_unregister(adapter->dbf->hba_dbf); - debug_unregister(adapter->dbf->rec_dbf); - kfree(adapter->dbf); - adapter->dbf = NULL; + debug_unregister(dbf->scsi); + debug_unregister(dbf->san); + debug_unregister(dbf->hba); + debug_unregister(dbf->rec); + dbf->adapter->dbf = NULL; + kfree(dbf); } + diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index bceaff449033..6b1461e8f847 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -37,13 +37,13 @@ struct zfcp_dbf_dump { u8 data[]; /* dump data */ } __attribute__ ((packed)); -struct zfcp_rec_dbf_record_thread { +struct zfcp_dbf_rec_record_thread { u32 total; u32 ready; u32 running; }; -struct zfcp_rec_dbf_record_target { +struct zfcp_dbf_rec_record_target { u64 ref; u32 status; u32 d_id; @@ -52,7 +52,7 @@ struct zfcp_rec_dbf_record_target { u32 erp_count; }; -struct zfcp_rec_dbf_record_trigger { +struct zfcp_dbf_rec_record_trigger { u8 want; u8 need; u32 as; @@ -64,21 +64,21 @@ struct zfcp_rec_dbf_record_trigger { u64 fcp_lun; }; -struct zfcp_rec_dbf_record_action { +struct zfcp_dbf_rec_record_action { u32 status; u32 step; u64 action; u64 fsf_req; }; -struct zfcp_rec_dbf_record { +struct zfcp_dbf_rec_record { u8 id; char id2[7]; union { - struct zfcp_rec_dbf_record_action action; - struct zfcp_rec_dbf_record_thread thread; - struct zfcp_rec_dbf_record_target target; - struct zfcp_rec_dbf_record_trigger trigger; + struct zfcp_dbf_rec_record_action action; + struct zfcp_dbf_rec_record_thread thread; + struct zfcp_dbf_rec_record_target target; + struct zfcp_dbf_rec_record_trigger trigger; } u; }; @@ -89,7 +89,7 @@ enum { ZFCP_REC_DBF_ID_TRIGGER, }; -struct zfcp_hba_dbf_record_response { +struct zfcp_dbf_hba_record_response { u32 fsf_command; u64 fsf_reqid; u32 fsf_seqno; @@ -127,7 +127,7 @@ struct zfcp_hba_dbf_record_response { } u; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record_status { +struct zfcp_dbf_hba_record_status { u8 failed; u32 status_type; u32 status_subtype; @@ -141,24 +141,24 @@ struct zfcp_hba_dbf_record_status { u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record_qdio { +struct zfcp_dbf_hba_record_qdio { u32 qdio_error; u8 sbal_index; u8 sbal_count; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record { +struct zfcp_dbf_hba_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u8 
tag2[ZFCP_DBF_TAG_SIZE]; union { - struct zfcp_hba_dbf_record_response response; - struct zfcp_hba_dbf_record_status status; - struct zfcp_hba_dbf_record_qdio qdio; + struct zfcp_dbf_hba_record_response response; + struct zfcp_dbf_hba_record_status status; + struct zfcp_dbf_hba_record_qdio qdio; struct fsf_bit_error_payload berr; } u; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_ct_request { +struct zfcp_dbf_san_record_ct_request { u16 cmd_req_code; u8 revision; u8 gs_type; @@ -168,7 +168,7 @@ struct zfcp_san_dbf_record_ct_request { u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_ct_response { +struct zfcp_dbf_san_record_ct_response { u16 cmd_rsp_code; u8 revision; u8 reason_code; @@ -178,27 +178,27 @@ struct zfcp_san_dbf_record_ct_response { u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_els { +struct zfcp_dbf_san_record_els { u8 ls_code; u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record { +struct zfcp_dbf_san_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u64 fsf_reqid; u32 fsf_seqno; u32 s_id; u32 d_id; union { - struct zfcp_san_dbf_record_ct_request ct_req; - struct zfcp_san_dbf_record_ct_response ct_resp; - struct zfcp_san_dbf_record_els els; + struct zfcp_dbf_san_record_ct_request ct_req; + struct zfcp_dbf_san_record_ct_response ct_resp; + struct zfcp_dbf_san_record_els els; } u; #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 u8 payload[32]; } __attribute__ ((packed)); -struct zfcp_scsi_dbf_record { +struct zfcp_dbf_scsi_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u8 tag2[ZFCP_DBF_TAG_SIZE]; u32 scsi_id; @@ -225,86 +225,84 @@ struct zfcp_scsi_dbf_record { } __attribute__ ((packed)); struct zfcp_dbf { - debug_info_t *rec_dbf; - debug_info_t *hba_dbf; - debug_info_t *san_dbf; - debug_info_t *scsi_dbf; - spinlock_t rec_dbf_lock; - spinlock_t hba_dbf_lock; - spinlock_t san_dbf_lock; - spinlock_t scsi_dbf_lock; - struct zfcp_rec_dbf_record rec_dbf_buf; - struct zfcp_hba_dbf_record hba_dbf_buf; - struct zfcp_san_dbf_record san_dbf_buf; - struct zfcp_scsi_dbf_record scsi_dbf_buf; + debug_info_t *rec; + debug_info_t *hba; + debug_info_t *san; + debug_info_t *scsi; + spinlock_t rec_lock; + spinlock_t hba_lock; + spinlock_t san_lock; + spinlock_t scsi_lock; + struct zfcp_dbf_rec_record rec_buf; + struct zfcp_dbf_hba_record hba_buf; + struct zfcp_dbf_san_record san_buf; + struct zfcp_dbf_scsi_record scsi_buf; + struct zfcp_adapter *adapter; }; static inline -void zfcp_hba_dbf_event_fsf_resp(const char *tag2, int level, - struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) +void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, + struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) { - if (level <= dbf->hba_dbf->level) - _zfcp_hba_dbf_event_fsf_response(tag2, level, req, dbf); + if (level <= dbf->hba->level) + _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); } /** - * zfcp_hba_dbf_event_fsf_response - trace event for request completion + * zfcp_dbf_hba_fsf_response - trace event for request completion * @fsf_req: request that has been completed */ -static inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *req) +static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_qtcb *qtcb = req->qtcb; if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - zfcp_hba_dbf_event_fsf_resp("perr", 1, req, dbf); + zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); } else if (qtcb->header.fsf_status != FSF_GOOD) { - zfcp_hba_dbf_event_fsf_resp("ferr", 1, 
req, dbf); + zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || (req->fsf_command == FSF_QTCB_OPEN_LUN)) { - zfcp_hba_dbf_event_fsf_resp("open", 4, req, dbf); + zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); } else if (qtcb->header.log_length) { - zfcp_hba_dbf_event_fsf_resp("qtcb", 5, req, dbf); + zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); } else { - zfcp_hba_dbf_event_fsf_resp("norm", 6, req, dbf); + zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); } } /** - * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer + * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer * @tag: tag indicating which kind of unsolicited status has been received - * @adapter: adapter that has issued the unsolicited status buffer + * @dbf: reference to dbf structure * @status_buffer: buffer containing payload of unsolicited status */ static inline -void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *buf) +void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf, + struct fsf_status_read_buffer *buf) { - struct zfcp_dbf *dbf = adapter->dbf; int level = 2; - if (level <= dbf->hba_dbf->level) - _zfcp_hba_dbf_event_fsf_unsol(tag, level, adapter, buf); + if (level <= dbf->hba->level) + _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf); } static inline -void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, - struct zfcp_fsf_req *req, unsigned long old_id) +void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *req, unsigned long old_id) { - struct zfcp_dbf *dbf = adapter->dbf; - - if (level <= dbf->scsi_dbf->level) - _zfcp_scsi_dbf_event(tag, tag2, level, dbf, scmd, req, old_id); + if (level <= dbf->scsi->level) + _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); } /** - * zfcp_scsi_dbf_event_result - trace event for SCSI command completion + * zfcp_dbf_scsi_result - trace event for SCSI command completion * @tag: tag indicating success or failure of SCSI command * @level: trace level applicable for this event * @adapter: adapter that has been used to issue the SCSI command @@ -312,16 +310,14 @@ void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, * @fsf_req: request used to issue SCSI command (might be NULL) */ static inline -void zfcp_scsi_dbf_event_result(const char *tag, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scmd, - struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, + struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req) { - zfcp_scsi_dbf_event("rslt", tag, level, adapter, scmd, fsf_req, 0); + zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); } /** - * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort + * zfcp_dbf_scsi_abort - trace event for SCSI command abort * @tag: tag indicating success or failure of abort operation * @adapter: adapter thas has been used to issue SCSI command to be aborted * @scmd: SCSI command to be aborted @@ -329,28 +325,26 @@ void zfcp_scsi_dbf_event_result(const char *tag, int level, * @old_id: identifier of request containg SCSI command to be aborted */ static inline -void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, - struct scsi_cmnd *scmd, - struct zfcp_fsf_req *new_req, - unsigned long old_id) +void 
zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, + struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, + unsigned long old_id) { - zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scmd, new_req, old_id); + zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); } /** - * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset + * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset * @tag: tag indicating success or failure of reset operation * @flag: indicates type of reset (Target Reset, Logical Unit Reset) * @unit: unit that needs reset * @scsi_cmnd: SCSI command which caused this error recovery */ static inline -void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, - struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, + struct scsi_cmnd *scsi_cmnd) { - zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, - unit->port->adapter, scsi_cmnd, NULL, 0); + zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, + unit->port->adapter->dbf, scsi_cmnd, NULL, 0); } #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 67297d2744fb..373567eda8f6 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -74,9 +74,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; list_move(&act->list, &act->adapter->erp_ready_head); - zfcp_rec_dbf_event_action("erardy1", act); + zfcp_dbf_rec_action("erardy1", act); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread("erardy2", adapter); + zfcp_dbf_rec_thread("erardy2", adapter->dbf); } static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) @@ -227,11 +227,10 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread("eracte1", adapter); + zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: - zfcp_rec_dbf_event_trigger(id, ref, want, need, act, - adapter, port, unit); + zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); return retval; } @@ -442,28 +441,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status) static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) - zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter); + zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } static void zfcp_erp_port_unblock(struct zfcp_port *port) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) - zfcp_rec_dbf_event_port("erpubl1", NULL, port); + zfcp_dbf_rec_port("erpubl1", NULL, port); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) - zfcp_rec_dbf_event_unit("eruubl1", NULL, unit); + zfcp_dbf_rec_unit("eruubl1", NULL, unit); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); } static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) { list_move(&erp_action->list, &erp_action->adapter->erp_running_head); - zfcp_rec_dbf_event_action("erator1", erp_action); + zfcp_dbf_rec_action("erator1", erp_action); } static void 
zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) @@ -479,11 +478,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) if (act->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_rec_dbf_event_action("erscf_1", act); + zfcp_dbf_rec_action("erscf_1", act); act->fsf_req->erp_action = NULL; } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) - zfcp_rec_dbf_event_action("erscf_2", act); + zfcp_dbf_rec_action("erscf_2", act); if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) act->fsf_req = NULL; } else @@ -641,9 +640,9 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) return ZFCP_ERP_FAILED; } - zfcp_rec_dbf_event_thread_lock("erasfx1", adapter); + zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erasfx2", adapter); + zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) break; @@ -682,9 +681,9 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) if (ret) return ZFCP_ERP_FAILED; - zfcp_rec_dbf_event_thread_lock("erasox1", adapter); + zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erasox2", adapter); + zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) return ZFCP_ERP_FAILED; @@ -1138,7 +1137,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) } list_del(&erp_action->list); - zfcp_rec_dbf_event_action("eractd1", erp_action); + zfcp_dbf_rec_action("eractd1", erp_action); switch (erp_action->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: @@ -1297,9 +1296,9 @@ static int zfcp_erp_thread(void *data) while (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { - zfcp_rec_dbf_event_thread_lock("erthrd1", adapter); + zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); ignore = down_interruptible(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erthrd2", adapter); + zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); write_lock_irqsave(&adapter->erp_lock, flags); next = adapter->erp_ready_head.next; @@ -1356,7 +1355,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erthrk1", adapter); + zfcp_dbf_rec_thread_lock("erthrk1", adapter->dbf); wait_event(adapter->erp_thread_wqh, !(atomic_read(&adapter->status) & @@ -1431,11 +1430,11 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &adapter->status)) - zfcp_rec_dbf_event_adapter(id, ref, adapter); + zfcp_dbf_rec_adapter(id, ref, adapter->dbf); atomic_set_mask(mask, &adapter->status); } else { if (status_change_clear(mask, &adapter->status)) - zfcp_rec_dbf_event_adapter(id, ref, adapter); + zfcp_dbf_rec_adapter(id, ref, adapter->dbf); atomic_clear_mask(mask, &adapter->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) atomic_set(&adapter->erp_counter, 0); @@ -1465,11 +1464,11 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &port->status)) - zfcp_rec_dbf_event_port(id, ref, port); + zfcp_dbf_rec_port(id, ref, port); atomic_set_mask(mask, &port->status); } else { if (status_change_clear(mask, 
&port->status)) - zfcp_rec_dbf_event_port(id, ref, port); + zfcp_dbf_rec_port(id, ref, port); atomic_clear_mask(mask, &port->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) atomic_set(&port->erp_counter, 0); @@ -1494,11 +1493,11 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, { if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &unit->status)) - zfcp_rec_dbf_event_unit(id, ref, unit); + zfcp_dbf_rec_unit(id, ref, unit); atomic_set_mask(mask, &unit->status); } else { if (status_change_clear(mask, &unit->status)) - zfcp_rec_dbf_event_unit(id, ref, unit); + zfcp_dbf_rec_unit(id, ref, unit); atomic_clear_mask(mask, &unit->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { atomic_set(&unit->erp_counter, 0); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 7650cec7f71f..a49d00921b92 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -34,35 +34,31 @@ extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); extern struct miscdevice zfcp_cfdc_misc; /* zfcp_dbf.c */ -extern int zfcp_adapter_debug_register(struct zfcp_adapter *); -extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *); -extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *); -extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, - struct zfcp_adapter *, - struct zfcp_port *, struct zfcp_unit *); -extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); -extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, - struct zfcp_fsf_req *, - struct zfcp_dbf *dbf); -extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, - struct zfcp_adapter *, +extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); +extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); +extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); +extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); +extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, + struct zfcp_adapter *, struct zfcp_port *, + struct zfcp_unit *); +extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); +extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, + struct zfcp_dbf *); +extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *, unsigned int, int, - int); -extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, - struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); -extern void _zfcp_scsi_dbf_event(const char *, const char *, int, - struct zfcp_dbf *, struct scsi_cmnd *, - struct zfcp_fsf_req *, unsigned long); 
+extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); +extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); +extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *); +extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, + struct scsi_cmnd *, struct zfcp_fsf_req *, + unsigned long); /* zfcp_erp.c */ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index bc0c9f54d0d8..309f1dfad3f9 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -242,7 +242,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) (struct fsf_status_read_buffer *) fsf_req->data; unsigned int els_type = status_buffer->payload.data[0]; - zfcp_san_dbf_event_incoming_els(fsf_req); + zfcp_dbf_san_incoming_els(fsf_req); if (els_type == LS_PLOGI) zfcp_fc_incoming_plogi(fsf_req); else if (els_type == LS_LOGO) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 048f1a848f34..665967f049a8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -248,13 +248,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) struct fsf_status_read_buffer *sr_buf = req->data; if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); + zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); return; } - zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); + zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); switch (sr_buf->status_type) { case FSF_STATUS_READ_PORT_CLOSED: @@ -269,7 +269,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) dev_warn(&adapter->ccw_device->dev, "The error threshold for checksum statistics " "has been exceeded\n"); - zfcp_hba_dbf_event_berr(adapter, req); + zfcp_dbf_hba_berr(adapter->dbf, req); break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); @@ -355,7 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) struct fsf_qtcb *qtcb = req->qtcb; union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; - zfcp_hba_dbf_event_fsf_response(req); + zfcp_dbf_hba_fsf_response(req); if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { req->status |= ZFCP_STATUS_FSFREQ_ERROR | @@ -848,7 +848,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) mempool_free(sr_buf, adapter->pool.status_read_data); failed_buf: zfcp_fsf_req_free(req); - zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); + zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); out: spin_unlock_bh(&qdio->req_q_lock); return retval; @@ -968,7 +968,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_san_dbf_event_ct_response(req); + zfcp_dbf_san_ct_response(req); send_ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1100,7 +1100,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) req->qtcb->bottom.support.timeout = ct->timeout; req->data = ct; - zfcp_san_dbf_event_ct_request(req); + zfcp_dbf_san_ct_request(req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = 
zfcp_fsf_req_send(req); @@ -1129,7 +1129,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_san_dbf_event_els_response(req); + zfcp_dbf_san_els_response(req); send_els->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1203,7 +1203,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) bottom->timeout = 2 * R_A_TOV; req->data = els; - zfcp_san_dbf_event_els_request(req); + zfcp_dbf_san_els_request(req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); @@ -2213,11 +2213,11 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) } skip_fsfstatus: if (scpnt->result != 0) - zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); else if (scpnt->retries > 0) - zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req); else - zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req); scpnt->host_scribble = NULL; (scpnt->scsi_done) (scpnt); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 0b3f634509bf..84527ebbbe58 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -101,7 +101,8 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, + count); zfcp_qdio_handler_error(qdio, "qdireq1"); return; } @@ -143,7 +144,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, int sbal_idx, sbal_no; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, + count); zfcp_qdio_handler_error(qdio, "qdires1"); return; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 4414720c87f6..b6177ad2d5bf 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -53,11 +53,11 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp) static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) { + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; set_host_byte(scpnt, result); if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) - zfcp_scsi_dbf_event_result("fail", 4, - (struct zfcp_adapter*) scpnt->device->host->hostdata[0], - scpnt, NULL); + zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); /* return directly */ scpnt->scsi_done(scpnt); } @@ -93,7 +93,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { scpnt->result = scsi_result; - zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL); + zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); scpnt->scsi_done(scpnt); return 0; } @@ -181,8 +181,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) spin_unlock(&adapter->req_list_lock); if (!old_req) { write_unlock_irqrestore(&adapter->abort_lock, flags); - zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, + old_reqid); return FAILED; /* completion could be in progress */ } 
old_req->data = NULL; @@ -198,8 +198,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) zfcp_erp_wait(adapter); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, + old_reqid); return SUCCESS; } } @@ -216,7 +216,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) dbf_tag = "fail"; retval = FAILED; } - zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); + zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); zfcp_fsf_req_free(abrt_req); return retval; } @@ -237,8 +237,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) zfcp_erp_wait(adapter); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, - scpnt); + zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); return SUCCESS; } } @@ -248,13 +247,13 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); retval = FAILED; } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { - zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt); retval = FAILED; } else - zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); zfcp_fsf_req_free(fsf_req); return retval; From d5a282a1c5084ec7ebd9e6ab9723317f6b3fcd7b Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:22 +0200 Subject: [PATCH 097/209] [SCSI] zfcp: introduce _setup, _destroy for qdio and FC Extract independent data structures and introduce common _setup and _destroy routines for QDIO and Fibre Channel related data structures Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 42 ++++++++-------------- drivers/s390/scsi/zfcp_ext.h | 7 ++-- drivers/s390/scsi/zfcp_fc.c | 36 +++++++++++++------ drivers/s390/scsi/zfcp_qdio.c | 68 +++++++++++++++++++++++------------ 4 files changed, 89 insertions(+), 64 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 7a50f64c36bd..c77686ed938c 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -506,30 +506,18 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (!adapter) return -ENOMEM; - adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); - if (!adapter->gs) { - kfree(adapter); - return -ENOMEM; - } - - adapter->qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); - if (!adapter->qdio) - goto qdio_mem_failed; - - adapter->qdio->adapter = adapter; - ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); - if (zfcp_qdio_allocate(adapter->qdio, ccw_device)) - goto qdio_allocate_failed; + if (zfcp_qdio_setup(adapter)) + goto qdio_failed; if (zfcp_allocate_low_mem_buffers(adapter)) - goto failed_low_mem_buffers; + goto low_mem_buffers_failed; if (zfcp_reqlist_alloc(adapter)) - goto failed_low_mem_buffers; + goto low_mem_buffers_failed; if (zfcp_dbf_adapter_register(adapter)) goto debug_register_failed; @@ -537,6 +525,9 @@ int zfcp_adapter_enqueue(struct 
ccw_device *ccw_device) if (zfcp_setup_adapter_work_queue(adapter)) goto work_queue_failed; + if (zfcp_fc_gs_setup(adapter)) + goto generic_services_failed; + init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_thread_wqh); init_waitqueue_head(&adapter->erp_done_wqh); @@ -547,9 +538,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->qdio->req_q_lock); - spin_lock_init(&adapter->qdio->stat_lock); - rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); @@ -570,24 +558,23 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto sysfs_failed; atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - zfcp_fc_wka_ports_init(adapter); if (!zfcp_adapter_scsi_register(adapter)) return 0; sysfs_failed: + zfcp_fc_gs_destroy(adapter); +generic_services_failed: zfcp_destroy_adapter_work_queue(adapter); work_queue_failed: zfcp_dbf_adapter_unregister(adapter->dbf); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); kfree(adapter->req_list); -failed_low_mem_buffers: +low_mem_buffers_failed: zfcp_free_low_mem_buffers(adapter); -qdio_allocate_failed: - zfcp_qdio_free(adapter->qdio); - kfree(adapter->qdio); -qdio_mem_failed: +qdio_failed: + zfcp_qdio_destroy(adapter->qdio); kfree(adapter); return -ENOMEM; } @@ -616,15 +603,14 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) if (!retval) return; + zfcp_fc_gs_destroy(adapter); zfcp_destroy_adapter_work_queue(adapter); zfcp_dbf_adapter_unregister(adapter->dbf); - zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); + zfcp_qdio_destroy(adapter->qdio); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); - kfree(adapter->gs); - kfree(adapter->qdio); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index a49d00921b92..cf09b2838c50 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -100,7 +100,8 @@ extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); -extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); +extern int zfcp_fc_gs_setup(struct zfcp_adapter *); +extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); @@ -134,8 +135,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ -extern int zfcp_qdio_allocate(struct zfcp_qdio *, struct ccw_device *); -extern void zfcp_qdio_free(struct zfcp_qdio *); +extern int zfcp_qdio_setup(struct zfcp_adapter *); +extern void zfcp_qdio_destroy(struct zfcp_qdio *); extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 309f1dfad3f9..db8bb41a7794 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -141,17 +141,6 @@ void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) zfcp_fc_wka_port_force_offline(&gs->ks); } -void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) -{ - struct zfcp_wka_ports *gs = adapter->gs; - - 
zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); - zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); - zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); - zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter); - zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter); -} - static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct fcp_rscn_element *elem) { @@ -870,3 +859,28 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) } return ret; } + +int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) +{ + struct zfcp_wka_ports *wka_ports; + + wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); + if (!wka_ports) + return -ENOMEM; + + adapter->gs = wka_ports; + zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); + zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter); + + return 0; +} + +void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter) +{ + kfree(adapter->gs); + adapter->gs = NULL; +} + diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 84527ebbbe58..2b499e28ff1f 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -34,27 +34,6 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) return &q->sbal[sbal_idx]->element[sbale_idx]; } -/** - * zfcp_qdio_free - free memory used by request- and resposne queue - * @qdio: pointer to the zfcp_qdio structure - */ -void zfcp_qdio_free(struct zfcp_qdio *qdio) -{ - struct qdio_buffer **sbal_req, **sbal_resp; - int p; - - if (qdio->adapter->ccw_device) - qdio_free(qdio->adapter->ccw_device); - - sbal_req = qdio->req_q.sbal; - sbal_resp = qdio->resp_q.sbal; - - for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { - free_page((unsigned long) sbal_req[p]); - free_page((unsigned long) sbal_resp[p]); - } -} - static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) { struct zfcp_adapter *adapter = qdio->adapter; @@ -383,7 +362,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, * Returns: -ENOMEM on memory allocation error or return value from * qdio_allocate */ -int zfcp_qdio_allocate(struct zfcp_qdio *qdio, struct ccw_device *ccw_dev) +static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) { struct qdio_initialize init_data; @@ -477,3 +456,48 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) "Setting up the QDIO connection to the FCP adapter failed\n"); return -EIO; } + +void zfcp_qdio_destroy(struct zfcp_qdio *qdio) +{ + struct qdio_buffer **sbal_req, **sbal_resp; + int p; + + if (!qdio) + return; + + if (qdio->adapter->ccw_device) + qdio_free(qdio->adapter->ccw_device); + + sbal_req = qdio->req_q.sbal; + sbal_resp = qdio->resp_q.sbal; + + for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { + free_page((unsigned long) sbal_req[p]); + free_page((unsigned long) sbal_resp[p]); + } + + kfree(qdio); +} + +int zfcp_qdio_setup(struct zfcp_adapter *adapter) +{ + struct zfcp_qdio *qdio; + + qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); + if (!qdio) + return -ENOMEM; + + qdio->adapter = adapter; + + if (zfcp_qdio_allocate(qdio)) { + zfcp_qdio_destroy(qdio); + return -ENOMEM; + } + + spin_lock_init(&qdio->req_q_lock); + spin_lock_init(&qdio->stat_lock); + + adapter->qdio = qdio; + return 0; +} + From 6f53a2d2ecaefa3ffff8864f51a3ae38737e1152 Mon Sep 
17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:23 +0200 Subject: [PATCH 098/209] [SCSI] zfcp: Apply common naming conventions to zfcp_fc Update the Fibre Channel related code to use the zfcp_fc prefix. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 2 +- drivers/s390/scsi/zfcp_ext.h | 6 ++-- drivers/s390/scsi/zfcp_fc.c | 51 +++++++++++++++++----------------- drivers/s390/scsi/zfcp_fsf.c | 10 +++---- drivers/s390/scsi/zfcp_sysfs.c | 2 +- 5 files changed, 35 insertions(+), 36 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index c77686ed938c..23b85a03e26a 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -544,7 +544,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) sema_init(&adapter->erp_ready_sem, 0); INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); - INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); + INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); adapter->service_level.seq_print = zfcp_print_sl; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index cf09b2838c50..36935bc0818f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -92,12 +92,12 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ -extern int zfcp_scan_ports(struct zfcp_adapter *); -extern void _zfcp_scan_ports_later(struct work_struct *); +extern int zfcp_fc_scan_ports(struct zfcp_adapter *); +extern void _zfcp_fc_scan_ports_later(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); -extern void zfcp_test_link(struct zfcp_port *); +extern void zfcp_fc_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); extern int zfcp_fc_gs_setup(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index db8bb41a7794..7433da900fab 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -57,7 +57,7 @@ struct zfcp_fc_ns_handler_data { unsigned long handler_data; }; -static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) +static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) return -ERESTARTSYS; @@ -82,7 +82,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) return -EIO; } -static void zfcp_wka_port_offline(struct work_struct *work) +static void zfcp_fc_wka_port_offline(struct work_struct *work) { struct delayed_work *dw = to_delayed_work(work); struct zfcp_wka_port *wka_port = @@ -102,7 +102,7 @@ static void zfcp_wka_port_offline(struct work_struct *work) mutex_unlock(&wka_port->mutex); } -static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) +static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) { if (atomic_dec_return(&wka_port->refcount) != 0) return; @@ -121,7 +121,7 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, wka_port->status = ZFCP_WKA_PORT_OFFLINE; atomic_set(&wka_port->refcount, 0); mutex_init(&wka_port->mutex); - INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); + INIT_DELAYED_WORK(&wka_port->work, 
zfcp_fc_wka_port_offline); } static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) @@ -150,7 +150,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, read_lock_irqsave(&zfcp_data.config_lock, flags); list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { if ((port->d_id & range) == (elem->nport_did & range)) - zfcp_test_link(port); + zfcp_fc_test_link(port); if (!port->d_id) zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, @@ -326,13 +326,13 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) memset(gid_pn, 0, sizeof(*gid_pn)); - ret = zfcp_wka_port_get(&adapter->gs->ds); + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) goto out; ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); - zfcp_wka_port_put(&adapter->gs->ds); + zfcp_fc_wka_port_put(&adapter->gs->ds); out: mempool_free(gid_pn, adapter->pool.gid_pn_data); return ret; @@ -482,14 +482,14 @@ void zfcp_fc_link_test_work(struct work_struct *work) } /** - * zfcp_test_link - lightweight link test procedure + * zfcp_fc_test_link - lightweight link test procedure * @port: port to be tested * * Test status of a link to a remote port using the ELS command ADISC. * If there is a problem with the remote port, error recovery steps * will be triggered. */ -void zfcp_test_link(struct zfcp_port *port) +void zfcp_fc_test_link(struct zfcp_port *port) { zfcp_port_get(port); if (!queue_work(port->adapter->work_queue, &port->test_link_work)) @@ -532,9 +532,8 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) } -static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, - struct zfcp_adapter *adapter, - int max_bytes) +static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, + struct zfcp_adapter *adapter, int max_bytes) { struct zfcp_send_ct *ct = &gpn_ft->ct; struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); @@ -569,7 +568,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, return ret; } -static void zfcp_validate_port(struct zfcp_port *port) +static void zfcp_fc_validate_port(struct zfcp_port *port) { struct zfcp_adapter *adapter = port->adapter; @@ -589,7 +588,7 @@ static void zfcp_validate_port(struct zfcp_port *port) zfcp_port_dequeue(port); } -static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) +static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) { struct zfcp_send_ct *ct = &gpn_ft->ct; struct scatterlist *sg = gpn_ft->sg_resp; @@ -649,16 +648,16 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) zfcp_erp_wait(adapter); list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) - zfcp_validate_port(port); + zfcp_fc_validate_port(port); up(&zfcp_data.config_sema); return ret; } /** - * zfcp_scan_ports - scan remote ports and attach new ports + * zfcp_fc_scan_ports - scan remote ports and attach new ports * @adapter: pointer to struct zfcp_adapter */ -int zfcp_scan_ports(struct zfcp_adapter *adapter) +int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) { int ret, i; struct zfcp_gpn_ft *gpn_ft; @@ -673,7 +672,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) return 0; - ret = zfcp_wka_port_get(&adapter->gs->ds); + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) return ret; @@ -684,9 +683,9 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } for (i = 0; i < 3; i++) { - ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes); + ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, 
max_bytes); if (!ret) { - ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries); + ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); if (ret == -EAGAIN) ssleep(1); else @@ -695,14 +694,14 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } zfcp_free_sg_env(gpn_ft, buf_num); out: - zfcp_wka_port_put(&adapter->gs->ds); + zfcp_fc_wka_port_put(&adapter->gs->ds); return ret; } -void _zfcp_scan_ports_later(struct work_struct *work) +void _zfcp_fc_scan_ports_later(struct work_struct *work) { - zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); + zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); } struct zfcp_els_fc_job { @@ -792,7 +791,7 @@ static void zfcp_fc_generic_ct_handler(unsigned long data) job->state_flags = FC_RQST_STATE_DONE; job->job_done(job); - zfcp_wka_port_put(ct_fc_job->ct.wka_port); + zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); kfree(ct_fc_job); } @@ -838,7 +837,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) return -EINVAL; /* no such service */ } - ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); + ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port); if (ret) { kfree(ct_fc_job); return ret; @@ -855,7 +854,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); if (ret) { kfree(ct_fc_job); - zfcp_wka_port_put(ct_fc_job->ct.wka_port); + zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); } return ret; } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 665967f049a8..c241f032fd49 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -892,7 +892,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1139,7 +1139,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: if (port && (send_els->ls_code != ZFCP_LS_ADISC)) - zfcp_test_link(port); + zfcp_fc_test_link(port); /*fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: case FSF_SQ_RETRY_IF_POSSIBLE: @@ -1889,7 +1889,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2024,7 +2024,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2307,7 +2307,7 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: if (header->fsf_status_qual.word[0] == FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index a6cf62636834..c86496bb608b 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ 
b/drivers/s390/scsi/zfcp_sysfs.c @@ -126,7 +126,7 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) return -EBUSY; - ret = zfcp_scan_ports(adapter); + ret = zfcp_fc_scan_ports(adapter); return ret ? ret : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, From ea945ff84c2ce1089edb7914ffdd998c24c25903 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:24 +0200 Subject: [PATCH 099/209] [SCSI] zfcp: resolve false usage of dd_data in fc_rport The fc_rport structure reserves a reference where a LLD can put information required in a situation where the fc transport class is triggering LLD callbacks. The zfcp driver was using this variable directly which is discouraged. This patch solves this issue by making this reference unnecessary. In addition the dev_loss_tmo callback is removed, it is not required: zfcp does not access the fc_rport after calling fc_remote_port_delete. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 ---- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 22 ++++------------------ 3 files changed, 5 insertions(+), 23 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 23b85a03e26a..ed9a8a1517c6 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -709,10 +709,6 @@ void zfcp_port_dequeue(struct zfcp_port *port) write_lock_irq(&zfcp_data.config_lock); list_del(&port->list); write_unlock_irq(&zfcp_data.config_lock); - if (port->rport) { - port->rport->dd_data = NULL; - port->rport = NULL; - } wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); cancel_work_sync(&port->rport_work); /* usually not necessary */ zfcp_adapter_put(port->adapter); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 7433da900fab..5c1f12247e42 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -752,7 +752,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) els_fc_job->els.adapter = adapter; if (rport) { read_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) els_fc_job->els.d_id = port->d_id; read_unlock_irq(&zfcp_data.config_lock); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index b6177ad2d5bf..3ff726afafc6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -490,21 +490,6 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) rport->dev_loss_tmo = timeout; } -/** - * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport - * @rport: The rport that is about to be deleted. 
- */ -static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) -{ - struct zfcp_port *port; - - write_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; - if (port) - port->rport = NULL; - write_unlock_irq(&zfcp_data.config_lock); -} - /** * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport * @rport: The FC rport where to teminate I/O @@ -516,9 +501,12 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) { struct zfcp_port *port; + struct Scsi_Host *shost = rport_to_shost(rport); + struct zfcp_adapter *adapter = + (struct zfcp_adapter *)shost->hostdata[0]; write_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) zfcp_port_get(port); write_unlock_irq(&zfcp_data.config_lock); @@ -550,7 +538,6 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) return; } - rport->dd_data = port; rport->maxframe_size = port->maxframe_size; rport->supported_classes = port->supported_classes; port->rport = rport; @@ -663,7 +650,6 @@ struct fc_function_template zfcp_transport_functions = { .reset_fc_host_stats = zfcp_reset_fc_host_stats, .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, .get_host_port_state = zfcp_get_host_port_state, - .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, .bsg_request = zfcp_execute_fc_job, From 347c6a965dc110c91a77f65181fc011ee257a4a6 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:25 +0200 Subject: [PATCH 100/209] [SCSI] zfcp: Use kthread API for zfcp erp thread Switch the creation of the zfcp erp thread from the deprecated kernel_thread API to the kthread API. This allows also the removal of some flags in zfcp since the kthread API handles thread creation and shutdown internally. To allow the usage of the kthread_stop function, replace the erp ready semaphore with a waitqueue for waiting until erp actions arrive on the ready queue. 
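For reference, a minimal self-contained sketch of the kthread/waitqueue pattern this commit adopts (illustration only, not zfcp code; the demo_* names are placeholders for the erp thread, ready list and ready waitqueue):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/wait.h>

static LIST_HEAD(demo_ready_head);		/* stands in for erp_ready_head */
static DECLARE_WAIT_QUEUE_HEAD(demo_ready_wq);	/* stands in for erp_ready_wq */
static struct task_struct *demo_thread;

static int demo_thread_fn(void *data)
{
	for (;;) {
		/* sleep until work is queued (producers call wake_up()) or stop is requested */
		wait_event_interruptible(demo_ready_wq,
					 !list_empty(&demo_ready_head) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* ... dequeue and process entries from demo_ready_head ... */
	}
	return 0;
}

static int demo_thread_setup(void)
{
	struct task_struct *t;

	t = kthread_run(demo_thread_fn, NULL, "demo_erp");
	if (IS_ERR(t))
		return PTR_ERR(t);
	demo_thread = t;
	return 0;
}

static void demo_thread_kill(void)
{
	kthread_stop(demo_thread);	/* wakes the thread and waits for it to exit */
	demo_thread = NULL;
}

Producers add work and call wake_up() on the waitqueue; since kthread_stop() both wakes the thread and waits for it to exit, the explicit ERP_THREAD_UP/ERP_THREAD_KILL status flags are no longer needed.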
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 +-- drivers/s390/scsi/zfcp_def.h | 6 ++-- drivers/s390/scsi/zfcp_erp.c | 62 ++++++++++++++---------------------- 3 files changed, 27 insertions(+), 45 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index ed9a8a1517c6..e8f39f02bc3b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -529,7 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto generic_services_failed; init_waitqueue_head(&adapter->remove_wq); - init_waitqueue_head(&adapter->erp_thread_wqh); + init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); INIT_LIST_HEAD(&adapter->port_list_head); @@ -541,8 +541,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); - sema_init(&adapter->erp_ready_sem, 0); - INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9a8ff0553421..ce65d88e280c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -222,8 +222,6 @@ struct zfcp_ls_adisc { #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 -#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 -#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 @@ -481,10 +479,9 @@ struct zfcp_adapter { atomic_t status; /* status of this adapter */ struct list_head erp_ready_head; /* error recovery for this adapter/devices */ + wait_queue_head_t erp_ready_wq; struct list_head erp_running_head; rwlock_t erp_lock; - struct semaphore erp_ready_sem; - wait_queue_head_t erp_thread_wqh; wait_queue_head_t erp_done_wqh; struct zfcp_erp_action erp_action; /* pending error recovery */ atomic_t erp_counter; @@ -492,6 +489,7 @@ struct zfcp_adapter { actions */ u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ + struct task_struct *erp_thread; struct zfcp_wka_ports *gs; /* generic services */ struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 373567eda8f6..577e15708598 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -9,6 +9,7 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include "zfcp_ext.h" #define ZFCP_MAX_ERPS 3 @@ -75,7 +76,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) list_move(&act->list, &act->adapter->erp_ready_head); zfcp_dbf_rec_action("erardy1", act); - up(&adapter->erp_ready_sem); + wake_up(&adapter->erp_ready_wq); zfcp_dbf_rec_thread("erardy2", adapter->dbf); } @@ -212,8 +213,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, int retval = 1, need; struct zfcp_erp_action *act = NULL; - if (!(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)) + if (!adapter->erp_thread) return -EIO; need = zfcp_erp_required_act(want, adapter, port, unit); @@ -226,7 +226,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, goto out; ++adapter->erp_total_count; list_add_tail(&act->list, 
&adapter->erp_ready_head); - up(&adapter->erp_ready_sem); + wake_up(&adapter->erp_ready_wq); zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: @@ -641,7 +641,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) } zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); - down(&adapter->erp_ready_sem); + wait_event(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head)); zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) break; @@ -682,7 +683,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) return ZFCP_ERP_FAILED; zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); - down(&adapter->erp_ready_sem); + wait_event(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head)); zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) return ZFCP_ERP_FAILED; @@ -1285,21 +1287,17 @@ static int zfcp_erp_thread(void *data) struct list_head *next; struct zfcp_erp_action *act; unsigned long flags; - int ignore; - - daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev)); - /* Block all signals */ - siginitsetinv(¤t->blocked, 0); - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - - while (!(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { + for (;;) { zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); - ignore = down_interruptible(&adapter->erp_ready_sem); + wait_event_interruptible(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head) || + kthread_should_stop()); zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); + if (kthread_should_stop()) + break; + write_lock_irqsave(&adapter->erp_lock, flags); next = adapter->erp_ready_head.next; write_unlock_irqrestore(&adapter->erp_lock, flags); @@ -1313,9 +1311,6 @@ static int zfcp_erp_thread(void *data) } } - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - return 0; } @@ -1327,18 +1322,17 @@ static int zfcp_erp_thread(void *data) */ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) { - int retval; + struct task_struct *thread; - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); - if (retval < 0) { + thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s", + dev_name(&adapter->ccw_device->dev)); + if (IS_ERR(thread)) { dev_err(&adapter->ccw_device->dev, "Creating an ERP thread for the FCP device failed.\n"); - return retval; + return PTR_ERR(thread); } - wait_event(adapter->erp_thread_wqh, - atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP); + + adapter->erp_thread = thread; return 0; } @@ -1353,16 +1347,8 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) */ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); - up(&adapter->erp_ready_sem); - zfcp_dbf_rec_thread_lock("erthrk1", adapter->dbf); - - wait_event(adapter->erp_thread_wqh, - !(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)); - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, - &adapter->status); + kthread_stop(adapter->erp_thread); + adapter->erp_thread = NULL; } /** From 98fc4d5c8cd9bd1a412cca922feecb54c1c22d8e Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:26 +0200 Subject: [PATCH 101/209] [SCSI] zfcp: Simplify and 
update ct/gs and els timeout handling The recommendation for a timeout of 2 * R_A_TOV is the same for ct/gs and els requests, so set it in the common function used for initializing both request types. Besides, the timer inside zfcp should only run longer than the timeout set for the channel, so 10 seconds more should be enough (instead of 60 seconds). Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 5 ----- drivers/s390/scsi/zfcp_fc.c | 3 --- drivers/s390/scsi/zfcp_fsf.c | 15 ++++++--------- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index ce65d88e280c..99830758e873 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -73,9 +73,6 @@ /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ -/* timeout for name-server lookup (in seconds) */ -#define ZFCP_NS_GID_PN_TIMEOUT 10 - /* task attribute values in FCP-2 FCP_CMND IU */ #define SIMPLE_Q 0 #define HEAD_OF_Q 1 @@ -319,7 +316,6 @@ struct ct_iu_gpn_ft_req { * @resp: scatter-gather list for response * @handler: handler function (called for response to the request) * @handler_data: data passed to handler function - * @timeout: FSF timeout for this request * @completion: completion for synchronization purposes * @status: used to pass error status to calling function */ @@ -329,7 +325,6 @@ struct zfcp_send_ct { struct scatterlist *resp; void (*handler)(unsigned long); unsigned long handler_data; - int timeout; struct completion *completion; int status; }; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 5c1f12247e42..82f148d09968 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -283,7 +283,6 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; - gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; gid_pn->ct.req = &gid_pn->req; gid_pn->ct.resp = &gid_pn->resp; sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, @@ -556,7 +555,6 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, ct->wka_port = &adapter->gs->ds; ct->handler = zfcp_fc_ns_handler; ct->handler_data = (unsigned long)&compl_rec; - ct->timeout = 10; ct->req = &gpn_ft->sg_req; ct->resp = gpn_ft->sg_resp; @@ -845,7 +843,6 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ct_fc_job->ct.req = job->request_payload.sg_list; ct_fc_job->ct.resp = job->reply_payload.sg_list; - ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT; ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; ct_fc_job->ct.completion = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c241f032fd49..f09c863dc6bd 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1060,7 +1060,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, sg_resp, max_sbals); if (bytes <= 0) return -EIO; + + /* common settings for ct/gs and els requests */ req->qtcb->bottom.support.resp_buf_length = bytes; + req->qtcb->bottom.support.service_class = FSF_CLASS_3; + req->qtcb->bottom.support.timeout = 2 * R_A_TOV; + zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); return 0; } @@ -1096,12 +1101,9 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) req->handler = zfcp_fsf_send_ct_handler; 
req->qtcb->header.port_handle = wka_port->handle; - req->qtcb->bottom.support.service_class = FSF_CLASS_3; - req->qtcb->bottom.support.timeout = ct->timeout; req->data = ct; zfcp_dbf_san_ct_request(req); - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) @@ -1176,7 +1178,6 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) { struct zfcp_fsf_req *req; struct zfcp_qdio *qdio = els->adapter->qdio; - struct fsf_qtcb_bottom_support *bottom; int ret = -EIO; spin_lock_bh(&qdio->req_q_lock); @@ -1196,16 +1197,12 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) if (ret) goto failed_send; - bottom = &req->qtcb->bottom.support; + req->qtcb->bottom.support.d_id = els->d_id; req->handler = zfcp_fsf_send_els_handler; - bottom->d_id = els->d_id; - bottom->service_class = FSF_CLASS_3; - bottom->timeout = 2 * R_A_TOV; req->data = els; zfcp_dbf_san_els_request(req); - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) goto failed_send; From 143bb6bfe36d20618d8bf667915fe14d14b8ae2f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:27 +0200 Subject: [PATCH 102/209] [SCSI] zfcp: Defer resource allocation to first ccw_set_online call So far, zfcp allocated all resources required for FCP adapters/subchannels when the device was discovered in the ccw_probe callback. If there are lots of unused FCP subchannels attached to a system, this is a waste of resources. To alleviate this, defer the resource allocation to the first call to ccw_set_online. To avoid disruptions during possible following calls to ccw_set_offline and then ccw_set_online, keep the adapter resources until the device is finally being removed via ccw_remove. While doing this, also manage the zfcp erp thread together with all other adapter resources in zfcp_adapter_enqueue/dequeue. 
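
In outline, the deferred setup boils down to the pattern sketched below. This is a simplified illustration only (placeholder name example_set_online, error handling and the reopen sequence trimmed); the real code is in the zfcp_ccw_set_online() hunk further down.

static int example_set_online(struct ccw_device *cdev)
{
	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);

	if (!adapter) {
		/* first set_online call: allocate the adapter now and keep
		 * it attached to the ccw device until ccw_remove */
		if (zfcp_adapter_enqueue(cdev))
			return -EIO;
		adapter = dev_get_drvdata(&cdev->dev);
	}
	/* ... continue with the usual adapter reopen/recovery ... */
	return 0;
}
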
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 6 +++ drivers/s390/scsi/zfcp_ccw.c | 74 +++++++++++++++++++++--------------- drivers/s390/scsi/zfcp_erp.c | 5 +++ 3 files changed, 54 insertions(+), 31 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index e8f39f02bc3b..d1e75d36ed1a 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -541,6 +541,9 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); + if (zfcp_erp_thread_setup(adapter)) + goto erp_thread_failed; + INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); @@ -561,6 +564,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return 0; sysfs_failed: + zfcp_erp_thread_kill(adapter); +erp_thread_failed: zfcp_fc_gs_destroy(adapter); generic_services_failed: zfcp_destroy_adapter_work_queue(adapter); @@ -602,6 +607,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) return; zfcp_fc_gs_destroy(adapter); + zfcp_erp_thread_kill(adapter); zfcp_destroy_adapter_work_queue(adapter); zfcp_dbf_adapter_unregister(adapter->dbf); zfcp_free_low_mem_buffers(adapter); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index d9da5c42ccbe..82ae6ed7ef83 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -18,6 +18,9 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) { struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + if (!adapter) + return 0; + down(&zfcp_data.config_sema); zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); @@ -33,6 +36,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) { struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + if (!adapter) + return 0; + zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, @@ -63,25 +69,14 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) * zfcp_ccw_probe - probe function of zfcp driver * @ccw_device: pointer to belonging ccw device * - * This function gets called by the common i/o layer and sets up the initial - * data structures for each fcp adapter, which was detected by the system. - * Also the sysfs files for this adapter will be created by this function. - * In addition the nameserver port will be added to the ports of the adapter - * and its sysfs representation will be created too. + * This function gets called by the common i/o layer for each FCP + * device found on the current system. This is only a stub to make cio + * work: To only allocate adapter resources for devices actually used, + * the allocation is deferred to the first call to ccw_set_online. 
*/ static int zfcp_ccw_probe(struct ccw_device *ccw_device) { - int retval = 0; - - down(&zfcp_data.config_sema); - if (zfcp_adapter_enqueue(ccw_device)) { - dev_err(&ccw_device->dev, - "Setting up data structures for the " - "FCP adapter failed\n"); - retval = -EINVAL; - } - up(&zfcp_data.config_sema); - return retval; + return 0; } /** @@ -102,8 +97,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) LIST_HEAD(port_remove_lh); ccw_device_set_offline(ccw_device); + down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); + if (!adapter) + goto out; write_lock_irq(&zfcp_data.config_lock); list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { @@ -129,6 +127,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); zfcp_adapter_dequeue(adapter); +out: up(&zfcp_data.config_sema); } @@ -136,22 +135,33 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) * zfcp_ccw_set_online - set_online function of zfcp driver * @ccw_device: pointer to belonging ccw device * - * This function gets called by the common i/o layer and sets an adapter - * into state online. Setting an fcp device online means that it will be - * registered with the SCSI stack, that the QDIO queues will be set up - * and that the adapter will be opened (asynchronously). + * This function gets called by the common i/o layer and sets an + * adapter into state online. The first call will allocate all + * adapter resources that will be retained until the device is removed + * via zfcp_ccw_remove. + * + * Setting an fcp device online means that it will be registered with + * the SCSI stack, that the QDIO queues will be set up and that the + * adapter will be opened. */ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; - int retval; + int ret = 0; down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); - retval = zfcp_erp_thread_setup(adapter); - if (retval) - goto out; + if (!adapter) { + ret = zfcp_adapter_enqueue(ccw_device); + if (ret) { + dev_err(&ccw_device->dev, + "Setting up data structures for the " + "FCP adapter failed\n"); + goto out; + } + adapter = dev_get_drvdata(&ccw_device->dev); + } /* initialize request counter */ BUG_ON(!zfcp_reqlist_isempty(adapter)); @@ -162,13 +172,11 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccsonl2", NULL); zfcp_erp_wait(adapter); +out: up(&zfcp_data.config_sema); - flush_work(&adapter->scan_work); - return 0; - - out: - up(&zfcp_data.config_sema); - return retval; + if (!ret) + flush_work(&adapter->scan_work); + return ret; } /** @@ -184,10 +192,13 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); + if (!adapter) + goto out; + zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); zfcp_erp_wait(adapter); - zfcp_erp_thread_kill(adapter); up(&zfcp_data.config_sema); +out: return 0; } @@ -244,6 +255,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) adapter = dev_get_drvdata(&cdev->dev); zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); zfcp_erp_wait(adapter); + zfcp_erp_thread_kill(adapter); up(&zfcp_data.config_sema); } diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 577e15708598..73d366ba31e5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -150,6 +150,9 
@@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, a_status = atomic_read(&adapter->status); if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; + if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) && + !(a_status & ZFCP_STATUS_COMMON_OPEN)) + return 0; /* shutdown requested for closed adapter */ } return need; @@ -1349,6 +1352,8 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { kthread_stop(adapter->erp_thread); adapter->erp_thread = NULL; + WARN_ON(!list_empty(&adapter->erp_ready_head)); + WARN_ON(!list_empty(&adapter->erp_running_head)); } /** From 24680defdb55e073c5e43d14318a164b842d8ce7 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:28 +0200 Subject: [PATCH 103/209] [SCSI] zfcp: Replace config semaphore with mutex The config semaphore is only used as a mutex, so replace it with a simple mutex. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 22 +++++++++++----------- drivers/s390/scsi/zfcp_ccw.c | 20 ++++++++++---------- drivers/s390/scsi/zfcp_def.h | 3 +-- drivers/s390/scsi/zfcp_fc.c | 4 ++-- drivers/s390/scsi/zfcp_sysfs.c | 16 ++++++++-------- 5 files changed, 32 insertions(+), 33 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index d1e75d36ed1a..8e989159e4ed 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -84,7 +84,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) struct zfcp_port *port; struct zfcp_unit *unit; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); read_lock_irq(&zfcp_data.config_lock); adapter = zfcp_get_adapter_by_busid(busid); if (adapter) @@ -99,20 +99,20 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) unit = zfcp_unit_enqueue(port, lun); if (IS_ERR(unit)) goto out_unit; - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); ccw_device_set_online(adapter->ccw_device); zfcp_erp_wait(adapter); flush_work(&unit->scsi_work); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); zfcp_unit_put(unit); out_unit: zfcp_port_put(port); out_port: zfcp_adapter_put(adapter); out_adapter: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return; } @@ -176,7 +176,7 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - sema_init(&zfcp_data.config_sema, 1); + mutex_init(&zfcp_data.config_mutex); rwlock_init(&zfcp_data.config_lock); zfcp_data.scsi_transport_template = @@ -266,7 +266,7 @@ static void zfcp_sysfs_unit_release(struct device *dev) * @port: pointer to port where unit is added * @fcp_lun: FCP LUN of unit to be enqueued * Returns: pointer to enqueued unit on success, ERR_PTR on error - * Locks: config_sema must be held to serialize changes to the unit list + * Locks: config_mutex must be held to serialize changes to the unit list * * Sets up some unit internal structures and creates sysfs entry. 
*/ @@ -356,7 +356,7 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit) static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { - /* must only be called with zfcp_data.config_sema taken */ + /* must only be called with zfcp_data.config_mutex taken */ adapter->pool.erp_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.erp_req) @@ -404,7 +404,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { - /* zfcp_data.config_sema must be held */ + /* zfcp_data.config_mutex must be held */ if (adapter->pool.erp_req) mempool_destroy(adapter->pool.erp_req); if (adapter->pool.scsi_req) @@ -491,7 +491,7 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) * Enqueues an adapter at the end of the adapter list in the driver data. * All adapter internal structures are set up. * Proc-fs entries are also created. - * locks: config_sema must be held to serialise changes to the adapter list + * locks: config_mutex must be held to serialize changes to the adapter list */ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) { @@ -499,7 +499,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) /* * Note: It is safe to release the list_lock, as any list changes - * are protected by the config_sema, which must be held to get here + * are protected by the config_mutex, which must be held to get here */ adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); @@ -630,7 +630,7 @@ static void zfcp_sysfs_port_release(struct device *dev) * @status: initial status for the port * @d_id: destination id of the remote port to be enqueued * Returns: pointer to enqueued port on success, ERR_PTR on error - * Locks: config_sema must be held to serialize changes to the port list + * Locks: config_mutex must be held to serialize changes to the port list * * All port internal structures are set up and the sysfs entry is generated. 
* d_id is used to enqueue ports with a well known address like the Directory diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 82ae6ed7ef83..0c90f8e71605 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -21,12 +21,12 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) if (!adapter) return 0; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); zfcp_erp_wait(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return 0; } @@ -98,7 +98,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) ccw_device_set_offline(ccw_device); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) goto out; @@ -128,7 +128,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) zfcp_adapter_dequeue(adapter); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); } /** @@ -149,7 +149,7 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) struct zfcp_adapter *adapter; int ret = 0; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) { @@ -173,7 +173,7 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) "ccsonl2", NULL); zfcp_erp_wait(adapter); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); if (!ret) flush_work(&adapter->scan_work); return ret; @@ -190,14 +190,14 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) goto out; zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); zfcp_erp_wait(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); out: return 0; } @@ -251,12 +251,12 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) { struct zfcp_adapter *adapter; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&cdev->dev); zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); } static struct ccw_driver zfcp_ccw_driver = { diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 99830758e873..cc98eead2c3c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -604,8 +604,7 @@ struct zfcp_data { rwlock_t config_lock; /* serialises changes to adapter/port/unit lists */ - struct semaphore config_sema; /* serialises configuration - changes */ + struct mutex config_mutex; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 82f148d09968..722f22de8753 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -613,7 +613,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) return -E2BIG; } - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); /* first entry is the header */ for (x = 1; x < max_entries && !last; x++) { @@ -647,7 +647,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) zfcp_erp_wait(adapter); list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 
zfcp_fc_validate_port(port); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return ret; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index c86496bb608b..079a8cf518a3 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -88,7 +88,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ unsigned long val; \ int retval = 0; \ \ - down(&zfcp_data.config_sema); \ + mutex_lock(&zfcp_data.config_mutex); \ if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ retval = -EBUSY; \ goto out; \ @@ -105,7 +105,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ _reopen_id, NULL); \ zfcp_erp_wait(_adapter); \ out: \ - up(&zfcp_data.config_sema); \ + mutex_unlock(&zfcp_data.config_mutex); \ return retval ? retval : (ssize_t) count; \ } \ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ @@ -142,7 +142,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, int retval = 0; LIST_HEAD(port_remove_lh); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -173,7 +173,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, zfcp_port_put(port); zfcp_port_dequeue(port); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, @@ -207,7 +207,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, u64 fcp_lun; int retval = -EINVAL; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -226,7 +226,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_wait(unit->port->adapter); zfcp_unit_put(unit); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -241,7 +241,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, int retval = 0; LIST_HEAD(unit_remove_lh); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -282,7 +282,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, zfcp_unit_put(unit); zfcp_unit_dequeue(unit); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); From f4395b652636398eb4712e6f3caf79c9a6c02e21 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 18 Aug 2009 15:43:29 +0200 Subject: [PATCH 104/209] [SCSI] zfcp: proper use of device register Don't use kfree directly after device registration started. 
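
The underlying rule: once device_register() has been called, the embedded struct device's refcount owns the object, so a failed registration must be dropped with put_device() (which ends up in the release callback) rather than freed with kfree(); a direct kfree() is only safe before registration starts. A minimal sketch of the pattern, with hypothetical names my_obj/my_release:

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_create(struct device *parent)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->dev.parent = parent;
	obj->dev.release = my_release;
	if (dev_set_name(&obj->dev, "my_obj")) {
		kfree(obj);		/* nothing registered yet, plain kfree */
		return NULL;
	}
	if (device_register(&obj->dev)) {
		put_device(&obj->dev);	/* registration started, let release() free */
		return NULL;
	}
	return obj;
}
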
Signed-off-by: Sebastian Ott Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 8e989159e4ed..a7954443ec19 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -309,8 +309,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) } read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&unit->sysfs_device)) - goto err_out_free; + if (device_register(&unit->sysfs_device)) { + put_device(&unit->sysfs_device); + return ERR_PTR(-EINVAL); + } if (sysfs_create_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs)) { @@ -675,8 +677,10 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, } read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&port->sysfs_device)) - goto err_out_free; + if (device_register(&port->sysfs_device)) { + put_device(&port->sysfs_device); + goto err_out; + } retval = sysfs_create_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); From 0fac3f477b6b520ae7d972ceb6e958e6807c8e1a Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:30 +0200 Subject: [PATCH 105/209] [SCSI] zfcp: Handle failures during device allocation correctly dev_set_name tries to allocate memory, so check the return value for allocation failures. After dev_set_name succeeds, call device_register as next step to be able to use put_device during error handling. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 65 ++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index a7954443ec19..1be6bf7e8ce6 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -274,6 +274,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; + read_lock_irq(&zfcp_data.config_lock); + if (zfcp_get_unit_by_lun(port, fcp_lun)) { + read_unlock_irq(&zfcp_data.config_lock); + return ERR_PTR(-EINVAL); + } + read_unlock_irq(&zfcp_data.config_lock); + unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); if (!unit) return ERR_PTR(-ENOMEM); @@ -285,8 +292,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) unit->port = port; unit->fcp_lun = fcp_lun; - dev_set_name(&unit->sysfs_device, "0x%016llx", - (unsigned long long) fcp_lun); + if (dev_set_name(&unit->sysfs_device, "0x%016llx", + (unsigned long long) fcp_lun)) { + kfree(unit); + return ERR_PTR(-ENOMEM); + } unit->sysfs_device.parent = &port->sysfs_device; unit->sysfs_device.release = zfcp_sysfs_unit_release; dev_set_drvdata(&unit->sysfs_device, unit); @@ -302,13 +312,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) unit->latencies.cmd.channel.min = 0xFFFFFFFF; unit->latencies.cmd.fabric.min = 0xFFFFFFFF; - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_unit_by_lun(port, fcp_lun)) { - read_unlock_irq(&zfcp_data.config_lock); - goto err_out_free; - } - read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&unit->sysfs_device)) { put_device(&unit->sysfs_device); return ERR_PTR(-EINVAL); @@ -317,7 +320,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) if (sysfs_create_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs)) { 
device_unregister(&unit->sysfs_device); - return ERR_PTR(-EIO); + return ERR_PTR(-EINVAL); } zfcp_unit_get(unit); @@ -332,10 +335,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) zfcp_port_get(port); return unit; - -err_out_free: - kfree(unit); - return ERR_PTR(-EINVAL); } /** @@ -642,7 +641,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, u32 status, u32 d_id) { struct zfcp_port *port; - int retval; + + read_lock_irq(&zfcp_data.config_lock); + if (zfcp_get_port_by_wwpn(adapter, wwpn)) { + read_unlock_irq(&zfcp_data.config_lock); + return ERR_PTR(-EINVAL); + } + read_unlock_irq(&zfcp_data.config_lock); port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) @@ -663,31 +668,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); atomic_set(&port->refcount, 0); - dev_set_name(&port->sysfs_device, "0x%016llx", - (unsigned long long)wwpn); + if (dev_set_name(&port->sysfs_device, "0x%016llx", + (unsigned long long)wwpn)) { + kfree(port); + return ERR_PTR(-ENOMEM); + } port->sysfs_device.parent = &adapter->ccw_device->dev; - port->sysfs_device.release = zfcp_sysfs_port_release; dev_set_drvdata(&port->sysfs_device, port); - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_port_by_wwpn(adapter, wwpn)) { - read_unlock_irq(&zfcp_data.config_lock); - goto err_out_free; - } - read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&port->sysfs_device)) { put_device(&port->sysfs_device); - goto err_out; + return ERR_PTR(-EINVAL); } - retval = sysfs_create_group(&port->sysfs_device.kobj, - &zfcp_sysfs_port_attrs); - - if (retval) { + if (sysfs_create_group(&port->sysfs_device.kobj, + &zfcp_sysfs_port_attrs)) { device_unregister(&port->sysfs_device); - goto err_out; + return ERR_PTR(-EINVAL); } zfcp_port_get(port); @@ -701,11 +699,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, zfcp_adapter_get(adapter); return port; - -err_out_free: - kfree(port); -err_out: - return ERR_PTR(-EINVAL); } /** From b592e89ac9af521be164490e45c53c93e89c776f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:31 +0200 Subject: [PATCH 106/209] [SCSI] zfcp: Remove duplicated code for debug timestamps The timestamp calculation used for s390dbf output is the same in a private zfcp function and in debug.c. Replace both with a common inline function. 
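
With the helper declared in asm/timex.h (and tod_to_timeval exported for module use), any s390 code that records a store-clock value can format it the same way; an illustrative, non-driver caller might look like:

#include <linux/kernel.h>
#include <asm/timex.h>

static void example_print_stck(unsigned long long stck)
{
	struct timespec ts;

	stck_to_timespec(stck, &ts);
	pr_info("timestamp %lu.%09lu\n",
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec);
}
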
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- arch/s390/include/asm/timex.h | 8 ++++++++ arch/s390/kernel/debug.c | 8 +------- arch/s390/kernel/time.c | 1 + drivers/s390/scsi/zfcp_dbf.c | 19 +++---------------- 4 files changed, 13 insertions(+), 23 deletions(-) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..a07e699bb65c 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -88,6 +88,14 @@ int get_sync_clock(unsigned long long *clock); void init_cpu_timer(void); unsigned long long monotonic_clock(void); +void tod_to_timeval(__u64, struct timespec *); + +static inline +void stck_to_timespec(unsigned long long stck, struct timespec *ts) +{ + tod_to_timeval(stck - TOD_UNIX_EPOCH, ts); +} + extern u64 sched_clock_base_cc; #endif diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index be8bceaf37d9..4c512561687d 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -63,8 +63,6 @@ typedef struct } debug_sprintf_entry_t; -extern void tod_to_timeval(uint64_t todval, struct timespec *xtime); - /* internal function prototyes */ static int debug_init(void); @@ -1450,17 +1448,13 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, int area, debug_entry_t * entry, char *out_buf) { struct timespec time_spec; - unsigned long long time; char *except_str; unsigned long caller; int rc = 0; unsigned int level; level = entry->id.fields.level; - time = entry->id.stck; - /* adjust todclock to 1970 */ - time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); - tod_to_timeval(time, &time_spec); + stck_to_timespec(entry->id.stck, &time_spec); if (entry->id.fields.exception) except_str = "*"; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..9693d98d4d4a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -90,6 +90,7 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) todval -= (sec * 1000000) << 12; xtime->tv_nsec = ((todval * 1000) >> 12); } +EXPORT_SYMBOL(tod_to_timeval); void clock_comparator_work(void) { diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index c066428b2878..215b70749e95 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -38,19 +38,6 @@ static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, } } -/* FIXME: this duplicate this code in s390 debug feature */ -static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time) -{ - unsigned long long sec; - - stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); - sec = stck >> 12; - do_div(sec, 1000000); - time->tv_sec = sec; - stck -= (sec * 1000000) << 12; - time->tv_nsec = ((stck * 1000) >> 12); -} - static void zfcp_dbf_tag(char **p, const char *label, const char *tag) { int i; @@ -107,7 +94,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, char *p = out_buf; if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { - zfcp_dbf_timestamp(entry->id.stck, &t); + stck_to_timespec(entry->id.stck, &t); zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", t.tv_sec, t.tv_nsec); zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); @@ -320,7 +307,7 @@ static void zfcp_dbf_hba_view_response(char **p, zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); - zfcp_dbf_timestamp(r->fsf_issued, &t); + 
stck_to_timespec(r->fsf_issued, &t); zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); @@ -976,7 +963,7 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); - zfcp_dbf_timestamp(r->fsf_issued, &t); + stck_to_timespec(r->fsf_issued, &t); zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { From 41e05a12c7aae16f0381103af3e5ca791e87ce59 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 18 Aug 2009 15:43:32 +0200 Subject: [PATCH 107/209] [SCSI] zfcp: optimize zfcp_qdio_account Remove expensive ktime_get()/ktime_us_delta() functions from the hot path and use get_clock_monotonic() instead. This elimates seven function calls and avoids a lot of unnecessary calculations. Signed-off-by: Heiko Carstens Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 2 +- drivers/s390/scsi/zfcp_qdio.c | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index cc98eead2c3c..7da2fad8f515 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -438,7 +438,7 @@ struct zfcp_qdio { struct zfcp_qdio_queue req_q; spinlock_t stat_lock; spinlock_t req_q_lock; - ktime_t req_q_time; + unsigned long long req_q_time; u64 req_q_util; atomic_t req_q_full; wait_queue_head_t req_q_wq; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 2b499e28ff1f..6c5228b627fc 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -56,16 +56,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) } /* this needs to be called prior to updating the queue fill level */ -static void zfcp_qdio_account(struct zfcp_qdio *qdio) +static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) { - ktime_t now; - s64 span; + unsigned long long now, span; int free, used; spin_lock(&qdio->stat_lock); - now = ktime_get(); - span = ktime_us_delta(now, qdio->req_q_time); - free = max(0, atomic_read(&qdio->req_q.count)); + now = get_clock_monotonic(); + span = (now - qdio->req_q_time) >> 12; + free = atomic_read(&qdio->req_q.count); used = QDIO_MAX_BUFFERS_PER_Q - free; qdio->req_q_util += used * span; qdio->req_q_time = now; From 69cb48750b02034350bc78d8053647d7464cdde0 Mon Sep 17 00:00:00 2001 From: Ed Lin Date: Tue, 18 Aug 2009 12:15:14 -0700 Subject: [PATCH 108/209] [SCSI] stex: Add reset code for st_yel (v2) Add reset related code for st_yel. 1. Set the SS_H2I_INT_RESET bit. 2. Wait for the SS_MU_OPERATIONAL flag. This is also part of normal handshake process so move it to handshake routine. 3. Continue handshake with the firmware. 
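
Step 2 is the part that needs care: it is a bounded busy-wait on the YIOA_STATUS register, and because the same wait is needed on every handshake it now sits at the top of stex_ss_handshake() instead of in the reset path. Roughly (sketch only, placeholder function name; constants as introduced in this patch):

static int example_wait_mu_operational(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	unsigned long before = jiffies;

	while (!(readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ))
			return -1;	/* firmware never became operational */
		msleep(1);
	}
	return 0;
}
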
Signed-off-by: Ed Lin Signed-off-by: James Bottomley --- drivers/scsi/stex.c | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 8d2a95c4e5b5..09fa8861fc58 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -55,6 +55,7 @@ enum { OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ + YIOA_STATUS = 0x00, YH2I_INT = 0x20, YINT_EN = 0x34, YI2H_INT = 0x9c, @@ -108,6 +109,10 @@ enum { SS_HEAD_HANDSHAKE = 0x80, + SS_H2I_INT_RESET = 0x100, + + SS_MU_OPERATIONAL = 0x80000000, + STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, @@ -884,7 +889,7 @@ static void stex_ss_mu_intr(struct st_hba *hba) tag = (u16)value; if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME - "(%s): invalid tag\n", pci_name(hba->pdev)); + "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } @@ -1040,16 +1045,27 @@ static int stex_ss_handshake(struct st_hba *hba) void __iomem *base = hba->mmio_base; struct st_msg_header *msg_h; struct handshake_frame *h; - __le32 *scratch = hba->scratch; + __le32 *scratch; u32 data; unsigned long before; int ret = 0; - h = (struct handshake_frame *)(hba->alloc_rq(hba)); - msg_h = (struct st_msg_header *)h - 1; + before = jiffies; + while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): firmware not operational\n", + pci_name(hba->pdev)); + return -1; + } + msleep(1); + } + + msg_h = (struct st_msg_header *)hba->dma_mem; msg_h->handle = cpu_to_le64(hba->dma_handle); msg_h->flag = SS_HEAD_HANDSHAKE; + h = (struct handshake_frame *)(msg_h + 1); h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); @@ -1205,6 +1221,13 @@ static void stex_hard_reset(struct st_hba *hba) hba->pdev->saved_config_space[i]); } +static void stex_ss_reset(struct st_hba *hba) +{ + writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); + readl(hba->mmio_base + YH2I_INT); + ssleep(5); +} + static int stex_reset(struct scsi_cmnd *cmd) { struct st_hba *hba; @@ -1221,6 +1244,8 @@ static int stex_reset(struct scsi_cmnd *cmd) if (hba->cardtype == st_shasta) stex_hard_reset(hba); + else if (hba->cardtype == st_yel) + stex_ss_reset(hba); if (hba->cardtype != st_yosemite) { if (stex_handshake(hba)) { From cd4e12e8ad246ec5bc23ab04d0da0e6985025620 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:20:54 +0530 Subject: [PATCH 109/209] [SCSI] mpt2sas : Rescan topology from Interrupt context instead of work thread Following host reset its possible that the controller firmware could assign new handles for devices, as well as adding or deleting devices. There is code in the driver that will rescan the topology folowing host reset; updating device handles, and remove devices that are no longer responding. This patch will improve the responsivness by moving this rescaning from the delayed hotplug worker thread to immediately following the host reset. 
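
The effect on the reset callback is that the cheap bookkeeping passes (refreshing handles and marking responding devices) run in MPT2_IOC_DONE_RESET straight from the reset context, while the potentially slow removal of non-responding devices runs in the new MPT2_IOC_RUNNING phase raised after a successful reset. Condensed sketch of the dispatch (placeholder name, other phases omitted; the internal helpers are the ones shown in the hunks below):

static void example_reset_phases(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
	switch (reset_phase) {
	case MPT2_IOC_DONE_RESET:
		/* cheap passes: refresh host data, mark responding devices */
		_scsih_sas_host_refresh(ioc, 0);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		break;
	case MPT2_IOC_RUNNING:
		/* slower pass: drop whatever did not respond to the rescan */
		_scsih_remove_unresponding_devices(ioc);
		break;
	}
}
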
Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 3 + drivers/scsi/mpt2sas/mpt2sas_base.h | 1 + drivers/scsi/mpt2sas/mpt2sas_scsih.c | 142 ++++++++++----------------- 3 files changed, 57 insertions(+), 89 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 35a13867495e..cc5a8dae4ae1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3536,5 +3536,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_in_progress = 0; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + if (!r) + _base_reset_handler(ioc, MPT2_IOC_RUNNING); return r; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index acdcff150a35..998a7b847b3d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -119,6 +119,7 @@ #define MPT2_IOC_PRE_RESET 1 /* prior to host reset */ #define MPT2_IOC_AFTER_RESET 2 /* just after host reset */ #define MPT2_IOC_DONE_RESET 3 /* links re-initialized */ +#define MPT2_IOC_RUNNING 4 /* shost running */ /* * logging format diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 2e9a4445596f..471c3b604596 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -103,7 +103,6 @@ struct sense_info { }; -#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) /** * struct fw_event_work - firmware event struct * @list: link list framework @@ -2404,27 +2403,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } -/** - * _scsih_queue_rescan - queue a topology rescan from user context - * @ioc: per adapter object - * - * Return nothing. - */ -static void -_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc) -{ - struct fw_event_work *fw_event; - - if (ioc->wait_for_port_enable_to_complete) - return; - fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); - if (!fw_event) - return; - fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET; - fw_event->ioc = ioc; - _scsih_fw_event_add(ioc, fw_event); -} - /** * _scsih_flush_running_cmds - completing outstanding commands. * @ioc: per adapter object @@ -2455,46 +2433,6 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc) ioc->name, count)); } -/** - * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) - * @ioc: per adapter object - * @reset_phase: phase - * - * The handler for doing any required cleanup or initialization. - * - * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET, - * MPT2_IOC_DONE_RESET - * - * Return nothing. 
- */ -void -mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) -{ - switch (reset_phase) { - case MPT2_IOC_PRE_RESET: - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); - _scsih_fw_event_off(ioc); - break; - case MPT2_IOC_AFTER_RESET: - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); - if (ioc->tm_cmds.status & MPT2_CMD_PENDING) { - ioc->tm_cmds.status |= MPT2_CMD_RESET; - mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid); - complete(&ioc->tm_cmds.done); - } - _scsih_fw_event_on(ioc); - _scsih_flush_running_cmds(ioc); - break; - case MPT2_IOC_DONE_RESET: - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); - _scsih_queue_rescan(ioc); - break; - } -} - /** * _scsih_setup_eedp - setup MPI request for EEDP transfer * @scmd: pointer to scsi command object @@ -5156,22 +5094,9 @@ static void _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *sas_device_next; - struct _sas_node *sas_expander, *sas_expander_next; + struct _sas_node *sas_expander; struct _raid_device *raid_device, *raid_device_next; - unsigned long flags; - _scsih_search_responding_sas_devices(ioc); - _scsih_search_responding_raid_devices(ioc); - _scsih_search_responding_expanders(ioc); - - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - ioc->shost_recovery = 0; - if (ioc->shost->shost_state == SHOST_RECOVERY) { - printk(MPT2SAS_INFO_FMT "putting controller into " - "SHOST_RUNNING\n", ioc->name); - scsi_host_set_state(ioc->shost, SHOST_RUNNING); - } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); list_for_each_entry_safe(sas_device, sas_device_next, &ioc->sas_device_list, list) { @@ -5207,16 +5132,63 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) _scsih_raid_device_remove(ioc, raid_device); } - list_for_each_entry_safe(sas_expander, sas_expander_next, - &ioc->sas_expander_list, list) { + retry_expander_search: + sas_expander = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { if (sas_expander->responding) { sas_expander->responding = 0; continue; } - printk("\tremoving expander: handle(0x%04x), " - " sas_addr(0x%016llx)\n", sas_expander->handle, - (unsigned long long)sas_expander->sas_address); _scsih_expander_remove(ioc, sas_expander->handle); + goto retry_expander_search; + } +} + +/** + * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET, + * MPT2_IOC_DONE_RESET + * + * Return nothing. 
+ */ +void +mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) +{ + switch (reset_phase) { + case MPT2_IOC_PRE_RESET: + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); + _scsih_fw_event_off(ioc); + break; + case MPT2_IOC_AFTER_RESET: + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); + if (ioc->tm_cmds.status & MPT2_CMD_PENDING) { + ioc->tm_cmds.status |= MPT2_CMD_RESET; + mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + _scsih_fw_event_on(ioc); + _scsih_flush_running_cmds(ioc); + break; + case MPT2_IOC_DONE_RESET: + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); + _scsih_sas_host_refresh(ioc, 0); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + break; + case MPT2_IOC_RUNNING: + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "MPT2_IOC_RUNNING\n", ioc->name, __func__)); + _scsih_remove_unresponding_devices(ioc); + break; } } @@ -5236,14 +5208,6 @@ _firmware_event_work(struct work_struct *work) unsigned long flags; struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; - /* This is invoked by calling _scsih_queue_rescan(). */ - if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) { - _scsih_fw_event_free(ioc, fw_event); - _scsih_sas_host_refresh(ioc, 1); - _scsih_remove_unresponding_devices(ioc); - return; - } - /* the queue is being flushed so ignore this event */ spin_lock_irqsave(&ioc->fw_event_lock, flags); if (ioc->fw_events_off || ioc->remove_host) { From 155dd4c763694222c125e65438d823f58ea653bc Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:22:00 +0530 Subject: [PATCH 110/209] [SCSI] mpt2sas: Prevent sending command to FW while Host Reset This patch renames the flag for indicating host reset from ioc_reset_in_progress to shost_recovery. It also removes the spin locks surrounding the setting of this flag, which are unnecessary. Sanity checks on the shost_recovery flag were added thru out the code so as to prevent sending firmware commands during host reset. Also, the setting of the shost state to SHOST_RECOVERY was removed to prevent deadlocks, this is actually better handled by the shost_recovery flag. 
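
The sanity checks all follow the same shape: read the byte-sized shost_recovery flag without taking the lock, and either fail the operation or ask the SCSI midlayer to retry while recovery is in progress. For a submission-style path this looks roughly like the sketch below (placeholder name; the real checks are in the hunks that follow):

static int example_recovery_gate(struct MPT2SAS_ADAPTER *ioc)
{
	if (ioc->shost_recovery)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry later */
	/* ... safe to build and post the MPI request ... */
	return 0;
}
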
Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 13 ++------ drivers/scsi/mpt2sas/mpt2sas_base.h | 3 +- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 16 ++-------- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 40 ++++++++++++++---------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 29 ++++++++++------- 5 files changed, 47 insertions(+), 54 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index cc5a8dae4ae1..1cfb503125b6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -94,7 +94,7 @@ _base_fault_reset_work(struct work_struct *work) int rc; spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->ioc_reset_in_progress) + if (ioc->shost_recovery) goto rearm_timer; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); @@ -3501,20 +3501,13 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, __func__)); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->ioc_reset_in_progress) { + if (ioc->shost_recovery) { spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); printk(MPT2SAS_ERR_FMT "%s: busy\n", ioc->name, __func__); return -EBUSY; } - ioc->ioc_reset_in_progress = 1; ioc->shost_recovery = 1; - if (ioc->shost->shost_state == SHOST_RUNNING) { - /* set back to SHOST_RUNNING in mpt2sas_scsih.c */ - scsi_host_set_state(ioc->shost, SHOST_RECOVERY); - printk(MPT2SAS_INFO_FMT "putting controller into " - "SHOST_RECOVERY\n", ioc->name); - } spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); _base_reset_handler(ioc, MPT2_IOC_PRE_RESET); @@ -3534,7 +3527,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - ioc->ioc_reset_in_progress = 0; + ioc->shost_recovery = 0; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); if (!r) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 998a7b847b3d..15827582a90e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -432,7 +432,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); * @fw_event_list: list of fw events * @aen_event_read_flag: event log was read * @broadcast_aen_busy: broadcast aen waiting to be serviced - * @ioc_reset_in_progress: host reset in progress + * @shost_recovery: host reset in progress * @ioc_reset_in_progress_lock: * @ioc_link_reset_in_progress: phy/hard reset in progress * @ignore_loginfos: ignore loginfos during task managment @@ -545,7 +545,6 @@ struct MPT2SAS_ADAPTER { /* misc flags */ int aen_event_read_flag; u8 broadcast_aen_busy; - u8 ioc_reset_in_progress; u8 shost_recovery; spinlock_t ioc_reset_in_progress_lock; u8 ioc_link_reset_in_progress; diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 14e473d1fa7b..c2a51018910f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -1963,7 +1963,6 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) { enum block_state state; long ret = -EINVAL; - unsigned long flags; state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; @@ -1989,13 +1988,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) !ioc) return -ENODEV; - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, - flags); + if (ioc->shost_recovery) return -EAGAIN; - } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { uarg = arg; @@ -2098,7 +2092,6 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) struct mpt2_ioctl_command karg; struct MPT2SAS_ADAPTER *ioc; enum block_state state; - unsigned long flags; if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32)) return -EINVAL; @@ -2113,13 +2106,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, - flags); + if (ioc->shost_recovery) return -EAGAIN; - } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); karg.hdr.ioc_number = karg32.hdr.ioc_number; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 471c3b604596..195f5e5aabf1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1785,17 +1785,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, u32 ioc_state; unsigned long timeleft; u8 VF_ID = 0; - unsigned long flags; - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED || - ioc->shost_recovery) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) { + printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n", + __func__, ioc->name); + return; + } + + if (ioc->shost_recovery) { printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", __func__, ioc->name); return; } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ioc_state = mpt2sas_base_get_iocstate(ioc, 0); if (ioc_state & MPI2_DOORBELL_USED) { @@ -2553,7 +2554,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) Mpi2SCSIIORequest_t *mpi_request; u32 mpi_control; u16 smid; - unsigned long flags; scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; @@ -2572,13 +2572,10 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } /* see if we are busy with task managment stuff */ - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (sas_target_priv_data->tm_busy || - ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (sas_target_priv_data->tm_busy) + return SCSI_MLQUEUE_DEVICE_BUSY; + else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) return SCSI_MLQUEUE_HOST_BUSY; - } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; @@ -3374,6 +3371,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) if (!handle) return -1; + if (ioc->shost_recovery) + return -1; + if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", @@ -3510,6 +3510,9 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle) struct _sas_node *sas_expander; unsigned long flags; + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); @@ -3681,6 +3684,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) mutex_unlock(&ioc->tm_cmds.mutex); dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset " "done: handle(0x%04x)\n", ioc->name, device_handle)); + if (ioc->shost_recovery) + goto out; } /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */ @@ -3846,6 +3851,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, "expander event\n", ioc->name)); return; } + if (ioc->shost_recovery) + return; if (event_data->PHY[i].PhyStatus & MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) continue; @@ -5217,13 +5224,10 @@ _firmware_event_work(struct work_struct *work) } spin_unlock_irqrestore(&ioc->fw_event_lock, flags); - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); if (ioc->shost_recovery) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); _scsih_fw_event_requeue(ioc, fw_event, 1000); return; } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); switch (fw_event->event) { case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: @@ -5425,6 +5429,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, if (!sas_device) continue; _scsih_remove_device(ioc, sas_device->handle); + if (ioc->shost_recovery) + return; goto retry_device_search; } } @@ -5446,6 +5452,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, if (!expander_sibling) continue; _scsih_expander_remove(ioc, expander_sibling->handle); + if (ioc->shost_recovery) + return; goto retry_expander_search; } } diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 686695b155c7..a53086d0381a 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -140,11 +140,18 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, u32 device_info; u32 ioc_status; + if (ioc->shost_recovery) { + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); - return -1; + return -ENXIO; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & @@ -153,7 +160,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, __FILE__, __LINE__, __func__); - return -1; + return -EIO; } memset(identify, 0, sizeof(identify)); @@ -288,21 +295,17 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, void *psge; u32 sgl_flags; u8 issue_reset = 0; - unsigned long flags; void *data_out = NULL; dma_addr_t data_out_dma; u32 sz; u64 *sas_address_le; u16 wait_state_count; - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery) { printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", __func__, ioc->name); return -EFAULT; } - 
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); mutex_lock(&ioc->transport_cmds.mutex); @@ -806,6 +809,12 @@ mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node; struct _sas_phy *mpt2sas_phy; + if (ioc->shost_recovery) { + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return; + } + spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_node = _transport_sas_node_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); @@ -1025,7 +1034,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *psge; u32 sgl_flags; u8 issue_reset = 0; - unsigned long flags; dma_addr_t dma_addr_in = 0; dma_addr_t dma_addr_out = 0; u16 wait_state_count; @@ -1045,14 +1053,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, return -EINVAL; } - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery) { printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", __func__, ioc->name); return -EFAULT; } - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); if (rc) From cc0f5207664f85da8d0b2ebdacec5daaee0ce95f Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:22:39 +0530 Subject: [PATCH 111/209] [SCSI] mpt2sas: Removed wrapper function _scsih_link_change. Deleted the wrapper function called _scsih_link_change. This function was implemented only for compatibility between different kernel versions and is no longer needed. The calling functions are converted to call mpt2sas_transport_update_phy_link_change directly in the transport layer.
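The refactor amounts to deleting a pass-through layer: the wrapper forwarded its arguments unchanged, so callers can invoke the transport routine themselves. The short user-space sketch below models only that pattern; struct adapter, transport_update_links() and the handle/rate values are illustrative stand-ins, not the driver's real types or data.

#include <stdio.h>

struct adapter { const char *name; };

/* stand-in for the transport-layer routine the callers now use directly */
static void transport_update_links(struct adapter *ioc, unsigned int handle,
                                   unsigned int attached_handle,
                                   unsigned int phy_number,
                                   unsigned int link_rate)
{
        printf("%s: handle 0x%04x phy %u -> attached 0x%04x, rate 0x%02x\n",
               ioc->name, handle, phy_number, attached_handle, link_rate);
}

/*
 * Before the change, an extra hop existed purely for compatibility:
 *
 *     static void scsih_link_change(struct adapter *ioc, ...)
 *     {
 *             transport_update_links(ioc, ...);
 *     }
 *
 * After the change, the event handler calls the target directly.
 */
static void topology_change_handler(struct adapter *ioc)
{
        transport_update_links(ioc, 0x0009, 0x000a, 0, 0x0a);
}

int main(void)
{
        struct adapter ioc = { .name = "ioc0" };

        topology_change_handler(&ioc);
        return 0;
}

The diff that follows shows the same conversion applied to the topology-change and IR event handlers in mpt2sas_scsih.c.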
Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 44 ++++++++---------------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 4 +-- 3 files changed, 17 insertions(+), 33 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 15827582a90e..39dc69627bc7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -798,7 +798,7 @@ int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); -void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, +void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate); extern struct sas_function_template mpt2sas_transport_functions; extern struct scsi_transport_template *mpt2sas_transport_template; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 195f5e5aabf1..c0d5d5e1f9c8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -3123,25 +3123,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) scmd->scsi_done(scmd); } -/** - * _scsih_link_change - process phy link changes - * @ioc: per adapter object - * @handle: phy handle - * @attached_handle: valid for devices attached to link - * @phy_number: phy number - * @link_rate: new link rate - * Context: user. - * - * Return nothing. - */ -static void -_scsih_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle, - u8 phy_number, u8 link_rate) -{ - mpt2sas_transport_update_phy_link_change(ioc, handle, attached_handle, - phy_number, link_rate); -} - /** * _scsih_sas_host_refresh - refreshing sas host object contents * @ioc: per adapter object @@ -3186,7 +3167,8 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update) le16_to_cpu(sas_iounit_pg0->PhyData[i]. ControllerDevHandle); if (update) - _scsih_link_change(ioc, + mpt2sas_transport_update_links( + ioc, ioc->sas_hba.phy[i].handle, le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
AttachedDevHandle), i, @@ -3868,9 +3850,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: if (!parent_handle) { if (phy_number < ioc->sas_hba.num_phys) - _scsih_link_change(ioc, - ioc->sas_hba.phy[phy_number].handle, - handle, phy_number, link_rate_); + mpt2sas_transport_update_links( + ioc, + ioc->sas_hba.phy[phy_number].handle, + handle, phy_number, link_rate_); } else { spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = @@ -3880,11 +3863,12 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, flags); if (sas_expander) { if (phy_number < sas_expander->num_phys) - _scsih_link_change(ioc, - sas_expander-> - phy[phy_number].handle, - handle, phy_number, - link_rate_); + mpt2sas_transport_update_links( + ioc, + sas_expander-> + phy[phy_number].handle, + handle, phy_number, + link_rate_); } } if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) { @@ -4400,7 +4384,7 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc, return; } - _scsih_link_change(ioc, + mpt2sas_transport_update_links(ioc, le16_to_cpu(sas_device_pg0.ParentDevHandle), handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); @@ -4689,7 +4673,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, return; } - _scsih_link_change(ioc, + mpt2sas_transport_update_links(ioc, le16_to_cpu(sas_device_pg0.ParentDevHandle), handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index a53086d0381a..742324a0a11e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -792,7 +792,7 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy } /** - * mpt2sas_transport_update_phy_link_change - refreshing phy link changes and attached devices + * mpt2sas_transport_update_links - refreshing phy link changes * @ioc: per adapter object * @handle: handle to sas_host or expander * @attached_handle: attached device handle @@ -802,7 +802,7 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy * Returns nothing. */ void -mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, +mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate) { unsigned long flags; From 34a03bef2202d0c9983a8da0a8abaee37d285847 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:23:19 +0530 Subject: [PATCH 112/209] [SCSI] mpt2sas: setting SDEV into RUNNING state from Interrupt context Changing SDEV Running state from interrupt context. Previously It was handle in work queue thread. With this change It will not wait for work queue thread to execute scsih_ublock_io_device to put SDEV into Running state. This will reduce delay for Device becoming RUNNING. 
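The sketch below is a stripped-down user-space model of that behaviour change: the event path flips the block state itself instead of deferring to a worker thread. model_device_block()/model_device_unblock() merely stand in for the midlayer's scsi_internal_device_block()/scsi_internal_device_unblock(), and the handle and link-rate values are placeholders rather than real MPI2 constants.

#include <stdio.h>
#include <stdbool.h>

struct model_sdev {
        unsigned int handle;
        bool blocked;                   /* models SDEV_BLOCK vs SDEV_RUNNING */
};

/* stand-ins for scsi_internal_device_block()/scsi_internal_device_unblock() */
static int model_device_block(struct model_sdev *sdev)
{
        sdev->blocked = true;
        printf("handle 0x%04x: blocked\n", sdev->handle);
        return 0;
}

static int model_device_unblock(struct model_sdev *sdev)
{
        sdev->blocked = false;
        printf("handle 0x%04x: running\n", sdev->handle);
        return 0;
}

/*
 * New behaviour: a phy-changed event with a usable link rate unblocks the
 * device immediately, in the event path, instead of waiting for the
 * firmware-event worker to do it later.
 */
static void topology_event(struct model_sdev *sdev, unsigned int link_rate)
{
        if (link_rate >= 0x08)          /* placeholder for the 1.5 Gb minimum */
                model_device_unblock(sdev);
        else
                model_device_block(sdev);
}

int main(void)
{
        struct model_sdev sdev = { .handle = 0x0009, .blocked = true };

        topology_event(&sdev, 0x0a);    /* link came back: unblock right away */
        return 0;
}

In the driver itself this logic lands in _scsih_block_io_to_children_attached_directly(), as the diff below shows, comparing the reported rate against MPI2_SAS_NEG_LINK_RATE_1_5.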
Modified this patch considering James comment "Not to change SDEV state using scsi_device_set_state API, instead use scsi_internal_device_unblock scsi_internal_device_block API" Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 ++ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 17 +++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 39dc69627bc7..1f3efd6569d3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -802,5 +802,7 @@ void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate); extern struct sas_function_template mpt2sas_transport_functions; extern struct scsi_transport_template *mpt2sas_transport_template; +extern int scsi_internal_device_block(struct scsi_device *sdev); +extern int scsi_internal_device_unblock(struct scsi_device *sdev); #endif /* MPT2SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c0d5d5e1f9c8..6e6d5af6b365 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2222,7 +2222,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) MPT2SAS_INFO_FMT "SDEV_RUNNING: " "handle(0x%04x)\n", ioc->name, handle)); sas_device_priv_data->block = 0; - scsi_device_set_state(sdev, SDEV_RUNNING); + scsi_internal_device_unblock(sdev); } } } @@ -2251,7 +2251,7 @@ _scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) MPT2SAS_INFO_FMT "SDEV_BLOCK: " "handle(0x%04x)\n", ioc->name, handle)); sas_device_priv_data->block = 1; - scsi_device_set_state(sdev, SDEV_BLOCK); + scsi_internal_device_block(sdev); } } } @@ -2327,6 +2327,7 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, u16 handle; u16 reason_code; u8 phy_number; + u8 link_rate; for (i = 0; i < event_data->NumEntries; i++) { handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); @@ -2337,6 +2338,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc, MPI2_EVENT_SAS_TOPO_RC_MASK; if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) _scsih_block_io_device(ioc, handle); + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) { + link_rate = event_data->PHY[i].LinkRate >> 4; + if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5) + _scsih_ublock_io_device(ioc, handle); + } } } @@ -3690,6 +3696,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) le32_to_cpu(mpi_reply.IOCLogInfo))); out: + + _scsih_ublock_io_device(ioc, handle); + mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->parent_handle); @@ -3871,10 +3880,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID, link_rate_); } } - if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) { - if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5) - _scsih_ublock_io_device(ioc, handle); - } if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) { if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5) break; From ed79f1280d1bc54f168abcffc8c3e0bf8ffb1873 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:23:49 +0530 Subject: [PATCH 113/209] [SCSI] mpt2sas: Raid 10 Volume is showing as Raid 1E in dmesg This patch modifies the slave_configure callback so the messages that get sent to system log for RAID1E volumes contain the string "RAID10" 
instead of "RAID1E". These messages contain information regarding what kind of scsi device is being added. Certain OEMS can enable displaying the RAID10 string instead of RAID1E via manufacturing page 10. The driver will read this config page at driver load time, then determine from the GenericFlags0 bits whether display the RAID10 or RAID1E string, also even drive count is taken into consideration. Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 7 ++- drivers/scsi/mpt2sas/mpt2sas_base.h | 37 +++++++++++++++- drivers/scsi/mpt2sas/mpt2sas_config.c | 61 +++++++++++++++++++++++++++ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 8 +++- 4 files changed, 109 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 1cfb503125b6..2e4bc3d2b435 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1542,6 +1542,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8, ioc->bios_pg3.BiosVersion & 0x000000FF); + _base_display_dell_branding(ioc); + printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { @@ -1554,8 +1556,6 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) i++; } - _base_display_dell_branding(ioc); - i = 0; printk("), "); printk("Capabilities=("); @@ -1627,6 +1627,9 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) u32 iounit_pg1_flags; mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); + if (ioc->ir_firmware) + mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 1f3efd6569d3..ff05ae1e43ba 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -197,6 +197,38 @@ struct MPT2SAS_TARGET { * @block: device is in SDEV_BLOCK state * @tlr_snoop_check: flag used in determining whether to disable TLR */ + +/* OEM Identifiers */ +#define MFG10_OEM_ID_INVALID (0x00000000) +#define MFG10_OEM_ID_DELL (0x00000001) +#define MFG10_OEM_ID_FSC (0x00000002) +#define MFG10_OEM_ID_SUN (0x00000003) +#define MFG10_OEM_ID_IBM (0x00000004) + +/* GENERIC Flags 0*/ +#define MFG10_GF0_OCE_DISABLED (0x00000001) +#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002) +#define MFG10_GF0_R10_DISPLAY (0x00000004) +#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) +#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) + +/* OEM Specific Flags will come from OEM specific header files */ +typedef struct _MPI2_CONFIG_PAGE_MAN_10 { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + U8 OEMIdentifier; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 08h */ + U32 Reserved3; /* 0Ch */ + U32 GenericFlags0; /* 10h */ + U32 GenericFlags1; /* 14h */ + U32 Reserved4; /* 18h */ + U32 OEMSpecificFlags0; /* 1Ch */ + U32 OEMSpecificFlags1; /* 20h */ + U32 Reserved5[18]; /* 24h-60h*/ +} MPI2_CONFIG_PAGE_MAN_10, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10, + Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t; + struct MPT2SAS_DEVICE { struct MPT2SAS_TARGET *sas_target; unsigned int lun; @@ -461,6 +493,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 
flags_length, dma_addr_t dma_addr); * @facts: static facts data * @pfacts: static port facts data * @manu_pg0: static manufacturing page 0 + * @manu_pg10: static manufacturing page 10 * @bios_pg2: static bios page 2 * @bios_pg3: static bios page 3 * @ioc_pg8: static ioc page 8 @@ -663,6 +696,7 @@ struct MPT2SAS_ADAPTER { dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT]; u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT]; u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT]; + Mpi2ManufacturingPage10_t manu_pg10; u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; }; @@ -734,6 +768,8 @@ void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 re int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys); int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); +int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page); int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page); int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t @@ -776,7 +812,6 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, u16 *volume_handle); int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle, u64 *wwid); - /* ctl shared API */ extern struct device_attribute *mpt2sas_host_attrs[]; extern struct device_attribute *mpt2sas_dev_attrs[]; diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 6ddee161beb3..b9f4d0f97e50 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -425,6 +425,67 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, return r; } +/** + * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + struct config_request mem; + + memset(config_page, 0, sizeof(Mpi2ManufacturingPage10_t)); + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request.Header.PageType = mpi_reply->Header.PageType; + mpi_request.Header.PageLength = mpi_reply->Header.PageLength; + mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; + if (mem.config_page_sz > ioc->config_page_sz) { + r = _config_alloc_config_dma_memory(ioc, &mem); + if (r) + goto out; + } else { + mem.config_page_dma = ioc->config_page_dma; + mem.config_page = ioc->config_page; + } + ioc->base_add_sg_single(&mpi_request.PageBufferSGE, + MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, + mem.config_page_dma); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (!r) + memcpy(config_page, mem.config_page, + min_t(u16, mem.config_page_sz, + sizeof(Mpi2ManufacturingPage10_t))); + + if (mem.config_page_sz > ioc->config_page_sz) + _config_free_config_dma_memory(ioc, &mem); + + out: + return r; +} + /** * mpt2sas_config_get_bios_pg2 - obtain bios page 2 * @ioc: per adapter object diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6e6d5af6b365..e71a6c04298b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1501,7 +1501,13 @@ _scsih_slave_configure(struct scsi_device *sdev) break; case MPI2_RAID_VOL_TYPE_RAID1E: qdepth = MPT2SAS_RAID_QUEUE_DEPTH; - r_level = "RAID1E"; + if (ioc->manu_pg10.OEMIdentifier && + (ioc->manu_pg10.GenericFlags0 & + MFG10_GF0_R10_DISPLAY) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; break; case MPI2_RAID_VOL_TYPE_RAID1: qdepth = MPT2SAS_RAID_QUEUE_DEPTH; From 5b768581ea722172f059ad5a5eebea9008961af0 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:24:31 +0530 Subject: [PATCH 114/209] [SCSI] mpt2sas: cleanup interrupt routine and config_request optimization Cleaned up the base_interrupt routine to be more efficient. Deleted about a third of the config page API by moving redundant code from all the calling functions to _config_request.
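A compact user-space model of that consolidation is sketched below: one generic request routine owns the scratch buffer and the copy-out, so each per-page getter shrinks to a few lines. All names here (model_config_request, model_page0, model_page10) and sizes are illustrative stand-ins, not the driver's real config-page plumbing.

#include <stdio.h>
#include <string.h>

struct model_page0 { unsigned char num_phys; };
struct model_page10 { unsigned int generic_flags0; };

/* generic path: buffer setup, the exchange and the copy-out all live here */
static int model_config_request(const char *page_name,
                                void *config_page, size_t config_page_sz)
{
        unsigned char scratch[64];      /* stands in for the shared DMA buffer */

        memset(scratch, 0, sizeof(scratch));
        /* ... the header handshake and firmware read would fill 'scratch' ... */
        if (config_page)
                memcpy(config_page, scratch,
                       config_page_sz < sizeof(scratch) ?
                       config_page_sz : sizeof(scratch));
        printf("fetched %s (%zu bytes)\n", page_name, config_page_sz);
        return 0;
}

/* per-page wrappers reduce to passing their buffer and its size */
static int model_get_page0(struct model_page0 *pg)
{
        return model_config_request("page 0", pg, sizeof(*pg));
}

static int model_get_page10(struct model_page10 *pg)
{
        return model_config_request("page 10", pg, sizeof(*pg));
}

int main(void)
{
        struct model_page0 pg0;
        struct model_page10 pg10;

        model_get_page0(&pg0);
        model_get_page10(&pg10);
        return 0;
}

Centralizing the buffer handling in _config_request(), which now takes the caller's page pointer and size, is what lets the patch below drop the duplicated header/DMA/SGE boilerplate from every mpt2sas_config_get_* helper.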
Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 73 +-- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_config.c | 911 ++++++-------------------- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1 + 4 files changed, 234 insertions(+), 753 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 2e4bc3d2b435..d95d2f274cb3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -687,6 +687,14 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc) ioc->mask_interrupts = 0; } +union reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + /** * _base_interrupt - MPT adapter (IOC) specific interrupt handler. * @irq: irq number (not used) @@ -698,47 +706,38 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc) static irqreturn_t _base_interrupt(int irq, void *bus_id) { - union reply_descriptor { - u64 word; - struct { - u32 low; - u32 high; - } u; - }; union reply_descriptor rd; - u32 post_index, post_index_next, completed_cmds; + u32 completed_cmds; u8 request_desript_type; u16 smid; u8 cb_idx; u32 reply; u8 VF_ID; - int i; struct MPT2SAS_ADAPTER *ioc = bus_id; + Mpi2ReplyDescriptorsUnion_t *rpf; if (ioc->mask_interrupts) return IRQ_NONE; - post_index = ioc->reply_post_host_index; - request_desript_type = ioc->reply_post_free[post_index]. - Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + rpf = &ioc->reply_post_free[ioc->reply_post_host_index]; + request_desript_type = rpf->Default.ReplyFlags + & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) return IRQ_NONE; completed_cmds = 0; do { - rd.word = ioc->reply_post_free[post_index].Words; + rd.word = rpf->Words; if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) goto out; reply = 0; cb_idx = 0xFF; - smid = le16_to_cpu(ioc->reply_post_free[post_index]. - Default.DescriptorTypeDependent1); - VF_ID = ioc->reply_post_free[post_index]. - Default.VF_ID; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + VF_ID = rpf->Default.VF_ID; if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { - reply = le32_to_cpu(ioc->reply_post_free[post_index]. - AddressReply.ReplyFrameAddress); + reply = le32_to_cpu + (rpf->AddressReply.ReplyFrameAddress); } else if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) goto next; @@ -765,21 +764,27 @@ _base_interrupt(int irq, void *bus_id) 0 : ioc->reply_free_host_index + 1; ioc->reply_free[ioc->reply_free_host_index] = cpu_to_le32(reply); + wmb(); writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); - wmb(); } next: - post_index_next = (post_index == (ioc->reply_post_queue_depth - - 1)) ? 0 : post_index + 1; + + rpf->Words = ULLONG_MAX; + ioc->reply_post_host_index = (ioc->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 0 : + ioc->reply_post_host_index + 1; request_desript_type = - ioc->reply_post_free[post_index_next].Default.ReplyFlags - & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + ioc->reply_post_free[ioc->reply_post_host_index].Default. 
+ ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; completed_cmds++; if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) goto out; - post_index = post_index_next; + if (!ioc->reply_post_host_index) + rpf = ioc->reply_post_free; + else + rpf++; } while (1); out: @@ -787,19 +792,8 @@ _base_interrupt(int irq, void *bus_id) if (!completed_cmds) return IRQ_NONE; - /* reply post descriptor handling */ - post_index_next = ioc->reply_post_host_index; - for (i = 0 ; i < completed_cmds; i++) { - post_index = post_index_next; - /* poison the reply post descriptor */ - ioc->reply_post_free[post_index_next].Words = ULLONG_MAX; - post_index_next = (post_index == - (ioc->reply_post_queue_depth - 1)) - ? 0 : post_index + 1; - } - ioc->reply_post_host_index = post_index_next; - writel(post_index_next, &ioc->chip->ReplyPostHostIndex); wmb(); + writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex); return IRQ_HANDLED; } @@ -1650,7 +1644,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) iounit_pg1_flags |= MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); - mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, ioc->iounit_pg1); + mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); } /** @@ -3306,13 +3300,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; mutex_init(&ioc->tm_cmds.mutex); - init_completion(&ioc->tm_cmds.done); /* config page internal command bits */ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); ioc->config_cmds.status = MPT2_CMD_NOT_USED; mutex_init(&ioc->config_cmds.mutex); - init_completion(&ioc->config_cmds.done); /* ctl module internal command bits */ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); @@ -3436,6 +3428,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) if (ioc->config_cmds.status & MPT2_CMD_PENDING) { ioc->config_cmds.status |= MPT2_CMD_RESET; mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); + ioc->config_cmds.smid = USHORT_MAX; complete(&ioc->config_cmds.done); } break; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index ff05ae1e43ba..f4bf7ff8f480 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -785,7 +785,7 @@ int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRep int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page); int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t - *mpi_reply, Mpi2IOUnitPage1_t config_page); + *mpi_reply, Mpi2IOUnitPage1_t *config_page); int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index b9f4d0f97e50..ab8c560865d8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -72,15 +72,15 @@ /** * struct config_request - obtain dma memory via routine - * @config_page_sz: size - * @config_page: virt pointer - * @config_page_dma: phys pointer + * @sz: size + * @page: virt pointer + * @page_dma: phys pointer * */ struct config_request{ - u16 config_page_sz; - void *config_page; - dma_addr_t config_page_dma; + u16 
sz; + void *page; + dma_addr_t page_dma; }; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING @@ -174,6 +174,55 @@ _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid, } #endif +/** + * _config_alloc_config_dma_memory - obtain physical memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper for obtaining dma-able memory for config page request. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) { + printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent" + " failed asking for (%d) bytes!!\n", + ioc->name, __func__, mem->sz); + r = -ENOMEM; + } + } else { /* use tmp buffer if less than 512 bytes */ + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + return r; +} + +/** + * _config_free_config_dma_memory - wrapper to free the memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. + * + * Returns 0 for success, non-zero for failure. + */ +static void +_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + /** * mpt2sas_config_done - config page completion routine * @ioc: per adapter object @@ -206,6 +255,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_done", mpi_reply); #endif + ioc->config_cmds.smid = USHORT_MAX; complete(&ioc->config_cmds.done); } @@ -215,7 +265,9 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) * @mpi_request: request message frame * @mpi_reply: reply mf payload returned from firmware * @timeout: timeout in seconds - * Context: sleep, the calling function needs to acquire the config_cmds.mutex + * @config_page: contents of the config page + * @config_page_sz: size of config page + * Context: sleep * * A generic API for config page requests to firmware. 
* @@ -228,16 +280,17 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) */ static int _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t - *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout) + *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) { u16 smid; u32 ioc_state; unsigned long timeleft; Mpi2ConfigRequest_t *config_request; int r; - u8 retry_count; - u8 issue_host_reset = 0; + u8 retry_count, issue_host_reset = 0; u16 wait_state_count; + struct config_request mem; mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { @@ -246,12 +299,44 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t mutex_unlock(&ioc->config_cmds.mutex); return -EAGAIN; } + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = _config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, + mem.page_dma); + memcpy(mem.page, config_page, min_t(u16, mem.sz, + config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma); + } + } retry_config: if (retry_count) { - if (retry_count > 2) /* attempt only 2 retries */ - return -EFAULT; + if (retry_count > 2) { /* attempt only 2 retries */ + r = -EFAULT; + goto free_mem; + } printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n", ioc->name, __func__, retry_count); } @@ -262,8 +347,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t printk(MPT2SAS_ERR_FMT "%s: failed due to ioc not operational\n", ioc->name, __func__); + ioc->config_cmds.status = MPT2_CMD_NOT_USED; r = -EFAULT; - goto out; + goto free_mem; } ssleep(1); ioc_state = mpt2sas_base_get_iocstate(ioc, 1); @@ -279,8 +365,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); + ioc->config_cmds.status = MPT2_CMD_NOT_USED; r = -EAGAIN; - goto out; + goto free_mem; } r = 0; @@ -292,6 +379,7 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_request", NULL); #endif + init_completion(&ioc->config_cmds.done); mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID); timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); @@ -303,67 +391,37 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t retry_count++; if (ioc->config_cmds.smid == smid) mpt2sas_base_free_smid(ioc, smid); - if ((ioc->shost_recovery) || - (ioc->config_cmds.status & MPT2_CMD_RESET)) + if ((ioc->shost_recovery) || (ioc->config_cmds.status & + MPT2_CMD_RESET)) goto retry_config; issue_host_reset = 1; r = -EFAULT; 
- goto out; + goto free_mem; } + if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) memcpy(mpi_reply, ioc->config_cmds.reply, sizeof(Mpi2ConfigReply_t)); if (retry_count) - printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", - ioc->name, __func__); -out: + printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n", + ioc->name, __func__, retry_count); + if (config_page && mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + free_mem: + if (config_page) + _config_free_config_dma_memory(ioc, &mem); + out: ioc->config_cmds.status = MPT2_CMD_NOT_USED; mutex_unlock(&ioc->config_cmds.mutex); + if (issue_host_reset) mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); return r; } -/** - * _config_alloc_config_dma_memory - obtain physical memory - * @ioc: per adapter object - * @mem: struct config_request - * - * A wrapper for obtaining dma-able memory for config page request. - * - * Returns 0 for success, non-zero for failure. - */ -static int -_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, - struct config_request *mem) -{ - int r = 0; - - mem->config_page = pci_alloc_consistent(ioc->pdev, mem->config_page_sz, - &mem->config_page_dma); - if (!mem->config_page) - r = -ENOMEM; - return r; -} - -/** - * _config_free_config_dma_memory - wrapper to free the memory - * @ioc: per adapter object - * @mem: struct config_request - * - * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. - * - * Returns 0 for success, non-zero for failure. - */ -static void -_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, - struct config_request *mem) -{ - pci_free_consistent(ioc->pdev, mem->config_page_sz, mem->config_page, - mem->config_page_dma); -} - /** * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 * @ioc: per adapter object @@ -379,9 +437,7 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -390,37 +446,14 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - 
sizeof(Mpi2ManufacturingPage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -440,9 +473,7 @@ mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2ManufacturingPage10_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -451,37 +482,14 @@ mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2ManufacturingPage10_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -501,9 +509,7 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2BiosPage2_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -512,37 +518,14 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = 
_config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2BiosPage2_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -562,9 +545,7 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2BiosPage3_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -573,37 +554,14 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2BiosPage3_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -623,9 +581,7 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -634,37 +590,14 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = 
ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2IOUnitPage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -684,9 +617,7 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -695,37 +626,14 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2IOUnitPage1_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -741,11 +649,10 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, */ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, - Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t config_page) + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; @@ -755,38 +662,14 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = 
le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - - memset(mem.config_page, 0, mem.config_page_sz); - memcpy(mem.config_page, &config_page, - sizeof(Mpi2IOUnitPage1_t)); - - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -806,9 +689,7 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2IOCPage8_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -817,37 +698,14 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2IOCPage8_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -869,9 +727,7 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -881,39 +737,15 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageNumber = 0; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - 
mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasDevicePage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -935,9 +767,7 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -947,39 +777,15 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageNumber = 1; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasDevicePage1_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -997,11 +803,11 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; u16 ioc_status; Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t config_page; + *num_phys = 0; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1011,44 +817,20 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys) mpi_request.Header.PageVersion = 
MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber; - mpi_request.Header.PageType = mpi_reply.Header.PageType; - mpi_request.ExtPageLength = mpi_reply.ExtPageLength; - mpi_request.ExtPageType = mpi_reply.ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2SasIOUnitPage0_t)); if (!r) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { - memcpy(&config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasIOUnitPage0_t))); + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) *num_phys = config_page.NumPhys; - } } - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - out: return r; } @@ -1072,8 +854,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sz); + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1083,37 +864,13 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, sz, mem.config_page_sz)); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } @@ -1137,9 +894,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER 
*ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sz); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1149,37 +904,13 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, sz, mem.config_page_sz)); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } @@ -1201,9 +932,7 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1213,39 +942,15 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - 
sizeof(Mpi2ExpanderPage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1268,9 +973,7 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1280,7 +983,7 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; @@ -1288,33 +991,9 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2ExpanderPage1_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1336,9 +1015,7 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1348,39 +1025,15 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - 
mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasEnclosurePage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1401,9 +1054,7 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1413,40 +1064,16 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasPhyPage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1467,9 +1094,7 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1479,40 +1104,16 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; 
mpi_request.PageAddress = cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.ExtPageLength = mpi_reply->ExtPageLength; - mpi_request.ExtPageType = mpi_reply->ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2SasPhyPage1_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1535,9 +1136,7 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; - memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t)); memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; @@ -1546,38 +1145,15 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2RaidVolPage1_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1596,10 +1172,9 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 *num_pds) { Mpi2ConfigRequest_t mpi_request; - Mpi2RaidVolPage0_t *config_page; + Mpi2RaidVolPage0_t config_page; Mpi2ConfigReply_t mpi_reply; int r; - struct config_request mem; u16 ioc_status; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); @@ -1611,43 +1186,23 @@ 
mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber; - mpi_request.Header.PageType = mpi_reply.Header.PageType; - mpi_request.Header.PageLength = mpi_reply.Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply.Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2RaidVolPage0_t)); if (!r) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { - config_page = mem.config_page; - *num_pds = config_page->NumPhysDisks; - } + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; } - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - out: return r; } @@ -1671,10 +1226,8 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - memset(config_page, 0, sz); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; @@ -1682,37 +1235,14 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | handle); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, sz, mem.config_page_sz)); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, 
&mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); out: return r; } @@ -1735,10 +1265,8 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t { Mpi2ConfigRequest_t mpi_request; int r; - struct config_request mem; memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); - memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t)); mpi_request.Function = MPI2_FUNCTION_CONFIG; mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; @@ -1746,38 +1274,15 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(form | form_specific); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber; - mpi_request.Header.PageType = mpi_reply->Header.PageType; - mpi_request.Header.PageLength = mpi_reply->Header.PageLength; - mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); r = _config_request(ioc, &mpi_request, mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); - if (!r) - memcpy(config_page, mem.config_page, - min_t(u16, mem.config_page_sz, - sizeof(Mpi2RaidPhysDiskPage0_t))); - - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); out: return r; } @@ -1795,11 +1300,10 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, u16 *volume_handle) { - Mpi2RaidConfigurationPage0_t *config_page; + Mpi2RaidConfigurationPage0_t *config_page = NULL; Mpi2ConfigRequest_t mpi_request; Mpi2ConfigReply_t mpi_reply; - int r, i; - struct config_request mem; + int r, i, config_page_sz; u16 ioc_status; *volume_handle = 0; @@ -1812,40 +1316,27 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, mpi_request.Header.PageNumber = 0; mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); if (r) goto out; mpi_request.PageAddress = cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG); mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; - mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion; - mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber; - mpi_request.Header.PageType = mpi_reply.Header.PageType; - mpi_request.ExtPageLength = mpi_reply.ExtPageLength; - mpi_request.ExtPageType = mpi_reply.ExtPageType; - mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4; - if (mem.config_page_sz > ioc->config_page_sz) { - r = _config_alloc_config_dma_memory(ioc, &mem); - if (r) - goto out; - } else { - mem.config_page_dma = ioc->config_page_dma; - mem.config_page = 
ioc->config_page; - } - ioc->base_add_sg_single(&mpi_request.PageBufferSGE, - MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz, - mem.config_page_dma); + config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); + config_page = kmalloc(config_page_sz, GFP_KERNEL); + if (!config_page) + goto out; r = _config_request(ioc, &mpi_request, &mpi_reply, - MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + config_page_sz); if (r) goto out; r = -1; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) - goto done; - config_page = mem.config_page; + goto out; for (i = 0; i < config_page->NumElements; i++) { if ((config_page->ConfigElement[i].ElementFlags & MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) != @@ -1856,15 +1347,11 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, *volume_handle = le16_to_cpu(config_page-> ConfigElement[i].VolDevHandle); r = 0; - goto done; + goto out; } } - - done: - if (mem.config_page_sz > ioc->config_page_sz) - _config_free_config_dma_memory(ioc, &mem); - out: + kfree(config_page); return r; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index e71a6c04298b..774b34525bba 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1836,6 +1836,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, mpi_request->TaskMID = cpu_to_le16(smid_task); int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); mpt2sas_scsih_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID); timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); mpt2sas_scsih_clear_tm_flag(ioc, handle); From dc162b63bfc4201ca5c13df3fc8836c56979eb07 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Thu, 20 Aug 2009 13:25:03 +0530 Subject: [PATCH 115/209] [SCSI] mpt2sas: Bump driver version 01.100.06.00 Bump version to 01.100.06.00 Signed-off-by: Kashyap Desai Reviewed-by: Eric Moore Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index f4bf7ff8f480..2faab1e690e9 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,10 +69,10 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "01.100.04.00" +#define MPT2SAS_DRIVER_VERSION "01.100.06.00" #define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 -#define MPT2SAS_BUILD_VERSION 04 +#define MPT2SAS_BUILD_VERSION 06 #define MPT2SAS_RELEASE_VERSION 00 /* From cf53b069f52ae3f83dec1acd339e3c3a2e979478 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Thu, 20 Aug 2009 11:06:04 -0700 Subject: [PATCH 116/209] [SCSI] qla2xxx: Generalize srb structure usage. Lay groundwork for adding alternative asynchronous operations by generalizing and extending the SRB structure. This allows for follow-on patches to add support for: - Asynchronous logins. - ELS/CT passthru requests. - Loopback requests. - Non-blocking mailbox commands (ABTS, Task Management, etc).
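A minimal editorial sketch of the resulting srb usage (not code from this patch; the helper name is hypothetical): with this change an srb_t in req->outstanding_cmds[] is either a normal SCSI command (sp->ctx == NULL, sp->cmd valid) or a driver-internal control request (sp->ctx != NULL), and the new sp->handle field records the ring slot so a completion can be matched back to its SRB. Code that walks the outstanding array must therefore skip control SRBs, exactly as the qla_os.c hunks in this patch do with "if (sp->ctx) continue;".

static int count_scsi_srbs(struct req_que *req)
{
	int cnt, busy = 0;
	srb_t *sp;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->ctx)	/* control SRB: no scsi_cmnd attached, skip it */
			continue;
		busy++;		/* regular SCSI-command SRB */
	}
	return busy;
}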
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 3 +++ drivers/scsi/qla2xxx/qla_iocb.c | 2 ++ drivers/scsi/qla2xxx/qla_os.c | 11 +++++++++-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 68ab28c8152b..9eb7be684f0a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -189,6 +189,7 @@ struct req_que; */ typedef struct srb { struct fc_port *fcport; + uint32_t handle; struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ @@ -196,6 +197,8 @@ typedef struct srb { uint32_t request_sense_length; uint8_t *request_sense_ptr; + + void *ctx; } srb_t; /* diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c0ba370b23cf..d37554ecd313 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -350,6 +350,7 @@ qla2x00_start_scsi(srb_t *sp) /* Build command packet */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; + sp->handle = handle; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; @@ -778,6 +779,7 @@ qla24xx_start_scsi(srb_t *sp) /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; + sp->handle = handle; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d7b271339a5c..b6c088caf35e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -473,6 +473,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, sp->flags = 0; CMD_SP(cmd) = (void *)sp; cmd->scsi_done = done; + sp->ctx = NULL; return sp; } @@ -709,6 +710,8 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport) continue; if (sp->fcport != fcport) continue; + if (sp->ctx) + continue; spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { @@ -794,7 +797,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) if (sp == NULL) continue; - + if (sp->ctx) + continue; if (sp->cmd != cmd) continue; @@ -859,7 +863,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, sp = req->outstanding_cmds[cnt]; if (!sp) continue; - + if (sp->ctx) + continue; if (vha->vp_idx != sp->fcport->vha->vp_idx) continue; match = 0; @@ -2986,6 +2991,8 @@ qla2x00_timer(scsi_qla_host_t *vha) sp = req->outstanding_cmds[index]; if (!sp) continue; + if (sp->ctx) + continue; sfcp = sp->fcport; if (!(sfcp->flags & FCF_TAPE_PRESENT)) continue; From ac280b670e6d6666667aba02324e2fc50bd96ae7 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Thu, 20 Aug 2009 11:06:05 -0700 Subject: [PATCH 117/209] [SCSI] qla2xxx: Add asynchronous-login support. ISPs which support this feature include 23xx and above. 
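Before the diff, a condensed sketch of the intended calling pattern (editorial illustration, not code from this patch; the wrapper function below is hypothetical). Callers no longer block in a mailbox command for fabric logins: a capable ISP queues a work event, and the DPC thread drives an SRB-based login IOCB whose completion is reported back through a matching "done" work event.

static int try_async_relogin(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	uint16_t data[2] = { 0, QLA_LOGIO_LOGIN_RETRIED };

	/* Per this patch, only 23xx and FWI2-capable ISPs do async login. */
	if (!IS_ALOGIO_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;	/* caller falls back to qla2x00_fabric_login() */

	/*
	 * Queues QLA_EVT_ASYNC_LOGIN; qla2x00_do_work() later calls
	 * qla2x00_async_login(), which allocates a context SRB with an
	 * R_A_TOV-based timer and issues the login IOCB.  The ISR
	 * (qla2x00_mbx_iocb_entry()/qla24xx_logio_entry()) completes it
	 * by posting the corresponding *_DONE work event.
	 */
	return qla2x00_post_async_login_work(vha, fcport, data);
}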
Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 32 ++++ drivers/scsi/qla2xxx/qla_gbl.h | 20 +++ drivers/scsi/qla2xxx/qla_init.c | 215 ++++++++++++++++++++++++++- drivers/scsi/qla2xxx/qla_iocb.c | 202 ++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_isr.c | 250 ++++++++++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_mid.c | 2 + drivers/scsi/qla2xxx/qla_os.c | 72 ++++++++- 7 files changed, 784 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9eb7be684f0a..efdfb1eb26ba 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -206,6 +206,28 @@ typedef struct srb { */ #define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ +/* + * SRB extensions. + */ +struct srb_ctx { +#define SRB_LOGIN_CMD 1 +#define SRB_LOGOUT_CMD 2 + uint16_t type; + struct timer_list timer; + + void (*free)(srb_t *sp); + void (*timeout)(srb_t *sp); +}; + +struct srb_logio { + struct srb_ctx ctx; + +#define SRB_LOGIN_RETRIED BIT_0 +#define SRB_LOGIN_COND_PLOGI BIT_1 +#define SRB_LOGIN_SKIP_PRLI BIT_2 + uint16_t flags; +}; + /* * ISP I/O Register Set structure definitions. */ @@ -2096,6 +2118,10 @@ struct qla_msix_entry { enum qla_work_type { QLA_EVT_AEN, QLA_EVT_IDC_ACK, + QLA_EVT_ASYNC_LOGIN, + QLA_EVT_ASYNC_LOGIN_DONE, + QLA_EVT_ASYNC_LOGOUT, + QLA_EVT_ASYNC_LOGOUT_DONE, }; @@ -2114,6 +2140,11 @@ struct qla_work_evt { #define QLA_IDC_ACK_REGS 7 uint16_t mb[QLA_IDC_ACK_REGS]; } idc_ack; + struct { + struct fc_port *fcport; +#define QLA_LOGIO_LOGIN_RETRIED BIT_0 + u16 data[2]; + } logio; } u; }; @@ -2354,6 +2385,7 @@ struct qla_hw_data { (ha)->flags.msix_enabled) #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) +#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 65b12d82867c..f3d1d1afa95b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -52,6 +52,14 @@ extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); extern void qla84xx_put_chip(struct scsi_qla_host *); +extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); +extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, + uint16_t *); + /* * Global Data in qla_os.c source file. 
*/ @@ -76,6 +84,15 @@ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum fc_host_event_code, u32); extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); +extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *, + fc_port_t *, uint16_t *); +extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, + fc_port_t *, uint16_t *); + extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); extern void qla2x00_abort_fcport_cmds(fc_port_t *); @@ -83,6 +100,8 @@ extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, struct qla_hw_data *); extern void qla2x00_free_host(struct scsi_qla_host *); extern void qla2x00_relogin(struct scsi_qla_host *); +extern void qla2x00_do_work(struct scsi_qla_host *); + /* * Global Functions in qla_mid.c source file. */ @@ -135,6 +154,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, uint16_t, uint16_t, uint8_t); int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, uint16_t, uint16_t, uint8_t); +extern int qla2x00_start_sp(srb_t *); /* * Global Function Prototypes in qla_mbx.c source file. diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 0cbe39e92506..37c99a27874c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -40,6 +40,210 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); +/* SRB Extensions ---------------------------------------------------------- */ + +static void +qla2x00_ctx_sp_timeout(unsigned long __data) +{ + srb_t *sp = (srb_t *)__data; + struct srb_ctx *ctx; + fc_port_t *fcport = sp->fcport; + struct qla_hw_data *ha = fcport->vha->hw; + struct req_que *req; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + req = ha->req_q_map[0]; + req->outstanding_cmds[sp->handle] = NULL; + ctx = sp->ctx; + ctx->timeout(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ctx->free(sp); +} + +static void +qla2x00_ctx_sp_free(srb_t *sp) +{ + struct srb_ctx *ctx = sp->ctx; + + kfree(ctx); + mempool_free(sp, sp->fcport->vha->hw->srb_mempool); +} + +inline srb_t * +qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, + unsigned long tmo) +{ + srb_t *sp; + struct qla_hw_data *ha = vha->hw; + struct srb_ctx *ctx; + + sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); + if (!sp) + goto done; + ctx = kzalloc(size, GFP_KERNEL); + if (!ctx) { + mempool_free(sp, ha->srb_mempool); + goto done; + } + + memset(sp, 0, sizeof(*sp)); + sp->fcport = fcport; + sp->ctx = ctx; + ctx->free = qla2x00_ctx_sp_free; + + init_timer(&ctx->timer); + if (!tmo) + goto done; + ctx->timer.expires = jiffies + tmo * HZ; + ctx->timer.data = (unsigned long)sp; + ctx->timer.function = qla2x00_ctx_sp_timeout; + add_timer(&ctx->timer); +done: + return sp; +} + +/* Asynchronous Login/Logout Routines -------------------------------------- */ + +#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2) + +static void +qla2x00_async_logio_timeout(srb_t *sp) +{ + fc_port_t *fcport = sp->fcport; + struct srb_logio *lio = sp->ctx; + + DEBUG2(printk(KERN_WARNING + "scsi(%ld:%x): Async-%s 
timeout.\n", + fcport->vha->host_no, sp->handle, + lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout")); + + if (lio->ctx.type == SRB_LOGIN_CMD) + qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); +} + +int +qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + struct srb_logio *lio; + int rval; + + rval = QLA_FUNCTION_FAILED; + sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio), + ELS_TMO_2_RATOV(ha) + 2); + if (!sp) + goto done; + + lio = sp->ctx; + lio->ctx.type = SRB_LOGIN_CMD; + lio->ctx.timeout = qla2x00_async_logio_timeout; + lio->flags |= SRB_LOGIN_COND_PLOGI; + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + lio->flags |= SRB_LOGIN_RETRIED; + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + DEBUG2(printk(KERN_DEBUG + "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " + "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->login_retry)); + return rval; + +done_free_sp: + del_timer_sync(&lio->ctx.timer); + lio->ctx.free(sp); +done: + return rval; +} + +int +qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + struct srb_logio *lio; + int rval; + + rval = QLA_FUNCTION_FAILED; + sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio), + ELS_TMO_2_RATOV(ha) + 2); + if (!sp) + goto done; + + lio = sp->ctx; + lio->ctx.type = SRB_LOGOUT_CMD; + lio->ctx.timeout = qla2x00_async_logio_timeout; + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + DEBUG2(printk(KERN_DEBUG + "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", + fcport->vha->host_no, sp->handle, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); + return rval; + +done_free_sp: + del_timer_sync(&lio->ctx.timer); + lio->ctx.free(sp); +done: + return rval; +} + +int +qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + int rval; + uint8_t opts = 0; + + switch (data[0]) { + case MBS_COMMAND_COMPLETE: + if (fcport->flags & FCF_TAPE_PRESENT) + opts |= BIT_1; + rval = qla2x00_get_port_database(vha, fcport, opts); + if (rval != QLA_SUCCESS) + qla2x00_mark_device_lost(vha, fcport, 1, 0); + else + qla2x00_update_fcport(vha, fcport); + break; + case MBS_COMMAND_ERROR: + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + else + qla2x00_mark_device_lost(vha, fcport, 1, 0); + break; + case MBS_PORT_ID_USED: + fcport->loop_id = data[1]; + qla2x00_post_async_login_work(vha, fcport, NULL); + break; + case MBS_LOOP_ID_USED: + fcport->loop_id++; + rval = qla2x00_find_new_loop_id(vha, fcport); + if (rval != QLA_SUCCESS) { + qla2x00_mark_device_lost(vha, fcport, 1, 0); + break; + } + qla2x00_post_async_login_work(vha, fcport, NULL); + break; + } + return QLA_SUCCESS; +} + +int +qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, + uint16_t *data) +{ + qla2x00_mark_device_lost(vha, fcport, 1, 0); + return QLA_SUCCESS; +} + /****************************************************************************/ /* QLogic ISP2x00 Hardware Support Functions. 
*/ /****************************************************************************/ @@ -1977,7 +2181,7 @@ qla2x00_rport_del(void *data) struct fc_rport *rport; spin_lock_irq(fcport->vha->host->host_lock); - rport = fcport->drport; + rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irq(fcport->vha->host->host_lock); if (rport) @@ -2344,8 +2548,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) struct fc_rport *rport; struct qla_hw_data *ha = vha->hw; - if (fcport->drport) - qla2x00_rport_del(fcport); + qla2x00_rport_del(fcport); rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); @@ -3038,6 +3241,12 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, rval = QLA_SUCCESS; retry = 0; + if (IS_ALOGIO_CAPABLE(ha)) { + rval = qla2x00_post_async_login_work(vha, fcport, NULL); + if (!rval) + return rval; + } + rval = qla2x00_fabric_login(vha, fcport, next_loopid); if (rval == QLA_SUCCESS) { /* Send an ADISC to tape devices.*/ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index d37554ecd313..c5ccac0bef76 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -860,3 +860,205 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) else *rsp = ha->rsp_q_map[0]; } + +/* Generic Control-SRB manipulation functions. */ + +static void * +qla2x00_alloc_iocbs(srb_t *sp) +{ + scsi_qla_host_t *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); + uint32_t index, handle; + request_t *pkt; + uint16_t cnt, req_cnt; + + pkt = NULL; + req_cnt = 1; + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { + handle++; + if (handle == MAX_OUTSTANDING_COMMANDS) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == MAX_OUTSTANDING_COMMANDS) + goto queuing_error; + + /* Check for room on request queue. */ + if (req->cnt < req_cnt) { + if (ha->mqenable) + cnt = RD_REG_DWORD(®->isp25mq.req_q_out); + else if (IS_FWI2_CAPABLE(ha)) + cnt = RD_REG_DWORD(®->isp24.req_q_out); + else + cnt = qla2x00_debounce_register( + ISP_REQ_Q_OUT(ha, ®->isp)); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + } + if (req->cnt < req_cnt) + goto queuing_error; + + /* Prep packet */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + req->cnt -= req_cnt; + + pkt = req->ring_ptr; + memset(pkt, 0, REQUEST_ENTRY_SIZE); + pkt->entry_count = req_cnt; + pkt->handle = handle; + sp->handle = handle; + +queuing_error: + return pkt; +} + +static void +qla2x00_start_iocbs(srb_t *sp) +{ + struct qla_hw_data *ha = sp->fcport->vha->hw; + struct req_que *req = ha->req_q_map[0]; + device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); + struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + /* Set chip new ring index. 
*/ + if (ha->mqenable) { + WRT_REG_DWORD(®->isp25mq.req_q_in, req->ring_index); + RD_REG_DWORD(&ioreg->hccr); + } else if (IS_FWI2_CAPABLE(ha)) { + WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); + RD_REG_DWORD_RELAXED(®->isp24.req_q_in); + } else { + WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), req->ring_index); + RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); + } +} + +static void +qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + struct srb_logio *lio = sp->ctx; + + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); + if (lio->flags & SRB_LOGIN_COND_PLOGI) + logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); + if (lio->flags & SRB_LOGIN_SKIP_PRLI) + logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->fcport->vp_idx; +} + +static void +qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) +{ + struct qla_hw_data *ha = sp->fcport->vha->hw; + struct srb_logio *lio = sp->ctx; + uint16_t opts; + + mbx->entry_type = MBX_IOCB_TYPE;; + SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); + mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); + opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0; + opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0; + if (HAS_EXTENDED_IDS(ha)) { + mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); + mbx->mb10 = cpu_to_le16(opts); + } else { + mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); + } + mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); + mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | + sp->fcport->d_id.b.al_pa); + mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); +} + +static void +qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) +{ + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = + cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; + logio->port_id[2] = sp->fcport->d_id.b.domain; + logio->vp_index = sp->fcport->vp_idx; +} + +static void +qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) +{ + struct qla_hw_data *ha = sp->fcport->vha->hw; + + mbx->entry_type = MBX_IOCB_TYPE;; + SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); + mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); + mbx->mb1 = HAS_EXTENDED_IDS(ha) ? + cpu_to_le16(sp->fcport->loop_id): + cpu_to_le16(sp->fcport->loop_id << 8); + mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); + mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | + sp->fcport->d_id.b.al_pa); + mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); + /* Implicit: mbx->mbx10 = 0. */ +} + +int +qla2x00_start_sp(srb_t *sp) +{ + int rval; + struct qla_hw_data *ha = sp->fcport->vha->hw; + void *pkt; + struct srb_ctx *ctx = sp->ctx; + unsigned long flags; + + rval = QLA_FUNCTION_FAILED; + spin_lock_irqsave(&ha->hardware_lock, flags); + pkt = qla2x00_alloc_iocbs(sp); + if (!pkt) + goto done; + + rval = QLA_SUCCESS; + switch (ctx->type) { + case SRB_LOGIN_CMD: + IS_FWI2_CAPABLE(ha) ? + qla24xx_login_iocb(sp, pkt): + qla2x00_login_iocb(sp, pkt); + break; + case SRB_LOGOUT_CMD: + IS_FWI2_CAPABLE(ha) ? 
+ qla24xx_logout_iocb(sp, pkt): + qla2x00_logout_iocb(sp, pkt); + break; + default: + break; + } + + wmb(); + qla2x00_start_iocbs(sp); +done: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 74fa6f992041..c0fec6914b96 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -919,6 +919,249 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, } } +static srb_t * +qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, + struct req_que *req, void *iocb) +{ + struct qla_hw_data *ha = vha->hw; + sts_entry_t *pkt = iocb; + srb_t *sp = NULL; + uint16_t index; + + index = LSW(pkt->handle); + if (index >= MAX_OUTSTANDING_COMMANDS) { + qla_printk(KERN_WARNING, ha, + "%s: Invalid completion handle (%x).\n", func, index); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + goto done; + } + sp = req->outstanding_cmds[index]; + if (!sp) { + qla_printk(KERN_WARNING, ha, + "%s: Invalid completion handle (%x) -- timed-out.\n", func, + index); + return sp; + } + if (sp->handle != index) { + qla_printk(KERN_WARNING, ha, + "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, + index); + return NULL; + } + req->outstanding_cmds[index] = NULL; +done: + return sp; +} + +static void +qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_entry *mbx) +{ + const char func[] = "MBX-IOCB"; + const char *type; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + srb_t *sp; + struct srb_logio *lio; + uint16_t data[2]; + + sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); + if (!sp) + return; + + type = NULL; + lio = sp->ctx; + switch (lio->ctx.type) { + case SRB_LOGIN_CMD: + type = "login"; + break; + case SRB_LOGOUT_CMD: + type = "logout"; + break; + default: + qla_printk(KERN_WARNING, ha, + "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, + lio->ctx.type); + return; + } + + del_timer(&lio->ctx.timer); + fcport = sp->fcport; + + data[0] = data[1] = 0; + if (mbx->entry_status) { + DEBUG2(printk(KERN_WARNING + "scsi(%ld:%x): Async-%s error entry - entry-status=%x " + "status=%x state-flag=%x status-flags=%x.\n", + fcport->vha->host_no, sp->handle, type, + mbx->entry_status, le16_to_cpu(mbx->status), + le16_to_cpu(mbx->state_flags), + le16_to_cpu(mbx->status_flags))); + DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); + + data[0] = MBS_COMMAND_ERROR; + data[1] = lio->flags & SRB_LOGIN_RETRIED ? + QLA_LOGIO_LOGIN_RETRIED: 0; + goto done_post_logio_done_work; + } + + if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { + DEBUG2(printk(KERN_DEBUG + "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n", + fcport->vha->host_no, sp->handle, type, + le16_to_cpu(mbx->mb1))); + + data[0] = MBS_COMMAND_COMPLETE; + if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1) + fcport->flags |= FCF_TAPE_PRESENT; + + goto done_post_logio_done_work; + } + + data[0] = le16_to_cpu(mbx->mb0); + switch (data[0]) { + case MBS_PORT_ID_USED: + data[1] = le16_to_cpu(mbx->mb1); + break; + case MBS_LOOP_ID_USED: + break; + default: + data[0] = MBS_COMMAND_ERROR; + data[1] = lio->flags & SRB_LOGIN_RETRIED ? 
+ QLA_LOGIO_LOGIN_RETRIED: 0; + break; + } + + DEBUG2(printk(KERN_WARNING + "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x " + "mb6=%x mb7=%x.\n", + fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status), + le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), + le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), + le16_to_cpu(mbx->mb7))); + +done_post_logio_done_work: + lio->ctx.type == SRB_LOGIN_CMD ? + qla2x00_post_async_login_done_work(fcport->vha, fcport, data): + qla2x00_post_async_logout_done_work(fcport->vha, fcport, data); + + lio->ctx.free(sp); +} + +static void +qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, + struct logio_entry_24xx *logio) +{ + const char func[] = "LOGIO-IOCB"; + const char *type; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + srb_t *sp; + struct srb_logio *lio; + uint16_t data[2]; + uint32_t iop[2]; + + sp = qla2x00_get_sp_from_handle(vha, func, req, logio); + if (!sp) + return; + + type = NULL; + lio = sp->ctx; + switch (lio->ctx.type) { + case SRB_LOGIN_CMD: + type = "login"; + break; + case SRB_LOGOUT_CMD: + type = "logout"; + break; + default: + qla_printk(KERN_WARNING, ha, + "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, + lio->ctx.type); + return; + } + + del_timer(&lio->ctx.timer); + fcport = sp->fcport; + + data[0] = data[1] = 0; + if (logio->entry_status) { + DEBUG2(printk(KERN_WARNING + "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n", + fcport->vha->host_no, sp->handle, type, + logio->entry_status)); + DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); + + data[0] = MBS_COMMAND_ERROR; + data[1] = lio->flags & SRB_LOGIN_RETRIED ? + QLA_LOGIO_LOGIN_RETRIED: 0; + goto done_post_logio_done_work; + } + + if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { + DEBUG2(printk(KERN_DEBUG + "scsi(%ld:%x): Async-%s complete - iop0=%x.\n", + fcport->vha->host_no, sp->handle, type, + le32_to_cpu(logio->io_parameter[0]))); + + data[0] = MBS_COMMAND_COMPLETE; + if (lio->ctx.type == SRB_LOGOUT_CMD) + goto done_post_logio_done_work; + + iop[0] = le32_to_cpu(logio->io_parameter[0]); + if (iop[0] & BIT_4) { + fcport->port_type = FCT_TARGET; + if (iop[0] & BIT_8) + fcport->flags |= FCF_TAPE_PRESENT; + } + if (iop[0] & BIT_5) + fcport->port_type = FCT_INITIATOR; + if (logio->io_parameter[7] || logio->io_parameter[8]) + fcport->supported_classes |= FC_COS_CLASS2; + if (logio->io_parameter[9] || logio->io_parameter[10]) + fcport->supported_classes |= FC_COS_CLASS3; + + goto done_post_logio_done_work; + } + + iop[0] = le32_to_cpu(logio->io_parameter[0]); + iop[1] = le32_to_cpu(logio->io_parameter[1]); + switch (iop[0]) { + case LSC_SCODE_PORTID_USED: + data[0] = MBS_PORT_ID_USED; + data[1] = LSW(iop[1]); + break; + case LSC_SCODE_NPORT_USED: + data[0] = MBS_LOOP_ID_USED; + break; + case LSC_SCODE_CMD_FAILED: + if ((iop[1] & 0xff) == 0x05) { + data[0] = MBS_NOT_LOGGED_IN; + break; + } + /* Fall through. */ + default: + data[0] = MBS_COMMAND_ERROR; + data[1] = lio->flags & SRB_LOGIN_RETRIED ? + QLA_LOGIO_LOGIN_RETRIED: 0; + break; + } + + DEBUG2(printk(KERN_WARNING + "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n", + fcport->vha->host_no, sp->handle, type, + le16_to_cpu(logio->comp_status), + le32_to_cpu(logio->io_parameter[0]), + le32_to_cpu(logio->io_parameter[1]))); + +done_post_logio_done_work: + lio->ctx.type == SRB_LOGIN_CMD ? 
+ qla2x00_post_async_login_done_work(fcport->vha, fcport, data): + qla2x00_post_async_logout_done_work(fcport->vha, fcport, data); + + lio->ctx.free(sp); +} + /** * qla2x00_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context @@ -980,6 +1223,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp) case STATUS_CONT_TYPE: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; + case MBX_IOCB_TYPE: + qla2x00_mbx_iocb_entry(vha, rsp->req, + (struct mbx_entry *)pkt); default: /* Type Not Supported. */ DEBUG4(printk(KERN_WARNING @@ -1590,6 +1836,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); break; + case LOGINOUT_PORT_IOCB_TYPE: + qla24xx_logio_entry(vha, rsp->req, + (struct logio_entry_24xx *)pkt); + break; default: /* Type Not Supported. */ DEBUG4(printk(KERN_WARNING diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 6238be37d7bb..a748a95efb10 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -253,6 +253,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) if (!(ha->current_topology & ISP_CFG_F)) return 0; + qla2x00_do_work(vha); + if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. complete port configuration */ if (atomic_read(&base_vha->loop_state) == LOOP_READY) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b6c088caf35e..5fd7adbff30b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1170,6 +1170,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) int que, cnt; unsigned long flags; srb_t *sp; + struct srb_ctx *ctx; struct qla_hw_data *ha = vha->hw; struct req_que *req; @@ -1182,8 +1183,14 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) sp = req->outstanding_cmds[cnt]; if (sp) { req->outstanding_cmds[cnt] = NULL; - sp->cmd->result = res; - qla2x00_sp_compl(ha, sp); + if (!sp->ctx) { + sp->cmd->result = res; + qla2x00_sp_compl(ha, sp); + } else { + ctx = sp->ctx; + del_timer_sync(&ctx->timer); + ctx->free(sp); + } } } } @@ -2618,7 +2625,31 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) return qla2x00_post_work(vha, e); } -static void +#define qla2x00_post_async_work(name, type) \ +int qla2x00_post_async_##name##_work( \ + struct scsi_qla_host *vha, \ + fc_port_t *fcport, uint16_t *data) \ +{ \ + struct qla_work_evt *e; \ + \ + e = qla2x00_alloc_work(vha, type); \ + if (!e) \ + return QLA_FUNCTION_FAILED; \ + \ + e->u.logio.fcport = fcport; \ + if (data) { \ + e->u.logio.data[0] = data[0]; \ + e->u.logio.data[1] = data[1]; \ + } \ + return qla2x00_post_work(vha, e); \ +} + +qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); +qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); +qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); +qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); + +void qla2x00_do_work(struct scsi_qla_host *vha) { struct qla_work_evt *e, *tmp; @@ -2640,6 +2671,21 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_IDC_ACK: qla81xx_idc_ack(vha, e->u.idc_ack.mb); break; + case QLA_EVT_ASYNC_LOGIN: + qla2x00_async_login(vha, e->u.logio.fcport, + e->u.logio.data); + break; + case QLA_EVT_ASYNC_LOGIN_DONE: + qla2x00_async_login_done(vha, e->u.logio.fcport, + e->u.logio.data); + break; + case QLA_EVT_ASYNC_LOGOUT: + qla2x00_async_logout(vha, e->u.logio.fcport); + break; + case QLA_EVT_ASYNC_LOGOUT_DONE: + 
qla2x00_async_logout_done(vha, e->u.logio.fcport, + e->u.logio.data); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -2655,6 +2701,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) int status; uint16_t next_loopid = 0; struct qla_hw_data *ha = vha->hw; + uint16_t data[2]; list_for_each_entry(fcport, &vha->vp_fcports, list) { /* @@ -2664,6 +2711,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry) { + fcport->login_retry--; if (fcport->flags & FCF_FABRIC_DEVICE) { if (fcport->flags & FCF_TAPE_PRESENT) ha->isp_ops->fabric_logout(vha, @@ -2672,13 +2720,22 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->d_id.b.area, fcport->d_id.b.al_pa); - status = qla2x00_fabric_login(vha, fcport, - &next_loopid); + if (IS_ALOGIO_CAPABLE(ha)) { + data[0] = 0; + data[1] = QLA_LOGIO_LOGIN_RETRIED; + status = qla2x00_post_async_login_work( + vha, fcport, data); + if (status == QLA_SUCCESS) + continue; + /* Attempt a retry. */ + status = 1; + } else + status = qla2x00_fabric_login(vha, + fcport, &next_loopid); } else status = qla2x00_local_device_login(vha, fcport); - fcport->login_retry--; if (status == QLA_SUCCESS) { fcport->old_loop_id = fcport->loop_id; @@ -2851,6 +2908,9 @@ qla2x00_do_dpc(void *data) */ ha->dpc_active = 0; + /* Cleanup any residual CTX SRBs. */ + qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + return 0; } From 523eeac6703a995d58918aaf321f128f75c13108 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Thu, 20 Aug 2009 15:10:57 -0500 Subject: [PATCH 118/209] [SCSI] iscsi_tcp: Evaluate socket state in data_ready() The network core will call the state_change() callback prior to the data_ready() callback, which might cause us to lose a connection state change. So we have to evaluate the socket state at the end of the data_ready() callback, too. Signed-off-by: Hannes Reinecke Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/iscsi_tcp.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 518dbd91df85..c7e2ff24ee9e 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -99,6 +99,24 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, return total_consumed; } +/** + * iscsi_sw_sk_state_check - check socket state + * @sk: socket + * + * If the socket is in CLOSE or CLOSE_WAIT we should + * not close the connection if there is still some + * data pending. 
+ */ +static inline int iscsi_sw_sk_state_check(struct sock *sk) +{ + if ((sk->sk_state == TCP_CLOSE_WAIT || + sk->sk_state == TCP_CLOSE) && + !atomic_read(&sk->sk_rmem_alloc)) + return -ECONNRESET; + + return 0; +} + static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) { struct iscsi_conn *conn = sk->sk_user_data; @@ -117,6 +135,12 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) rd_desc.count = 1; tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); + if (iscsi_sw_sk_state_check(sk) < 0) { + ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_data_ready: " + "TCP_CLOSE|TCP_CLOSE_WAIT\n"); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + } + read_unlock(&sk->sk_callback_lock); /* If we had to (atomically) map a highmem page, @@ -137,9 +161,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) conn = (struct iscsi_conn*)sk->sk_user_data; session = conn->session; - if ((sk->sk_state == TCP_CLOSE_WAIT || - sk->sk_state == TCP_CLOSE) && - !atomic_read(&sk->sk_rmem_alloc)) { + if (iscsi_sw_sk_state_check(sk) < 0) { ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: " "TCP_CLOSE|TCP_CLOSE_WAIT\n"); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); From d3305f3407fa3e9452079ec6cc8379067456e4aa Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:10:58 -0500 Subject: [PATCH 119/209] [SCSI] libiscsi: don't increment cmdsn if cmd is not sent We increment session->cmdsn at the top of iscsi_prep_scsi_cmd_pdu, but if the prep ecb or prep bidi or init_task calls fails then we leave the session->cmdsn incremented. This moves the cmdsn manipulation to the end of the function when we know it has succeeded. It also adds a session->cmdsn--; in queuecommand for if a driver like bnx2i tries to send a a task from that context but it fails. We do not have to do this in the xmit thread context because that code will retry the same task if the initial call fails. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index a751f6230c22..c68cb53a984b 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -301,8 +301,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) hdr->flags = ISCSI_ATTR_SIMPLE; int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); memcpy(task->lun, hdr->lun, sizeof(task->lun)); - hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); - session->cmdsn++; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); cmd_len = sc->cmd_len; if (cmd_len < ISCSI_CDB_SIZE) @@ -388,6 +386,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return -EIO; task->state = ISCSI_TASK_RUNNING; + hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); + session->cmdsn++; conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " @@ -1497,6 +1497,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) } } if (session->tt->xmit_task(task)) { + session->cmdsn--; reason = FAILURE_SESSION_NOT_READY; goto prepd_reject; } From 8afa1439fcff58da8f28c1d083046f229f6ab3de Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:10:59 -0500 Subject: [PATCH 120/209] [SCSI] libiscsi: handle immediate command rejections If we sent multiple pdus as immediate the target could be rejecting some and we have just been dropping the rejection notification. 
This adds code to handle nop-out rejections, so if a nop-out was sent as a ping and rejected we do not mark the connection bad. Instead we just clean up the timers since we have pdu making a rount trip we know the connection is good. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 110 ++++++++++++++++++++++++++++++++-------- 1 file changed, 89 insertions(+), 21 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c68cb53a984b..1d7a8b7e8a75 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -857,27 +857,102 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) } } +static int iscsi_nop_out_rsp(struct iscsi_task *task, + struct iscsi_nopin *nop, char *data, int datalen) +{ + struct iscsi_conn *conn = task->conn; + int rc = 0; + + if (conn->ping_task != task) { + /* + * If this is not in response to one of our + * nops then it must be from userspace. + */ + if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop, + data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; + } else + mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + return rc; +} + static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, int datalen) { struct iscsi_reject *reject = (struct iscsi_reject *)hdr; struct iscsi_hdr rejected_pdu; + int opcode, rc = 0; conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; - if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) { - if (ntoh24(reject->dlength) > datalen) - return ISCSI_ERR_PROTO; - - if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { - memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); - iscsi_conn_printk(KERN_ERR, conn, - "pdu (op 0x%x) rejected " - "due to DataDigest error.\n", - rejected_pdu.opcode); - } + if (ntoh24(reject->dlength) > datalen || + ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) { + iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected " + "pdu. Invalid data length (pdu dlength " + "%u, datalen %d\n", ntoh24(reject->dlength), + datalen); + return ISCSI_ERR_PROTO; } - return 0; + memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); + opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK; + + switch (reject->reason) { + case ISCSI_REASON_DATA_DIGEST_ERROR: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected " + "due to DataDigest error.\n", + rejected_pdu.itt, opcode); + break; + case ISCSI_REASON_IMM_CMD_REJECT: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected. Too many " + "immediate commands.\n", + rejected_pdu.itt, opcode); + /* + * We only send one TMF at a time so if the target could not + * handle it, then it should get fixed (RFC mandates that + * a target can handle one immediate TMF per conn). + * + * For nops-outs, we could have sent more than one if + * the target is sending us lots of nop-ins + */ + if (opcode != ISCSI_OP_NOOP_OUT) + return 0; + + if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) + /* + * nop-out in response to target's nop-out rejected. + * Just resend. + */ + iscsi_send_nopout(conn, + (struct iscsi_nopin*)&rejected_pdu); + else { + struct iscsi_task *task; + /* + * Our nop as ping got dropped. We know the target + * and transport are ok so just clean up + */ + task = iscsi_itt_to_task(conn, rejected_pdu.itt); + if (!task) { + iscsi_conn_printk(KERN_ERR, conn, + "Invalid pdu reject. 
Could " + "not lookup rejected task.\n"); + rc = ISCSI_ERR_BAD_ITT; + } else + rc = iscsi_nop_out_rsp(task, + (struct iscsi_nopin*)&rejected_pdu, + NULL, 0); + } + break; + default: + iscsi_conn_printk(KERN_ERR, conn, + "pdu (op 0x%x itt 0x%x) rejected. Reason " + "code 0x%x\n", rejected_pdu.itt, + rejected_pdu.opcode, reject->reason); + break; + } + return rc; } /** @@ -1038,15 +1113,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - if (conn->ping_task != task) - /* - * If this is not in response to one of our - * nops then it must be from userspace. - */ - goto recv_pdu; - - mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr, + data, datalen); break; default: rc = ISCSI_ERR_BAD_OPCODE; From b06fc73a9ebd352065dd4dd3139fb53ed72ac970 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:11:00 -0500 Subject: [PATCH 121/209] [SCSI] qla4xxx: Removed residual from overrun debug print The residual variable is only valid for udnerrun so do not print it out for the overrun case. Signed-off-by: Karen Higgins [Mike Christie: Fix coding style issues in patch] Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/qla4xxx/ql4_isr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 8025ee16588e..c196d55eae39 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -227,11 +227,11 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, case SCS_DATA_UNDERRUN: case SCS_DATA_OVERRUN: if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) || - (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { - DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, " - "residual = 0x%x\n", ha->host_no, + (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { + DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n", + ha->host_no, cmd->device->channel, cmd->device->id, - cmd->device->lun, __func__, residual)); + cmd->device->lun, __func__)); cmd->result = DID_ERROR << 16; break; From 632248aab3170004e24512a4378fc6d9d7f3b4ac Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:11:01 -0500 Subject: [PATCH 122/209] [SCSI] iscsi class: Add logging to scsi_transport_iscsi.c Logging for connections and sessions in the scsi_transport_iscsi module is now controlled by module parameters. Signed-off-by: Erez Zilber [Mike Christie: newline fixups and modification of some dbg statements] Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 73 ++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b47240ca4b19..ad897df36615 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -36,6 +36,38 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" +static int dbg_session; +module_param_named(debug_session, dbg_session, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_session, + "Turn on debugging for sessions in scsi_transport_iscsi " + "module. Set to 1 to turn on, and zero to turn off. 
Default " + "is off."); + +static int dbg_conn; +module_param_named(debug_conn, dbg_conn, int, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_conn, + "Turn on debugging for connections in scsi_transport_iscsi " + "module. Set to 1 to turn on, and zero to turn off. Default " + "is off."); + +#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...) \ + do { \ + if (dbg_session) \ + iscsi_cls_session_printk(KERN_INFO, _session, \ + "%s: " dbg_fmt, \ + __func__, ##arg); \ + } while (0); + +#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \ + do { \ + if (dbg_conn) \ + iscsi_cls_conn_printk(KERN_INFO, _conn, \ + "%s: " dbg_fmt, \ + __func__, ##arg); \ + } while (0); + struct iscsi_internal { struct scsi_transport_template t; struct iscsi_transport *iscsi_transport; @@ -377,6 +409,7 @@ static void iscsi_session_release(struct device *dev) shost = iscsi_session_to_shost(session); scsi_host_put(shost); + ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); kfree(session); } @@ -441,6 +474,9 @@ static int iscsi_user_scan_session(struct device *dev, void *data) return 0; session = iscsi_dev_to_session(dev); + + ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n"); + shost = iscsi_session_to_shost(session); ihost = shost->shost_data; @@ -448,8 +484,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data) spin_lock_irqsave(&session->lock, flags); if (session->state != ISCSI_SESSION_LOGGED_IN) { spin_unlock_irqrestore(&session->lock, flags); - mutex_unlock(&ihost->mutex); - return 0; + goto user_scan_exit; } id = session->target_id; spin_unlock_irqrestore(&session->lock, flags); @@ -462,7 +497,10 @@ static int iscsi_user_scan_session(struct device *dev, void *data) scsi_scan_target(&session->dev, 0, id, scan_data->lun, 1); } + +user_scan_exit: mutex_unlock(&ihost->mutex); + ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n"); return 0; } @@ -522,7 +560,9 @@ static void session_recovery_timedout(struct work_struct *work) if (session->transport->session_recovery_timedout) session->transport->session_recovery_timedout(session); + ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev); + ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); } static void __iscsi_unblock_session(struct work_struct *work) @@ -534,6 +574,7 @@ static void __iscsi_unblock_session(struct work_struct *work) struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; + ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n"); /* * The recovery and unblock work get run from the same workqueue, * so try to cancel it if it was going to run after this unblock. 
@@ -553,6 +594,7 @@ static void __iscsi_unblock_session(struct work_struct *work) if (scsi_queue_work(shost, &session->scan_work)) atomic_inc(&ihost->nr_scans); } + ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n"); } /** @@ -579,10 +621,12 @@ static void __iscsi_block_session(struct work_struct *work) block_work); unsigned long flags; + ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); scsi_target_block(&session->dev); + ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, session->recovery_tmo * HZ); } @@ -602,6 +646,8 @@ static void __iscsi_unbind_session(struct work_struct *work) struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; + ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); + /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); @@ -616,6 +662,7 @@ static void __iscsi_unbind_session(struct work_struct *work) scsi_remove_target(&session->dev); iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); + ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); } struct iscsi_cls_session * @@ -647,6 +694,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, device_initialize(&session->dev); if (dd_size) session->dd_data = &session[1]; + + ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n"); return session; } EXPORT_SYMBOL_GPL(iscsi_alloc_session); @@ -712,6 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) spin_unlock_irqrestore(&sesslock, flags); iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); + ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; release_host: @@ -752,6 +802,7 @@ static void iscsi_conn_release(struct device *dev) struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); struct device *parent = conn->dev.parent; + ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n"); kfree(conn); put_device(parent); } @@ -774,6 +825,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session) unsigned long flags; int err; + ISCSI_DBG_TRANS_SESSION(session, "Removing session\n"); + spin_lock_irqsave(&sesslock, flags); list_del(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); @@ -807,12 +860,15 @@ void iscsi_remove_session(struct iscsi_cls_session *session) "for session. 
Error %d.\n", err); transport_unregister_device(&session->dev); + + ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n"); device_del(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_session); void iscsi_free_session(struct iscsi_cls_session *session) { + ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION); put_device(&session->dev); } @@ -828,6 +884,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session); int iscsi_destroy_session(struct iscsi_cls_session *session) { iscsi_remove_session(session); + ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n"); iscsi_free_session(session); return 0; } @@ -885,6 +942,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) list_add(&conn->conn_list, &connlist); conn->active = 1; spin_unlock_irqrestore(&connlock, flags); + + ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n"); return conn; release_parent_ref: @@ -912,6 +971,7 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); + ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n"); device_unregister(&conn->dev); return 0; } @@ -1200,6 +1260,9 @@ int iscsi_session_event(struct iscsi_cls_session *session, "Cannot notify userspace of session " "event %u. Check iscsi daemon\n", event); + + ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n", + event, rc); return rc; } EXPORT_SYMBOL_GPL(iscsi_session_event); @@ -1221,6 +1284,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, shost = iscsi_session_to_shost(session); ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; + ISCSI_DBG_TRANS_SESSION(session, + "Completed creating transport session\n"); return 0; } @@ -1246,6 +1311,8 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) ev->r.c_conn_ret.sid = session->sid; ev->r.c_conn_ret.cid = conn->cid; + + ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n"); return 0; } @@ -1258,8 +1325,10 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev if (!conn) return -EINVAL; + ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); if (transport->destroy_conn) transport->destroy_conn(conn); + return 0; } From d1af8a328755f51c9b76157a8692e56520d3fd94 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:11:02 -0500 Subject: [PATCH 123/209] [SCSI] iscsi_tcp: add new conn error to indicate tcp conn closed If a target closed the connection, we will detect it in the state_changed or data_ready callout. This adds a new conn error value to use for this problem, so it is not confused with when the initiator throws a conn error and drops the connection. 
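The condition the helper tests, namely that the socket has moved to TCP_CLOSE or TCP_CLOSE_WAIT while no received data is still queued, is simple enough to model outside the kernel. The following standalone C sketch mirrors that decision with toy state names and a made-up pending-byte count standing in for sk->sk_state and sk->sk_rmem_alloc; it is an illustration of the check, not driver code.

#include <stdio.h>

/* toy stand-ins for the two TCP states the driver treats as "peer closed" */
enum toy_state { TOY_ESTABLISHED, TOY_CLOSE_WAIT, TOY_CLOSE };

/*
 * Same shape as the test in iscsi_sw_sk_state_check(): only report the
 * connection as closed by the target once the receive queue is drained.
 */
static int conn_closed_by_target(enum toy_state state, unsigned int rmem_alloc)
{
        return (state == TOY_CLOSE || state == TOY_CLOSE_WAIT) && !rmem_alloc;
}

int main(void)
{
        /* peer closed, but 512 bytes are still unread: keep the connection */
        printf("%d\n", conn_closed_by_target(TOY_CLOSE_WAIT, 512));
        /* peer closed and nothing pending: raise the new conn-close error */
        printf("%d\n", conn_closed_by_target(TOY_CLOSE_WAIT, 0));
        return 0;
}

With the separate ISCSI_ERR_TCP_CONN_CLOSE value, the second case can be reported to userspace distinctly from failures the initiator itself detects.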
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/iscsi_tcp.c | 23 +++++++++-------------- include/scsi/iscsi_if.h | 1 + 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index c7e2ff24ee9e..2b1b834a098b 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -109,11 +109,14 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, */ static inline int iscsi_sw_sk_state_check(struct sock *sk) { - if ((sk->sk_state == TCP_CLOSE_WAIT || - sk->sk_state == TCP_CLOSE) && - !atomic_read(&sk->sk_rmem_alloc)) - return -ECONNRESET; + struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; + if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && + !atomic_read(&sk->sk_rmem_alloc)) { + ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); + iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); + return -ECONNRESET; + } return 0; } @@ -135,11 +138,7 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) rd_desc.count = 1; tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); - if (iscsi_sw_sk_state_check(sk) < 0) { - ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_data_ready: " - "TCP_CLOSE|TCP_CLOSE_WAIT\n"); - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - } + iscsi_sw_sk_state_check(sk); read_unlock(&sk->sk_callback_lock); @@ -161,11 +160,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) conn = (struct iscsi_conn*)sk->sk_user_data; session = conn->session; - if (iscsi_sw_sk_state_check(sk) < 0) { - ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: " - "TCP_CLOSE|TCP_CLOSE_WAIT\n"); - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - } + iscsi_sw_sk_state_check(sk); tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 4426f00da5ff..d67dda2b6aa0 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -262,6 +262,7 @@ enum iscsi_err { ISCSI_ERR_NO_SCSI_CMD = ISCSI_ERR_BASE + 17, ISCSI_ERR_INVALID_HOST = ISCSI_ERR_BASE + 18, ISCSI_ERR_XMIT_FAILED = ISCSI_ERR_BASE + 19, + ISCSI_ERR_TCP_CONN_CLOSE = ISCSI_ERR_BASE + 20, }; /* From 70b31c152dc49ef70bd2b34ad53ccbd9bb4116d4 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 20 Aug 2009 15:11:03 -0500 Subject: [PATCH 124/209] [SCSI] libiscsi, iscsi_tcp: check suspend bit before each call to xmit_task If we had multiple tasks on the cmd or requeue lists, and iscsi_tcp returns a error, the write_space function can still run and queue iscsi_data_xmit. If it was a legetimate problem and iscsi_conn_failure was run but we raced and iscsi_data_xmit was run first it could miss the suspend bit checks, and start trying to send data again and hit another timeout. A similar problem is present when using cxgb3i. This has libiscsi check the suspend bit before calling the xmit task callout, so we at least do not try sending multiple tasks (one could be sent). 
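The effect of moving the check is easiest to see with a toy transmit loop: once a failure path flags the connection, re-testing the flag before every task stops the loop right away instead of draining whatever was already queued. This is only a schematic of the pattern in plain C, with a bool standing in for the ISCSI_SUSPEND_BIT test; it is not the libiscsi code itself.

#include <stdio.h>
#include <stdbool.h>

static bool suspend_tx;         /* stands in for ISCSI_SUSPEND_BIT in conn->suspend_tx */

static void xmit_task(int i)
{
        printf("sent task %d\n", i);
        if (i == 1)
                suspend_tx = true;      /* a conn failure is flagged meanwhile */
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                if (suspend_tx) {       /* re-check before every task */
                        printf("tx suspended, not sending task %d\n", i);
                        break;
                }
                xmit_task(i);
        }
        return 0;
}

Only the task already in flight when the flag was set goes out; everything queued behind it is held back, matching the "one could be sent" caveat above.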
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 1d7a8b7e8a75..a7ee4bb40708 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1280,6 +1280,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) struct iscsi_task *task = conn->task; int rc; + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) + return -ENODATA; + __iscsi_get_task(task); spin_unlock_bh(&conn->session->lock); rc = conn->session->tt->xmit_task(task); @@ -1329,7 +1332,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) int rc = 0; spin_lock_bh(&conn->session->lock); - if (unlikely(conn->suspend_tx)) { + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); spin_unlock_bh(&conn->session->lock); return -ENODATA; @@ -1338,7 +1341,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) if (conn->task) { rc = iscsi_xmit_task(conn); if (rc) - goto again; + goto done; } /* @@ -1358,7 +1361,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } rc = iscsi_xmit_task(conn); if (rc) - goto again; + goto done; } /* process pending command queue */ @@ -1379,14 +1382,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) list_add_tail(&conn->task->running, &conn->cmdqueue); conn->task = NULL; - goto again; + goto done; } else fail_scsi_task(conn->task, DID_ABORT); continue; } rc = iscsi_xmit_task(conn); if (rc) - goto again; + goto done; /* * we could continuously get new task requests so * we need to check the mgmt queue for nops that need to @@ -1412,16 +1415,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) conn->task->state = ISCSI_TASK_RUNNING; rc = iscsi_xmit_task(conn); if (rc) - goto again; + goto done; if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } spin_unlock_bh(&conn->session->lock); return -ENODATA; -again: - if (unlikely(conn->suspend_tx)) - rc = -ENODATA; +done: spin_unlock_bh(&conn->session->lock); return rc; } From ee610c6701875525d5c61fa6e56ddbf3e645b8df Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 25 Aug 2009 13:58:31 -0700 Subject: [PATCH 125/209] [SCSI] fcoe: Add format spacing to FCOE_NETDEV_DBG debug macro There's currently no space between the interface name and the user specified format/string. This patch adds a space and a colon to the output to separate the interface name and the user specified string. So, instead of "ethXfoo" it will read "ethX: foo". Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 5ae8ca71afcb..68b9f855073b 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -61,7 +61,7 @@ do { \ #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ - printk(KERN_INFO "fcoe: %s" fmt, \ + printk(KERN_INFO "fcoe: %s: " fmt, \ netdev->name, ##args);) /* From cd305ce41be1615dfc72892e0642c6b880f58d95 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 25 Aug 2009 13:58:37 -0700 Subject: [PATCH 126/209] [SCSI] libfc: Fix misleading debug statement The statement reads, "Exchange timed out, notifying the upper layer", however, this statement is printed whenever the timer is armed. 
This is confusing to someone debugging the code because every time an exchange is initialized, there is an incorrect statement stating that the timer has already timed out. This patch changes the statement to read, "Exchange timer armed" which is more accurate. This patch also adds a debug statement in the timeout handler to properly indicate that the exchange has timed out. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 11ddd115efb6..40c34274bd81 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -326,7 +326,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) return; - FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n"); + FC_EXCH_DBG(ep, "Exchange timer armed\n"); if (schedule_delayed_work(&ep->timeout_work, msecs_to_jiffies(timer_msec))) @@ -412,6 +412,8 @@ static void fc_exch_timeout(struct work_struct *work) u32 e_stat; int rc = 1; + FC_EXCH_DBG(ep, "Exchange timed out\n"); + spin_lock_bh(&ep->ex_lock); if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) goto unlock; From a69b06bc5e7b153043db8984564b731f99e014fc Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 13:58:42 -0700 Subject: [PATCH 127/209] [SCSI] fcoe: libfcoe: extra semicolon in CHECK_LOGGING macros causes compile error If using code like this: if (foo) FCOE_DBG("foo\n); else FCOE_DBG("bar\n"); one gets compile errors because FCOE_DBG expands with its own semicolon, making one too many for the if-statement. Remove the offending semicolon in fcoe.h and also a similar case in libfcoe.c. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.h | 2 +- drivers/scsi/fcoe/libfcoe.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 68b9f855073b..65120e21f5c4 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -53,7 +53,7 @@ do { \ do { \ CMD; \ } while (0); \ -} while (0); +} while (0) #define FCOE_DBG(fmt, args...) \ FCOE_CHECK_LOGGING(FCOE_LOGGING, \ diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 4db719d6ada1..d0b5208e1a45 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -69,7 +69,7 @@ do { \ do { \ CMD; \ } while (0); \ -} while (0); +} while (0) #define LIBFCOE_DBG(fmt, args...) \ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ From e4bc50bedf0dd6c63f20a7bc0a2b46667664fba1 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 25 Aug 2009 13:58:47 -0700 Subject: [PATCH 128/209] [SCSI] fcoe, libfc: adds per cpu exch pool within exchange manager(EM) Adds per cpu exch pool for these reasons:- 1. Currently an EM instance is shared across all cpus to manage all exches for all cpus. This required em_lock across all cpus for an exch alloc, free, lookup and reset each frame and that made em_lock expensive, so instead having per cpu exch pool with their own per cpu pool lock will likely reduce locking contention in fast path for an exch alloc, free and lookup. 2. Per cpu exch pool will likely improve cache hit ratio since all frames of an exch will be processed on the same cpu on which exch originated. 
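One implementation detail worth noting before the diff: a pool's exchange pointer array is not a separate allocation. It lives immediately behind struct fc_exch_pool inside the same per cpu allocation and is reached through (pool + 1), which is all the fc_exch_ptr_get/set helpers below do. A minimal userspace sketch of that layout follows, with malloc standing in for __alloc_percpu and made-up field names and sizes.

#include <stdio.h>
#include <stdlib.h>

struct exch {                           /* stand-in for struct fc_exch */
        unsigned int xid;
};

struct exch_pool {                      /* stand-in for struct fc_exch_pool */
        unsigned short next_index;
        unsigned short total_exches;
        void *ex_list;                  /* keeps (pool + 1) pointer-aligned */
};

/* the pointer slots start right after the pool header */
static struct exch **pool_slots(struct exch_pool *pool)
{
        return (struct exch **)(pool + 1);
}

int main(void)
{
        unsigned int pool_exch_range = 512;     /* exchanges per pool, illustrative */
        struct exch_pool *pool = calloc(1, sizeof(*pool) +
                                        pool_exch_range * sizeof(struct exch *));
        struct exch ep = { .xid = 0x1055 };

        if (!pool)
                return 1;
        pool_slots(pool)[10] = &ep;             /* like fc_exch_ptr_set(pool, 10, ep) */
        printf("slot 10 holds xid 0x%x\n", pool_slots(pool)[10]->xid);
        free(pool);
        return 0;
}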
This patch is only prep work to help in keeping complexity of next patch low, so this patch only sets up per cpu exch pool and related helper funcs to be used by next patch. The next patch fully makes use of per cpu exch pool in all code paths ie. tx, rx and reset. Divides per EM exch id range equally across all cpus to setup per cpu exch pool. This division is such that lower bits of exch id carries cpu number info on which exch originated, later a simple bitwise AND operation on exch id of incoming frame with fc_cpu_mask retrieves cpu number info to direct all frames to same cpu on which exch originated. This required a global fc_cpu_mask and fc_cpu_order initialized to max possible cpus number nr_cpu_ids rounded up to 2's power, this will be used in mapping exch id and exch ptr array index in pool during exch allocation, find or reset code paths. Adds a check in fc_exch_mgr_alloc() to ensure specified min_xid lower bits are zero since these bits are used to carry cpu info. Adds and initializes struct fc_exch_pool with all required fields to manage exches in pool. Allocates per cpu struct fc_exch_pool with memory for exches array for range of exches per pool. The exches array memory is followed by struct fc_exch_pool. Adds fc_exch_ptr_get/set() helper functions to get/set exch ptr in pool exches array at specified array index. Increases default FCOE_MAX_XID to 0x0FFF from 0x07EF, so that more exches are available per cpu after above described exch id range division across all cpus to each pool. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.h | 2 +- drivers/scsi/libfc/fc_exch.c | 89 ++++++++++++++++++++++++++++++++++-- include/scsi/libfc.h | 1 + 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 65120e21f5c4..550d1e49d1a3 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -38,7 +38,7 @@ #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ -#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ +#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 40c34274bd81..9cbe8d66eb25 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -32,6 +32,9 @@ #include #include +u16 fc_cpu_mask; /* cpu mask for possible cpus */ +EXPORT_SYMBOL(fc_cpu_mask); +static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ /* @@ -47,6 +50,20 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ * fc_seq holds the state for an individual sequence. */ +/* + * Per cpu exchange pool + * + * This structure manages per cpu exchanges in array of exchange pointers. + * This array is allocated followed by struct fc_exch_pool memory for + * assigned range of exchanges to per cpu pool. + */ +struct fc_exch_pool { + u16 next_index; /* next possible free exchange index */ + u16 total_exches; /* total allocated exchanges */ + spinlock_t lock; /* exch pool lock */ + struct list_head ex_list; /* allocated exchanges list */ +}; + /* * Exchange manager. 
* @@ -66,6 +83,8 @@ struct fc_exch_mgr { u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ mempool_t *ep_pool; /* reserve ep's */ + u16 pool_max_index; /* max exch array index in exch pool */ + struct fc_exch_pool *pool; /* per cpu exch pool */ /* * currently exchange mgr stats are updated but not used. @@ -303,6 +322,19 @@ static int fc_exch_done_locked(struct fc_exch *ep) return rc; } +static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, + u16 index) +{ + struct fc_exch **exches = (struct fc_exch **)(pool + 1); + return exches[index]; +} + +static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, + struct fc_exch *ep) +{ + ((struct fc_exch **)(pool + 1))[index] = ep; +} + static void fc_exch_mgr_delete_ep(struct fc_exch *ep) { struct fc_exch_mgr *mp; @@ -1751,6 +1783,7 @@ static void fc_exch_mgr_destroy(struct kref *kref) */ WARN_ON(mp->total_exches != 0); mempool_destroy(mp->ep_pool); + free_percpu(mp->pool); kfree(mp); } @@ -1770,8 +1803,13 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, { struct fc_exch_mgr *mp; size_t len; + u16 pool_exch_range; + size_t pool_size; + unsigned int cpu; + struct fc_exch_pool *pool; - if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || + (min_xid & fc_cpu_mask) != 0) { FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; @@ -1802,10 +1840,31 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (!mp->ep_pool) goto free_mp; + /* + * Setup per cpu exch pool with entire exchange id range equally + * divided across all cpus. The exch pointers array memory is + * allocated for exch range per pool. + */ + pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1); + mp->pool_max_index = pool_exch_range - 1; + + /* + * Allocate and initialize per cpu exch pool + */ + pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *); + mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool)); + if (!mp->pool) + goto free_mempool; + for_each_possible_cpu(cpu) { + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_init(&pool->lock); + INIT_LIST_HEAD(&pool->ex_list); + } + kref_init(&mp->kref); if (!fc_exch_mgr_add(lp, mp, match)) { - mempool_destroy(mp->ep_pool); - goto free_mp; + free_percpu(mp->pool); + goto free_mempool; } /* @@ -1816,6 +1875,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, kref_put(&mp->kref, fc_exch_mgr_destroy); return mp; +free_mempool: + mempool_destroy(mp->ep_pool); free_mp: kfree(mp); return NULL; @@ -1975,6 +2036,28 @@ int fc_exch_init(struct fc_lport *lp) if (!lp->tt.seq_exch_abort) lp->tt.seq_exch_abort = fc_seq_exch_abort; + /* + * Initialize fc_cpu_mask and fc_cpu_order. The + * fc_cpu_mask is set for nr_cpu_ids rounded up + * to order of 2's * power and order is stored + * in fc_cpu_order as this is later required in + * mapping between an exch id and exch array index + * in per cpu exch pool. + * + * This round up is required to align fc_cpu_mask + * to exchange id's lower bits such that all incoming + * frames of an exchange gets delivered to the same + * cpu on which exchange originated by simple bitwise + * AND operation between fc_cpu_mask and exchange id. 
+ */ + fc_cpu_mask = 1; + fc_cpu_order = 0; + while (fc_cpu_mask < nr_cpu_ids) { + fc_cpu_mask <<= 1; + fc_cpu_order++; + } + fc_cpu_mask--; + return 0; } EXPORT_SYMBOL(fc_exch_init); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index c2b928cfafb9..32063389c4b0 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -343,6 +343,7 @@ static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp) struct fc_exch_mgr; struct fc_exch_mgr_anchor; +extern u16 fc_cpu_mask; /* cpu mask for possible cpus */ /* * Sequence. From b2f0091fbf8b475fa09b5e1712e0ab84cb3e1ca4 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 25 Aug 2009 13:58:53 -0700 Subject: [PATCH 129/209] [SCSI] fcoe, libfc: fully makes use of per cpu exch pool and then removes em_lock 1. Updates fcoe_rcv() to queue incoming frames to the fcoe per cpu thread on which this frame's exch was originated and simply use current cpu for request exch not originated by initiator. It is redundant to add this code under CONFIG_SMP, so removes CONFIG_SMP uses around this code. 2. Updates fc_exch_em_alloc, fc_exch_delete, fc_exch_find to use per cpu exch pools, here fc_exch_delete is rename of older fc_exch_mgr_delete_ep since ep/exch are now deleted in pools of EM and so brief new name is sufficient and better name. Updates these functions to map exch id to their index into exch pool using fc_cpu_mask, fc_cpu_order and EM min_xid. This mapping is as per detailed explanation about this in last patch and basically this is just as lower fc_cpu_mask bits of exch id as cpu number and upper bit sum of EM min_xid and exch index in pool. Uses pool next_index to keep track of exch allocation from pool along with pool_max_index as upper bound of exches array in pool. 3. Adds exch pool ptr to fc_exch to free exch to its pool in fc_exch_delete. 4. Updates fc_exch_mgr_reset to reset all exch pools of an EM, this required adding fc_exch_pool_reset func to reset exches in pool and then have fc_exch_mgr_reset call fc_exch_pool_reset for each pool within each EM for a lport. 5. Removes no longer needed exches array, em_lock, next_xid, and total_exches from struct fc_exch_mgr, these are not needed after use of per cpu exch pool, also removes not used max_read, last_read from struct fc_exch_mgr. 6. Updates locking notes for exch pool lock with fc_exch lock and uses pool lock in exch allocation, lookup and reset. 
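The xid layout that ties this patch to the previous one can be checked by hand: the low fc_cpu_order bits of an exchange id name the cpu whose pool owns it, and the remaining bits, offset by the EM's min_xid, give the slot in that pool. Below is a standalone sketch of the round trip using an illustrative cpu count, min_xid and slot; the real values depend on the system and on the EM's xid range.

#include <stdio.h>

int main(void)
{
        unsigned int nr_cpu_ids = 6;            /* pretend 6 possible cpus */
        unsigned int min_xid = 0x1000;          /* its low bits must be zero */
        unsigned int fc_cpu_mask = 1, fc_cpu_order = 0;

        /* round the cpu count up to a power of two, as fc_exch_init() does */
        while (fc_cpu_mask < nr_cpu_ids) {
                fc_cpu_mask <<= 1;
                fc_cpu_order++;
        }
        fc_cpu_mask--;                          /* 6 cpus -> order 3, mask 0x7 */

        /* allocation on cpu 5, pool slot 10 -> exchange id */
        unsigned int cpu = 5, index = 10;
        unsigned int xid = (index << fc_cpu_order | cpu) + min_xid;

        /* receive path: recover the owning cpu and slot from the xid alone */
        unsigned int rx_cpu = xid & fc_cpu_mask;
        unsigned int rx_index = (xid - min_xid) >> fc_cpu_order;

        printf("xid 0x%04x -> cpu %u, slot %u\n", xid, rx_cpu, rx_index);
        return 0;
}

This is also why the previous patch made fc_exch_mgr_alloc() reject a min_xid whose low bits are not zero: those bits carry the cpu number, so they must not be consumed by the base offset.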
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 19 ++-- drivers/scsi/libfc/fc_exch.c | 183 +++++++++++++++++++---------------- include/scsi/libfc.h | 9 +- 3 files changed, 112 insertions(+), 99 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 757aa28f0f04..e32a0ed266aa 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -912,8 +912,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, struct fcoe_softc *fc; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; - unsigned short oxid; - unsigned int cpu = 0; + unsigned int cpu; fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); lp = fc->ctlr.lp; @@ -947,20 +946,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); fh = (struct fc_frame_header *) skb_transport_header(skb); - oxid = ntohs(fh->fh_ox_id); - fr = fcoe_dev_from_skb(skb); fr->fr_dev = lp; fr->ptype = ptype; -#ifdef CONFIG_SMP /* - * The incoming frame exchange id(oxid) is ANDed with num of online - * cpu bits to get cpu and then this cpu is used for selecting - * a per cpu kernel thread from fcoe_percpu. + * In case the incoming frame's exchange is originated from + * the initiator, then received frame's exchange id is ANDed + * with fc_cpu_mask bits to get the same cpu on which exchange + * was originated, otherwise just use the current cpu. */ - cpu = oxid & (num_online_cpus() - 1); -#endif + if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) + cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; + else + cpu = smp_processor_id(); fps = &per_cpu(fcoe_percpu, cpu); spin_lock_bh(&fps->fcoe_rx_list.lock); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 9cbe8d66eb25..b51db15a3876 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -73,14 +73,8 @@ struct fc_exch_pool { struct fc_exch_mgr { enum fc_class class; /* default class for sequences */ struct kref kref; /* exchange mgr reference count */ - spinlock_t em_lock; /* exchange manager lock, - must be taken before ex_lock */ - u16 next_xid; /* next possible free exchange ID */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ - u16 max_read; /* max exchange ID for read */ - u16 last_read; /* last xid allocated for read */ - u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ mempool_t *ep_pool; /* reserve ep's */ u16 pool_max_index; /* max exch array index in exch pool */ @@ -99,7 +93,6 @@ struct fc_exch_mgr { atomic_t seq_not_found; atomic_t non_bls_resp; } stats; - struct fc_exch **exches; /* for exch pointers indexed by xid */ }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) @@ -192,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); * sequence allocation and deallocation must be locked. * - exchange refcnt can be done atomicly without locks. * - sequence allocation must be locked by exch lock. - * - If the em_lock and ex_lock must be taken at the same time, then the - * em_lock must be taken before the ex_lock. + * - If the EM pool lock and ex_lock must be taken at the same time, then the + * EM pool lock must be taken before the ex_lock. 
*/ /* @@ -335,17 +328,18 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, ((struct fc_exch **)(pool + 1))[index] = ep; } -static void fc_exch_mgr_delete_ep(struct fc_exch *ep) +static void fc_exch_delete(struct fc_exch *ep) { - struct fc_exch_mgr *mp; + struct fc_exch_pool *pool; - mp = ep->em; - spin_lock_bh(&mp->em_lock); - WARN_ON(mp->total_exches <= 0); - mp->total_exches--; - mp->exches[ep->xid - mp->min_xid] = NULL; + pool = ep->pool; + spin_lock_bh(&pool->lock); + WARN_ON(pool->total_exches <= 0); + pool->total_exches--; + fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order, + NULL); list_del(&ep->ex_list); - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); fc_exch_release(ep); /* drop hold for exch in mp */ } @@ -465,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); fc_seq_exch_abort(sp, 2 * ep->r_a_tov); @@ -509,10 +503,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, struct fc_exch_mgr *mp) { struct fc_exch *ep; - u16 min, max, xid; - - min = mp->min_xid; - max = mp->max_xid; + unsigned int cpu; + u16 index; + struct fc_exch_pool *pool; /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); @@ -522,15 +515,17 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - spin_lock_bh(&mp->em_lock); - xid = mp->next_xid; - /* alloc a new xid */ - while (mp->exches[xid - min]) { - xid = (xid == max) ? min : xid + 1; - if (xid == mp->next_xid) + cpu = smp_processor_id(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); + index = pool->next_index; + /* allocate new exch from pool */ + while (fc_exch_ptr_get(pool, index)) { + index = index == mp->pool_max_index ? 0 : index + 1; + if (index == pool->next_index) goto err; } - mp->next_xid = (xid == max) ? min : xid + 1; + pool->next_index = index == mp->pool_max_index ? 
0 : index + 1; fc_exch_hold(ep); /* hold for exch in mp */ spin_lock_init(&ep->ex_lock); @@ -541,17 +536,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, */ spin_lock_bh(&ep->ex_lock); - mp->exches[xid - mp->min_xid] = ep; - list_add_tail(&ep->ex_list, &mp->ex_list); + fc_exch_ptr_set(pool, index, ep); + list_add_tail(&ep->ex_list, &pool->ex_list); fc_seq_alloc(ep, ep->seq_id++); - mp->total_exches++; - spin_unlock_bh(&mp->em_lock); + pool->total_exches++; + spin_unlock_bh(&pool->lock); /* * update exchange */ - ep->oxid = ep->xid = xid; + ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid; ep->em = mp; + ep->pool = pool; ep->lp = lport; ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; @@ -560,7 +556,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, out: return ep; err: - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); atomic_inc(&mp->stats.no_free_exch_xid); mempool_free(ep, mp->ep_pool); return NULL; @@ -597,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc); */ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { + struct fc_exch_pool *pool; struct fc_exch *ep = NULL; if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { - spin_lock_bh(&mp->em_lock); - ep = mp->exches[xid - mp->min_xid]; + pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); + spin_lock_bh(&pool->lock); + ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); if (ep) { fc_exch_hold(ep); WARN_ON(ep->xid != xid); } - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); } return ep; } @@ -620,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); } EXPORT_SYMBOL(fc_exch_done); @@ -1213,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); } /* @@ -1323,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, fp, ex_resp_arg); @@ -1466,48 +1464,76 @@ static void fc_exch_reset(struct fc_exch *ep) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); } -/* - * Reset an exchange manager, releasing all sequences and exchanges. - * If sid is non-zero, reset only exchanges we source from that FID. - * If did is non-zero, reset only exchanges destined to that FID. +/** + * fc_exch_pool_reset() - Resets an per cpu exches pool. + * @lport: ptr to the local port + * @pool: ptr to the per cpu exches pool + * @sid: source FC ID + * @did: destination FC ID + * + * Resets an per cpu exches pool, releasing its all sequences + * and exchanges. If sid is non-zero, then reset only exchanges + * we sourced from that FID. If did is non-zero, reset only + * exchanges destined to that FID. 
*/ -void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) +static void fc_exch_pool_reset(struct fc_lport *lport, + struct fc_exch_pool *pool, + u32 sid, u32 did) { struct fc_exch *ep; struct fc_exch *next; - struct fc_exch_mgr *mp; - struct fc_exch_mgr_anchor *ema; - list_for_each_entry(ema, &lp->ema_list, ema_list) { - mp = ema->mp; - spin_lock_bh(&mp->em_lock); + spin_lock_bh(&pool->lock); restart: - list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { - if ((lp == ep->lp) && - (sid == 0 || sid == ep->sid) && - (did == 0 || did == ep->did)) { - fc_exch_hold(ep); - spin_unlock_bh(&mp->em_lock); + list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) { + if ((lport == ep->lp) && + (sid == 0 || sid == ep->sid) && + (did == 0 || did == ep->did)) { + fc_exch_hold(ep); + spin_unlock_bh(&pool->lock); - fc_exch_reset(ep); + fc_exch_reset(ep); - fc_exch_release(ep); - spin_lock_bh(&mp->em_lock); + fc_exch_release(ep); + spin_lock_bh(&pool->lock); - /* - * must restart loop incase while lock - * was down multiple eps were released. - */ - goto restart; - } + /* + * must restart loop incase while lock + * was down multiple eps were released. + */ + goto restart; } - spin_unlock_bh(&mp->em_lock); + } + spin_unlock_bh(&pool->lock); +} + +/** + * fc_exch_mgr_reset() - Resets all EMs of a lport + * @lport: ptr to the local port + * @sid: source FC ID + * @did: destination FC ID + * + * Reset all EMs of a lport, releasing its all sequences and + * exchanges. If sid is non-zero, then reset only exchanges + * we sourced from that FID. If did is non-zero, reset only + * exchanges destined to that FID. + */ +void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) +{ + struct fc_exch_mgr_anchor *ema; + unsigned int cpu; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + for_each_possible_cpu(cpu) + fc_exch_pool_reset(lport, + per_cpu_ptr(ema->mp->pool, cpu), + sid, did); } } EXPORT_SYMBOL(fc_exch_mgr_reset); @@ -1777,11 +1803,6 @@ static void fc_exch_mgr_destroy(struct kref *kref) { struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); - /* - * The total exch count must be zero - * before freeing exchange manager. 
- */ - WARN_ON(mp->total_exches != 0); mempool_destroy(mp->ep_pool); free_percpu(mp->pool); kfree(mp); @@ -1802,7 +1823,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, bool (*match)(struct fc_frame *)) { struct fc_exch_mgr *mp; - size_t len; u16 pool_exch_range; size_t pool_size; unsigned int cpu; @@ -1816,25 +1836,16 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, } /* - * Memory need for EM + * allocate memory for EM */ - len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *)); - len += sizeof(struct fc_exch_mgr); - - mp = kzalloc(len, GFP_ATOMIC); + mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC); if (!mp) return NULL; mp->class = class; - mp->total_exches = 0; - mp->exches = (struct fc_exch **)(mp + 1); /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; - mp->next_xid = min_xid; - - INIT_LIST_HEAD(&mp->ex_list); - spin_lock_init(&mp->em_lock); mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); if (!mp->ep_pool) @@ -1944,7 +1955,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); return NULL; } EXPORT_SYMBOL(fc_exch_seq_send); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 32063389c4b0..53b38814d38a 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -368,6 +368,7 @@ struct fc_seq { */ struct fc_exch { struct fc_exch_mgr *em; /* exchange manager */ + struct fc_exch_pool *pool; /* per cpu exches pool */ u32 state; /* internal driver state */ u16 xid; /* our exchange ID */ struct list_head ex_list; /* free or busy list linkage */ @@ -1045,10 +1046,12 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp); */ struct fc_seq *fc_seq_start_next(struct fc_seq *sp); + /* - * Reset an exchange manager, completing all sequences and exchanges. - * If s_id is non-zero, reset only exchanges originating from that FID. - * If d_id is non-zero, reset only exchanges sending to that FID. + * Reset all EMs of a lport, releasing its all sequences and + * exchanges. If sid is non-zero, then reset only exchanges + * we sourced from that FID. If did is non-zero, reset only + * exchanges destined to that FID. */ void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id); From 05cc7390735c49357b9ae67bf97f1c1579547f5b Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 25 Aug 2009 13:59:03 -0700 Subject: [PATCH 130/209] [SCSI] fcoe: Add sysfs parameter to fcoe for minimum DDP read I/O size This adds fcoe_ddp_min as a module parameter for fcoe module to: /sys/module/fcoe/parameters/ddp_min It is observed that for some hardware, particularly Intel 82599, there is too much overhead in setting up context for direct data placement (DDP) read when the requested read I/O size is small. This is added as a module parameter for performance tuning and is set as 0 by default and user can change this based on their own hardware. 
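Because the parameter is registered with S_IRUGO | S_IWUSR, the threshold can be changed at runtime by writing a byte count to /sys/module/fcoe/parameters/ddp_min. The gate itself is a single strict comparison; the toy below restates the check this patch adds to fcoe_oem_match(), with an assumed 4 KiB threshold, mainly to show that a read whose length equals ddp_min still skips DDP.

#include <stdio.h>
#include <stdbool.h>

/* toy restatement of the read-I/O gate added to fcoe_oem_match() */
static bool use_ddp(bool is_read, unsigned int data_len, unsigned int ddp_min)
{
        return is_read && data_len > ddp_min;   /* strictly greater than */
}

int main(void)
{
        unsigned int ddp_min = 4096;            /* assumed tuning value */

        printf("512 B read:   %d\n", use_ddp(true, 512, ddp_min));     /* 0 */
        printf("4 KiB read:   %d\n", use_ddp(true, 4096, ddp_min));    /* 0, not strictly larger */
        printf("64 KiB read:  %d\n", use_ddp(true, 65536, ddp_min));   /* 1 */
        printf("64 KiB write: %d\n", use_ddp(false, 65536, ddp_min));  /* 0, only reads are offloaded here */
        return 0;
}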
Signed-off-by: Yi Zou Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index e32a0ed266aa..a39d370bccf8 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -49,6 +49,12 @@ MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FCoE"); MODULE_LICENSE("GPL v2"); +/* Performance tuning parameters for fcoe */ +static unsigned int fcoe_ddp_min; +module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ + "Direct Data Placement (DDP)."); + /* fcoe host list */ LIST_HEAD(fcoe_hostlist); DEFINE_RWLOCK(fcoe_hostlist_lock); @@ -414,7 +420,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, */ bool fcoe_oem_match(struct fc_frame *fp) { - return fc_fcp_is_read(fr_fsp(fp)); + return fc_fcp_is_read(fr_fsp(fp)) && + (fr_fsp(fp)->data_len > fcoe_ddp_min); } /** From a4b7cfaee487caef913be978dce60896fe741268 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:08 -0700 Subject: [PATCH 131/209] [SCSI] libfcoe: fcoe_ctlr_destroy use cancel_work_sync instead of flush_work Use cancel_work_sync() in place of flush_work(), so that fcoe_ctlr_destroy() can be called from a workqueue. Also, purge the receive queue after the recv_work has been cancled because if recv_work isn't run it's not guaranteed to be empty now. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index d0b5208e1a45..62a4c2026072 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -148,13 +148,17 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip) */ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) { - flush_work(&fip->recv_work); + cancel_work_sync(&fip->recv_work); + spin_lock_bh(&fip->fip_recv_list.lock); + __skb_queue_purge(&fip->fip_recv_list); + spin_unlock_bh(&fip->fip_recv_list.lock); + spin_lock_bh(&fip->lock); fip->state = FIP_ST_DISABLED; fcoe_ctlr_reset_fcfs(fip); spin_unlock_bh(&fip->lock); del_timer_sync(&fip->timer); - flush_work(&fip->link_work); + cancel_work_sync(&fip->link_work); } EXPORT_SYMBOL(fcoe_ctlr_destroy); From 5892c32f8a2d5a37d4c2ff1df62f6e8a2664abdb Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:14 -0700 Subject: [PATCH 132/209] [SCSI] fcoe: fix missing error check in call to fcoe_if_init fcoe_if_init() can fail, but it's return value wasn't checked Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index a39d370bccf8..d205ac08d07c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1900,7 +1900,9 @@ static int __init fcoe_init(void) /* Setup link change notification */ fcoe_dev_setup(); - fcoe_if_init(); + rc = fcoe_if_init(); + if (rc) + goto out_free; return 0; From 36eb7fc872bdd7f7443ce24a718784bfa567f6c7 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 13:59:19 -0700 Subject: [PATCH 133/209] [SCSI] fcoe: remove unnecessary list and lock initializations. The hostlist and the hostlist_lock were initialized both in the delcaration and in fcoe_init(). 
Remove the unneeded code. Signed-off-by: Joe Eykholt Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d205ac08d07c..e481882a3e8f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1881,9 +1881,6 @@ static int __init fcoe_init(void) int rc = 0; struct fcoe_percpu_s *p; - INIT_LIST_HEAD(&fcoe_hostlist); - rwlock_init(&fcoe_hostlist_lock); - for_each_possible_cpu(cpu) { p = &per_cpu(fcoe_percpu, cpu); skb_queue_head_init(&p->fcoe_rx_list); From af7f85d95a34b04f5cf8c3b8c2d4cdd179dc1e19 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:24 -0700 Subject: [PATCH 134/209] [SCSI] fcoe: interface changes to fcoe_if_create and fcoe_if_destroy By passing in the parent device instead of assuming the netdev is what should be used, fcoe_if_create becomes usable for NPIV vports as well. You still need a netdev, because that's how FCoE works. Also removed some duplicate checks from fcoe_if_create that are already in fcoe_create. fcoe_if_destroy needs to take an lport as it's only argument, not a netdev. That removes the 1:1 netdev:lport assumption from the destroy path. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 113 +++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 65 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index e481882a3e8f..749279698137 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -506,30 +506,20 @@ static inline int fcoe_em_config(struct fc_lport *lp) /** * fcoe_if_destroy() - FCoE software HBA tear-down function - * @netdev: ptr to the associated net_device - * - * Returns: 0 if link is OK for use by FCoE. 
+ * @lport: fc_lport to destroy */ -static int fcoe_if_destroy(struct net_device *netdev) +static void fcoe_if_destroy(struct fc_lport *lport) { - struct fc_lport *lp = NULL; - struct fcoe_softc *fc; - - BUG_ON(!netdev); + struct fcoe_softc *fc = lport_priv(lport); + struct net_device *netdev = fc->netdev; FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); - lp = fcoe_hostlist_lookup(netdev); - if (!lp) - return -ENODEV; - - fc = lport_priv(lp); - /* Logout of the fabric */ - fc_fabric_logoff(lp); + fc_fabric_logoff(lport); /* Remove the instance from fcoe's list */ - fcoe_hostlist_remove(lp); + fcoe_hostlist_remove(lport); /* clean up netdev configurations */ fcoe_netdev_cleanup(fc); @@ -538,33 +528,31 @@ static int fcoe_if_destroy(struct net_device *netdev) fcoe_ctlr_destroy(&fc->ctlr); /* Free queued packets for the per-CPU receive threads */ - fcoe_percpu_clean(lp); + fcoe_percpu_clean(lport); /* Cleanup the fc_lport */ - fc_lport_destroy(lp); - fc_fcp_destroy(lp); + fc_lport_destroy(lport); + fc_fcp_destroy(lport); /* Detach from the scsi-ml */ - fc_remove_host(lp->host); - scsi_remove_host(lp->host); + fc_remove_host(lport->host); + scsi_remove_host(lport->host); /* There are no more rports or I/O, free the EM */ - fc_exch_mgr_free(lp); + fc_exch_mgr_free(lport); /* Free existing skbs */ - fcoe_clean_pending_queue(lp); + fcoe_clean_pending_queue(lport); /* Stop the timer */ del_timer_sync(&fc->timer); /* Free memory used by statistical counters */ - fc_lport_free_stats(lp); + fc_lport_free_stats(lport); /* Release the net_device and Scsi_Host */ dev_put(netdev); - scsi_host_put(lp->host); - - return 0; + scsi_host_put(lport->host); } /* @@ -612,38 +600,35 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { /** * fcoe_if_create() - this function creates the fcoe interface * @netdev: pointer the associated netdevice + * @parent: device pointer to be the parent in sysfs for the SCSI host * * Creates fc_lport struct and scsi_host for lport, configures lport * and starts fabric login. 
* - * Returns : 0 on success + * Returns : The allocated fc_lport or an error pointer */ -static int fcoe_if_create(struct net_device *netdev) +static struct fc_lport *fcoe_if_create(struct net_device *netdev, + struct device *parent) { int rc; - struct fc_lport *lp = NULL; + struct fc_lport *lport = NULL; struct fcoe_softc *fc; struct Scsi_Host *shost; - BUG_ON(!netdev); - FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - lp = fcoe_hostlist_lookup(netdev); - if (lp) - return -EEXIST; - shost = libfc_host_alloc(&fcoe_shost_template, sizeof(struct fcoe_softc)); if (!shost) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); - return -ENOMEM; + rc = -ENOMEM; + goto out; } - lp = shost_priv(shost); - fc = lport_priv(lp); + lport = shost_priv(shost); + fc = lport_priv(lport); /* configure fc_lport, e.g., em */ - rc = fcoe_lport_config(lp); + rc = fcoe_lport_config(lport); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " "interface\n"); @@ -658,7 +643,7 @@ static int fcoe_if_create(struct net_device *netdev) fc->ctlr.update_mac = fcoe_update_src_mac; /* configure lport network properties */ - rc = fcoe_netdev_config(lp, netdev); + rc = fcoe_netdev_config(lport, netdev); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " "interface\n"); @@ -666,7 +651,7 @@ static int fcoe_if_create(struct net_device *netdev) } /* configure lport scsi host properties */ - rc = fcoe_shost_config(lp, shost, &netdev->dev); + rc = fcoe_shost_config(lport, shost, parent); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " "interface\n"); @@ -674,7 +659,7 @@ static int fcoe_if_create(struct net_device *netdev) } /* Initialize the library */ - rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); + rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " "interface\n"); @@ -689,7 +674,7 @@ static int fcoe_if_create(struct net_device *netdev) */ write_lock(&fcoe_hostlist_lock); /* lport exch manager allocation */ - rc = fcoe_em_config(lp); + rc = fcoe_em_config(lport); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " "interface\n"); @@ -697,27 +682,28 @@ static int fcoe_if_create(struct net_device *netdev) } /* add to lports list */ - fcoe_hostlist_add(lp); + fcoe_hostlist_add(lport); write_unlock(&fcoe_hostlist_lock); - lp->boot_time = jiffies; + lport->boot_time = jiffies; - fc_fabric_login(lp); + fc_fabric_login(lport); - if (!fcoe_link_ok(lp)) + if (!fcoe_link_ok(lport)) fcoe_ctlr_link_up(&fc->ctlr); dev_hold(netdev); - return rc; + return lport; out_lp_destroy: - fc_exch_mgr_free(lp); + fc_exch_mgr_free(lport); out_netdev_cleanup: fcoe_netdev_cleanup(fc); out_host_put: - scsi_host_put(lp->host); - return rc; + scsi_host_put(lport->host); +out: + return ERR_PTR(rc); } /** @@ -1616,8 +1602,9 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) */ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) { - int rc; struct net_device *netdev; + struct fc_lport *lport; + int rc; netdev = fcoe_if_to_netdev(buffer); if (!netdev) { @@ -1625,17 +1612,12 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) goto out_nodev; } /* look for existing lport */ - if (!fcoe_hostlist_lookup(netdev)) { + lport = fcoe_hostlist_lookup(netdev); + if (!lport) { rc = -ENODEV; goto out_putdev; } - rc = fcoe_if_destroy(netdev); - if (rc) { - printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n", - netdev->name); - rc = -EIO; - goto 
out_putdev; - } + fcoe_if_destroy(lport); fcoe_ethdrv_put(netdev); rc = 0; out_putdev: @@ -1654,6 +1636,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) static int fcoe_create(const char *buffer, struct kernel_param *kp) { int rc; + struct fc_lport *lport; struct net_device *netdev; netdev = fcoe_if_to_netdev(buffer); @@ -1668,8 +1651,8 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) } fcoe_ethdrv_get(netdev); - rc = fcoe_if_create(netdev); - if (rc) { + lport = fcoe_if_create(netdev, &netdev->dev); + if (IS_ERR(lport)) { printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); fcoe_ethdrv_put(netdev); @@ -1926,7 +1909,7 @@ static void __exit fcoe_exit(void) /* releases the associated fcoe hosts */ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) - fcoe_if_destroy(fc->netdev); + fcoe_if_destroy(fc->ctlr.lp); unregister_hotcpu_notifier(&fcoe_cpu_notifier); From 014f5c3f560a336cb8ad5b9f828c85de0398e7bb Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:30 -0700 Subject: [PATCH 135/209] [SCSI] fcoe: Introduce and allocate fcoe_interface structure, 1:1 with net_device In preparation for NPIV support, I'm splitting the fcoe instance structure into two to remove the assumptions about it being 1:1 with the net_device. There will now be two structures, one which is 1:1 with the underlying net_device and one which is allocated per virtual SCSI/FC host. fcoe_softc is renamed to fcoe_port for the per Scsi_Host FCoE private data. Later patches with start moving shared stuff from fcoe_port to fcoe_interface Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 328 +++++++++++++++++++++------------------ drivers/scsi/fcoe/fcoe.h | 19 ++- 2 files changed, 189 insertions(+), 158 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 749279698137..0ae54b2bce34 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -151,10 +151,10 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { - struct fcoe_softc *fc; + struct fcoe_port *port; - fc = container_of(ptype, struct fcoe_softc, fip_packet_type); - fcoe_ctlr_recv(&fc->ctlr, skb); + port = container_of(ptype, struct fcoe_port, fip_packet_type); + fcoe_ctlr_recv(&port->ctlr, skb); return 0; } @@ -180,13 +180,13 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) */ static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) { - struct fcoe_softc *fc; + struct fcoe_port *port; - fc = fcoe_from_ctlr(fip); + port = fcoe_from_ctlr(fip); rtnl_lock(); if (!is_zero_ether_addr(old)) - dev_unicast_delete(fc->netdev, old); - dev_unicast_add(fc->netdev, new); + dev_unicast_delete(port->netdev, old); + dev_unicast_add(port->netdev, new); rtnl_unlock(); } @@ -224,25 +224,25 @@ static int fcoe_lport_config(struct fc_lport *lp) /** * fcoe_netdev_cleanup() - clean up netdev configurations - * @fc: ptr to the fcoe_softc + * @port: ptr to the fcoe_port */ -void fcoe_netdev_cleanup(struct fcoe_softc *fc) +void fcoe_netdev_cleanup(struct fcoe_port *port) { u8 flogi_maddr[ETH_ALEN]; /* Don't listen for Ethernet packets anymore */ - dev_remove_pack(&fc->fcoe_packet_type); - dev_remove_pack(&fc->fip_packet_type); + dev_remove_pack(&port->fcoe_packet_type); + dev_remove_pack(&port->fip_packet_type); /* Delete secondary MAC addresses */ rtnl_lock(); memcpy(flogi_maddr, (u8[6]) 
FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(fc->netdev, flogi_maddr); - if (!is_zero_ether_addr(fc->ctlr.data_src_addr)) - dev_unicast_delete(fc->netdev, fc->ctlr.data_src_addr); - if (fc->ctlr.spma) - dev_unicast_delete(fc->netdev, fc->ctlr.ctl_src_addr); - dev_mc_delete(fc->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_unicast_delete(port->netdev, flogi_maddr); + if (!is_zero_ether_addr(port->ctlr.data_src_addr)) + dev_unicast_delete(port->netdev, port->ctlr.data_src_addr); + if (port->ctlr.spma) + dev_unicast_delete(port->netdev, port->ctlr.ctl_src_addr); + dev_mc_delete(port->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); } @@ -271,14 +271,14 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) { u32 mfs; u64 wwnn, wwpn; - struct fcoe_softc *fc; + struct fcoe_port *port; u8 flogi_maddr[ETH_ALEN]; struct netdev_hw_addr *ha; /* Setup lport private data to point to fcoe softc */ - fc = lport_priv(lp); - fc->ctlr.lp = lp; - fc->netdev = netdev; + port = lport_priv(lp); + port->ctlr.lp = lp; + port->netdev = netdev; /* Do not support for bonding device */ if ((netdev->priv_flags & IFF_MASTER_ALB) || @@ -317,27 +317,27 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", lp->lro_xid); } - skb_queue_head_init(&fc->fcoe_pending_queue); - fc->fcoe_pending_queue_active = 0; - setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp); + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); /* look for SAN MAC address, if multiple SAN MACs exist, only * use the first one for SPMA */ rcu_read_lock(); for_each_dev_addr(netdev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && - (is_valid_ether_addr(ha->addr))) { - memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); - fc->ctlr.spma = 1; + (is_valid_ether_addr(port->ctlr.ctl_src_addr))) { + memcpy(port->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); + port->ctlr.spma = 1; break; } } rcu_read_unlock(); /* setup Source Mac Address */ - if (!fc->ctlr.spma) - memcpy(fc->ctlr.ctl_src_addr, netdev->dev_addr, - fc->netdev->addr_len); + if (!port->ctlr.spma) + memcpy(port->ctlr.ctl_src_addr, netdev->dev_addr, + netdev->addr_len); wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); fc_set_wwnn(lp, wwnn); @@ -353,8 +353,8 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_add(netdev, flogi_maddr); - if (fc->ctlr.spma) - dev_unicast_add(netdev, fc->ctlr.ctl_src_addr); + if (port->ctlr.spma) + dev_unicast_add(netdev, port->ctlr.ctl_src_addr); dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); @@ -362,15 +362,15 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) * setup the receive function from ethernet driver * on the ethertype for the given device */ - fc->fcoe_packet_type.func = fcoe_rcv; - fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - fc->fcoe_packet_type.dev = netdev; - dev_add_pack(&fc->fcoe_packet_type); + port->fcoe_packet_type.func = fcoe_rcv; + port->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + port->fcoe_packet_type.dev = netdev; + dev_add_pack(&port->fcoe_packet_type); - fc->fip_packet_type.func = fcoe_fip_recv; - fc->fip_packet_type.type = htons(ETH_P_FIP); - fc->fip_packet_type.dev = netdev; - dev_add_pack(&fc->fip_packet_type); + 
port->fip_packet_type.func = fcoe_fip_recv; + port->fip_packet_type.type = htons(ETH_P_FIP); + port->fip_packet_type.dev = netdev; + dev_add_pack(&port->fip_packet_type); return 0; } @@ -426,7 +426,7 @@ bool fcoe_oem_match(struct fc_frame *fp) /** * fcoe_em_config() - allocates em for this lport - * @lp: the port that em is to allocated for + * @lp: the fcoe that em is to allocated for * * Called with write fcoe_hostlist_lock held. * @@ -434,8 +434,9 @@ bool fcoe_oem_match(struct fc_frame *fp) */ static inline int fcoe_em_config(struct fc_lport *lp) { - struct fcoe_softc *fc = lport_priv(lp); - struct fcoe_softc *oldfc = NULL; + struct fcoe_interface *fcoe; + struct fcoe_port *port = lport_priv(lp); + struct fcoe_port *oldfc = NULL; struct net_device *old_real_dev, *cur_real_dev; u16 min_xid = FCOE_MIN_XID; u16 max_xid = FCOE_MAX_XID; @@ -453,38 +454,39 @@ static inline int fcoe_em_config(struct fc_lport *lp) * Reuse existing offload em instance in case * it is already allocated on real eth device */ - if (fc->netdev->priv_flags & IFF_802_1Q_VLAN) - cur_real_dev = vlan_dev_real_dev(fc->netdev); + if (port->netdev->priv_flags & IFF_802_1Q_VLAN) + cur_real_dev = vlan_dev_real_dev(port->netdev); else - cur_real_dev = fc->netdev; + cur_real_dev = port->netdev; - list_for_each_entry(oldfc, &fcoe_hostlist, list) { + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + oldfc = fcoe->priv; if (oldfc->netdev->priv_flags & IFF_802_1Q_VLAN) old_real_dev = vlan_dev_real_dev(oldfc->netdev); else old_real_dev = oldfc->netdev; if (cur_real_dev == old_real_dev) { - fc->oem = oldfc->oem; + port->oem = oldfc->oem; break; } } - if (fc->oem) { - if (!fc_exch_mgr_add(lp, fc->oem, fcoe_oem_match)) { + if (port->oem) { + if (!fc_exch_mgr_add(lp, port->oem, fcoe_oem_match)) { printk(KERN_ERR "fcoe_em_config: failed to add " "offload em:%p on interface:%s\n", - fc->oem, fc->netdev->name); + port->oem, port->netdev->name); return -ENOMEM; } } else { - fc->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, + port->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, FCOE_MIN_XID, lp->lro_xid, fcoe_oem_match); - if (!fc->oem) { + if (!port->oem) { printk(KERN_ERR "fcoe_em_config: failed to allocate " "em for offload exches on interface:%s\n", - fc->netdev->name); + port->netdev->name); return -ENOMEM; } } @@ -497,7 +499,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) skip_oem: if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { printk(KERN_ERR "fcoe_em_config: failed to " - "allocate em on interface %s\n", fc->netdev->name); + "allocate em on interface %s\n", port->netdev->name); return -ENOMEM; } @@ -510,8 +512,9 @@ static inline int fcoe_em_config(struct fc_lport *lp) */ static void fcoe_if_destroy(struct fc_lport *lport) { - struct fcoe_softc *fc = lport_priv(lport); - struct net_device *netdev = fc->netdev; + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->fcoe; + struct net_device *netdev = port->netdev; FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); @@ -522,10 +525,10 @@ static void fcoe_if_destroy(struct fc_lport *lport) fcoe_hostlist_remove(lport); /* clean up netdev configurations */ - fcoe_netdev_cleanup(fc); + fcoe_netdev_cleanup(port); /* tear-down the FCoE controller */ - fcoe_ctlr_destroy(&fc->ctlr); + fcoe_ctlr_destroy(&port->ctlr); /* Free queued packets for the per-CPU receive threads */ fcoe_percpu_clean(lport); @@ -545,7 +548,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) fcoe_clean_pending_queue(lport); /* Stop the timer */ - 
del_timer_sync(&fc->timer); + del_timer_sync(&port->timer); /* Free memory used by statistical counters */ fc_lport_free_stats(lport); @@ -553,6 +556,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Release the net_device and Scsi_Host */ dev_put(netdev); scsi_host_put(lport->host); + kfree(fcoe); /* TODO, should be refcounted */ } /* @@ -612,20 +616,31 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, { int rc; struct fc_lport *lport = NULL; - struct fcoe_softc *fc; + struct fcoe_port *port; + struct fcoe_interface *fcoe; struct Scsi_Host *shost; FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - shost = libfc_host_alloc(&fcoe_shost_template, - sizeof(struct fcoe_softc)); - if (!shost) { - FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); + fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); + if (!fcoe) { + FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); rc = -ENOMEM; goto out; } + + shost = libfc_host_alloc(&fcoe_shost_template, + sizeof(struct fcoe_port)); + if (!shost) { + FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); + rc = -ENOMEM; + goto out_kfree_port; + } lport = shost_priv(shost); - fc = lport_priv(lport); + port = lport_priv(lport); + + port->fcoe = fcoe; + fcoe->priv = port; /* configure fc_lport, e.g., em */ rc = fcoe_lport_config(lport); @@ -638,9 +653,9 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, /* * Initialize FIP. */ - fcoe_ctlr_init(&fc->ctlr); - fc->ctlr.send = fcoe_fip_send; - fc->ctlr.update_mac = fcoe_update_src_mac; + fcoe_ctlr_init(&port->ctlr); + port->ctlr.send = fcoe_fip_send; + port->ctlr.update_mac = fcoe_update_src_mac; /* configure lport network properties */ rc = fcoe_netdev_config(lport, netdev); @@ -690,7 +705,7 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, fc_fabric_login(lport); if (!fcoe_link_ok(lport)) - fcoe_ctlr_link_up(&fc->ctlr); + fcoe_ctlr_link_up(&port->ctlr); dev_hold(netdev); @@ -699,9 +714,11 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, out_lp_destroy: fc_exch_mgr_free(lport); out_netdev_cleanup: - fcoe_netdev_cleanup(fc); + fcoe_netdev_cleanup(port); out_host_put: scsi_host_put(lport->host); +out_kfree_port: + kfree(fcoe); out: return ERR_PTR(rc); } @@ -902,13 +919,13 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, { struct fc_lport *lp; struct fcoe_rcv_info *fr; - struct fcoe_softc *fc; + struct fcoe_port *port; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; unsigned int cpu; - fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); - lp = fc->ctlr.lp; + port = container_of(ptype, struct fcoe_port, fcoe_packet_type); + lp = port->ctlr.lp; if (unlikely(lp == NULL)) { FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); goto err2; @@ -1059,7 +1076,7 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) * fcoe_fc_crc() - calculates FC CRC in this fcoe skb * @fp: the fc_frame containing data to be checksummed * - * This uses crc32() to calculate the crc for fc frame + * This uses crc32() to calculate the crc for port frame * Return : 32 bit crc */ u32 fcoe_fc_crc(struct fc_frame *fp) @@ -1092,7 +1109,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) /** * fcoe_xmit() - FCoE frame transmit function - * @lp: the associated local port + * @lp: the associated local fcoe * @fp: the fc_frame to be transmitted * * Return : 0 for success @@ -1109,13 +1126,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) unsigned int hlen; /* header length implies the version */ unsigned 
int tlen; /* trailer length */ unsigned int elen; /* eth header, may include vlan */ - struct fcoe_softc *fc; + struct fcoe_port *port; u8 sof, eof; struct fcoe_hdr *hp; WARN_ON((fr_len(fp) % sizeof(u32)) != 0); - fc = lport_priv(lp); + port = lport_priv(lp); fh = fc_frame_header_get(fp); skb = fp_skb(fp); wlen = skb->len / FCOE_WORD_TO_BYTE; @@ -1126,7 +1143,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fc->ctlr, skb)) + fcoe_ctlr_els_send(&port->ctlr, skb)) return 0; sof = fr_sof(fp); @@ -1148,7 +1165,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) crc = fcoe_fc_crc(fp); } - /* copy fc crc and eof to the skb buff */ + /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; if (fcoe_get_paged_crc_eof(skb, tlen)) { @@ -1171,27 +1188,27 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) cp = NULL; } - /* adjust skb network/transport offsets to match mac/fcoe/fc */ + /* adjust skb network/transport offsets to match mac/fcoe/port */ skb_push(skb, elen + hlen); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = fc->netdev; + skb->dev = port->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); - if (fc->ctlr.map_dest) + if (port->ctlr.map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ - memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN); + memcpy(eh->h_dest, port->ctlr.dest_addr, ETH_ALEN); - if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN)) - memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN); + if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, port->ctlr.ctl_src_addr, ETH_ALEN); else - memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN); + memcpy(eh->h_source, port->ctlr.data_src_addr, ETH_ALEN); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); @@ -1214,7 +1231,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) /* send down to lld */ fr_dev(fp) = lp; - if (fc->fcoe_pending_queue.qlen) + if (port->fcoe_pending_queue.qlen) fcoe_check_wait_queue(lp, skb); else if (fcoe_start_io(skb)) fcoe_check_wait_queue(lp, skb); @@ -1240,7 +1257,7 @@ int fcoe_percpu_receive_thread(void *arg) struct fcoe_crc_eof crc_eof; struct fc_frame *fp; u8 *mac = NULL; - struct fcoe_softc *fc; + struct fcoe_port *port; struct fcoe_hdr *hp; set_user_nice(current, -20); @@ -1276,7 +1293,7 @@ int fcoe_percpu_receive_thread(void *arg) /* * Save source MAC address before discarding header. 
*/ - fc = lport_priv(lp); + port = lport_priv(lp); if (skb_is_nonlinear(skb)) skb_linearize(skb); /* not ideal */ mac = eth_hdr(skb)->h_source; @@ -1354,8 +1371,8 @@ int fcoe_percpu_receive_thread(void *arg) } fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; } - if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) && - fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) { + if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN) && + fcoe_ctlr_recv_flogi(&port->ctlr, fp, mac)) { fc_frame_free(fp); continue; } @@ -1379,46 +1396,46 @@ int fcoe_percpu_receive_thread(void *arg) */ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) { - struct fcoe_softc *fc = lport_priv(lp); + struct fcoe_port *port = lport_priv(lp); int rc; - spin_lock_bh(&fc->fcoe_pending_queue.lock); + spin_lock_bh(&port->fcoe_pending_queue.lock); if (skb) - __skb_queue_tail(&fc->fcoe_pending_queue, skb); + __skb_queue_tail(&port->fcoe_pending_queue, skb); - if (fc->fcoe_pending_queue_active) + if (port->fcoe_pending_queue_active) goto out; - fc->fcoe_pending_queue_active = 1; + port->fcoe_pending_queue_active = 1; - while (fc->fcoe_pending_queue.qlen) { + while (port->fcoe_pending_queue.qlen) { /* keep qlen > 0 until fcoe_start_io succeeds */ - fc->fcoe_pending_queue.qlen++; - skb = __skb_dequeue(&fc->fcoe_pending_queue); + port->fcoe_pending_queue.qlen++; + skb = __skb_dequeue(&port->fcoe_pending_queue); - spin_unlock_bh(&fc->fcoe_pending_queue.lock); + spin_unlock_bh(&port->fcoe_pending_queue.lock); rc = fcoe_start_io(skb); - spin_lock_bh(&fc->fcoe_pending_queue.lock); + spin_lock_bh(&port->fcoe_pending_queue.lock); if (rc) { - __skb_queue_head(&fc->fcoe_pending_queue, skb); + __skb_queue_head(&port->fcoe_pending_queue, skb); /* undo temporary increment above */ - fc->fcoe_pending_queue.qlen--; + port->fcoe_pending_queue.qlen--; break; } /* undo temporary increment above */ - fc->fcoe_pending_queue.qlen--; + port->fcoe_pending_queue.qlen--; } - if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) + if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) lp->qfull = 0; - if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer)) - mod_timer(&fc->timer, jiffies + 2); - fc->fcoe_pending_queue_active = 0; + if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) + mod_timer(&port->timer, jiffies + 2); + port->fcoe_pending_queue_active = 0; out: - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) lp->qfull = 1; - spin_unlock_bh(&fc->fcoe_pending_queue.lock); + spin_unlock_bh(&port->fcoe_pending_queue.lock); return; } @@ -1453,16 +1470,18 @@ static int fcoe_device_notification(struct notifier_block *notifier, { struct fc_lport *lp = NULL; struct net_device *netdev = ptr; - struct fcoe_softc *fc; + struct fcoe_interface *fcoe; + struct fcoe_port *port = NULL; struct fcoe_dev_stats *stats; u32 link_possible = 1; u32 mfs; int rc = NOTIFY_OK; read_lock(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->netdev == netdev) { - lp = fc->ctlr.lp; + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + port = fcoe->priv; + if (port->netdev == netdev) { + lp = port->ctlr.lp; break; } } @@ -1493,8 +1512,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, "from netdev netlink\n", event); } if (link_possible && !fcoe_link_ok(lp)) - fcoe_ctlr_link_up(&fc->ctlr); - else if (fcoe_ctlr_link_down(&fc->ctlr)) { + fcoe_ctlr_link_up(&port->ctlr); + else if (fcoe_ctlr_link_down(&port->ctlr)) { stats = 
fc_lport_get_stats(lp); stats->LinkFailureCount++; fcoe_clean_pending_queue(lp); @@ -1668,10 +1687,10 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); -MODULE_PARM_DESC(create, "Create fcoe port using net device passed in."); +MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(destroy, "string"); -MODULE_PARM_DESC(destroy, "Destroy fcoe port"); +MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); /** * fcoe_link_ok() - Check if link is ok for the fc_lport @@ -1689,8 +1708,8 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe port"); */ int fcoe_link_ok(struct fc_lport *lp) { - struct fcoe_softc *fc = lport_priv(lp); - struct net_device *dev = fc->netdev; + struct fcoe_port *port = lport_priv(lp); + struct net_device *dev = port->netdev; struct ethtool_cmd ecmd = { ETHTOOL_GSET }; if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && @@ -1752,16 +1771,16 @@ void fcoe_percpu_clean(struct fc_lport *lp) */ void fcoe_clean_pending_queue(struct fc_lport *lp) { - struct fcoe_softc *fc = lport_priv(lp); + struct fcoe_port *port = lport_priv(lp); struct sk_buff *skb; - spin_lock_bh(&fc->fcoe_pending_queue.lock); - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&fc->fcoe_pending_queue.lock); + spin_lock_bh(&port->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&port->fcoe_pending_queue.lock); kfree_skb(skb); - spin_lock_bh(&fc->fcoe_pending_queue.lock); + spin_lock_bh(&port->fcoe_pending_queue.lock); } - spin_unlock_bh(&fc->fcoe_pending_queue.lock); + spin_unlock_bh(&port->fcoe_pending_queue.lock); } /** @@ -1778,21 +1797,21 @@ int fcoe_reset(struct Scsi_Host *shost) } /** - * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device + * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device * @dev: this is currently ptr to net_device * * Called with fcoe_hostlist_lock held. * - * Returns: NULL or the located fcoe_softc + * Returns: NULL or the located fcoe_port */ -static struct fcoe_softc * -fcoe_hostlist_lookup_softc(const struct net_device *dev) +static struct fcoe_interface * +fcoe_hostlist_lookup_port(const struct net_device *dev) { - struct fcoe_softc *fc; + struct fcoe_interface *fcoe; - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->netdev == dev) - return fc; + list_for_each_entry(fcoe, &fcoe_hostlist, list) { + if (fcoe->priv->netdev == dev) + return fcoe; } return NULL; } @@ -1805,13 +1824,13 @@ fcoe_hostlist_lookup_softc(const struct net_device *dev) */ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) { - struct fcoe_softc *fc; + struct fcoe_interface *fcoe; read_lock(&fcoe_hostlist_lock); - fc = fcoe_hostlist_lookup_softc(netdev); + fcoe = fcoe_hostlist_lookup_port(netdev); read_unlock(&fcoe_hostlist_lock); - return (fc) ? fc->ctlr.lp : NULL; + return (fcoe) ? 
fcoe->priv->ctlr.lp : NULL; } /** @@ -1822,14 +1841,16 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) * * Returns: 0 for success */ -int fcoe_hostlist_add(const struct fc_lport *lp) +int fcoe_hostlist_add(const struct fc_lport *lport) { - struct fcoe_softc *fc; + struct fcoe_interface *fcoe; + struct fcoe_port *port; - fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); - if (!fc) { - fc = lport_priv(lp); - list_add_tail(&fc->list, &fcoe_hostlist); + fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); + if (!fcoe) { + port = lport_priv(lport); + fcoe = port->fcoe; + list_add_tail(&fcoe->list, &fcoe_hostlist); } return 0; } @@ -1840,14 +1861,14 @@ int fcoe_hostlist_add(const struct fc_lport *lp) * * Returns: 0 for success */ -int fcoe_hostlist_remove(const struct fc_lport *lp) +int fcoe_hostlist_remove(const struct fc_lport *lport) { - struct fcoe_softc *fc; + struct fcoe_interface *fcoe; write_lock_bh(&fcoe_hostlist_lock); - fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); - BUG_ON(!fc); - list_del(&fc->list); + fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); + BUG_ON(!fcoe); + list_del(&fcoe->list); write_unlock_bh(&fcoe_hostlist_lock); return 0; @@ -1903,19 +1924,18 @@ module_init(fcoe_init); static void __exit fcoe_exit(void) { unsigned int cpu; - struct fcoe_softc *fc, *tmp; + struct fcoe_interface *fcoe, *tmp; fcoe_dev_cleanup(); /* releases the associated fcoe hosts */ - list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) - fcoe_if_destroy(fc->ctlr.lp); + list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) + fcoe_if_destroy(fcoe->priv->ctlr.lp); unregister_hotcpu_notifier(&fcoe_cpu_notifier); - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) fcoe_percpu_thread_destroy(cpu); - } /* detach from scsi transport */ fcoe_if_exit(); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 550d1e49d1a3..060a6dce6580 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -75,10 +75,21 @@ struct fcoe_percpu_s { }; /* - * the fcoe sw transport private data + * an FCoE interface, 1:1 with netdev */ -struct fcoe_softc { +struct fcoe_interface { struct list_head list; + /* This will be removed once all the shared values are + * moved out of fcoe_port */ + struct fcoe_port *priv; +}; + +/* + * the FCoE private structure that's allocated along with the + * Scsi_Host and libfc fc_lport structures + */ +struct fcoe_port { + struct fcoe_interface *fcoe; struct net_device *netdev; struct fc_exch_mgr *oem; /* offload exchange manger */ struct packet_type fcoe_packet_type; @@ -89,12 +100,12 @@ struct fcoe_softc { struct fcoe_ctlr ctlr; }; -#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr) +#define fcoe_from_ctlr(port) container_of(port, struct fcoe_port, ctlr) static inline struct net_device *fcoe_netdev( const struct fc_lport *lp) { - return ((struct fcoe_softc *)lport_priv(lp))->netdev; + return ((struct fcoe_port *)lport_priv(lp))->netdev; } #endif /* _FCOE_H_ */ From 250249898a92a1228050f40fbe3c05deb1392da8 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:35 -0700 Subject: [PATCH 136/209] [SCSI] fcoe: move netdev to fcoe_interface The network interface needs to be shared between all NPIV VN_Ports, therefor it should be tracked in the fcoe_interface and not for each SCSI host in fcoe_port. 
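Put differently, after this change each per-Scsi_Host fcoe_port reaches the shared net_device through its fcoe_interface back-pointer instead of carrying its own copy. A rough sketch of the resulting layout (the names mirror the structures in the hunks below, trimmed to the fields relevant here; this is an illustration, not the applied code):

	/* Illustrative layout only; the real definitions are in the fcoe.h hunks below. */
	struct fcoe_interface {			/* one instance per net_device */
		struct net_device *netdev;	/* shared by all NPIV VN_Ports */
		/* ... */
	};

	struct fcoe_port {			/* one instance per Scsi_Host / fc_lport */
		struct fcoe_interface *fcoe;	/* back-pointer to the shared state */
		/* ... */
	};

	static inline struct net_device *fcoe_netdev(const struct fc_lport *lp)
	{
		return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev;
	}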
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 61 ++++++++++++++++++++++------------------ drivers/scsi/fcoe/fcoe.h | 9 +++--- 2 files changed, 38 insertions(+), 32 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0ae54b2bce34..44c963c8bc7d 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -165,7 +165,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, */ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { - skb->dev = fcoe_from_ctlr(fip)->netdev; + skb->dev = fcoe_from_ctlr(fip)->fcoe->netdev; dev_queue_xmit(skb); } @@ -180,13 +180,16 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) */ static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) { + struct fcoe_interface *fcoe; struct fcoe_port *port; port = fcoe_from_ctlr(fip); + fcoe = port->fcoe; + rtnl_lock(); if (!is_zero_ether_addr(old)) - dev_unicast_delete(port->netdev, old); - dev_unicast_add(port->netdev, new); + dev_unicast_delete(fcoe->netdev, old); + dev_unicast_add(fcoe->netdev, new); rtnl_unlock(); } @@ -229,6 +232,7 @@ static int fcoe_lport_config(struct fc_lport *lp) void fcoe_netdev_cleanup(struct fcoe_port *port) { u8 flogi_maddr[ETH_ALEN]; + struct fcoe_interface *fcoe = port->fcoe; /* Don't listen for Ethernet packets anymore */ dev_remove_pack(&port->fcoe_packet_type); @@ -237,12 +241,12 @@ void fcoe_netdev_cleanup(struct fcoe_port *port) /* Delete secondary MAC addresses */ rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(port->netdev, flogi_maddr); + dev_unicast_delete(fcoe->netdev, flogi_maddr); if (!is_zero_ether_addr(port->ctlr.data_src_addr)) - dev_unicast_delete(port->netdev, port->ctlr.data_src_addr); + dev_unicast_delete(fcoe->netdev, port->ctlr.data_src_addr); if (port->ctlr.spma) - dev_unicast_delete(port->netdev, port->ctlr.ctl_src_addr); - dev_mc_delete(port->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_unicast_delete(fcoe->netdev, port->ctlr.ctl_src_addr); + dev_mc_delete(fcoe->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); } @@ -271,14 +275,16 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) { u32 mfs; u64 wwnn, wwpn; + struct fcoe_interface *fcoe; struct fcoe_port *port; u8 flogi_maddr[ETH_ALEN]; struct netdev_hw_addr *ha; /* Setup lport private data to point to fcoe softc */ port = lport_priv(lp); + fcoe = port->fcoe; port->ctlr.lp = lp; - port->netdev = netdev; + fcoe->netdev = netdev; /* Do not support for bonding device */ if ((netdev->priv_flags & IFF_MASTER_ALB) || @@ -434,9 +440,10 @@ bool fcoe_oem_match(struct fc_frame *fp) */ static inline int fcoe_em_config(struct fc_lport *lp) { - struct fcoe_interface *fcoe; struct fcoe_port *port = lport_priv(lp); - struct fcoe_port *oldfc = NULL; + struct fcoe_port *oldport = NULL; + struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *oldfcoe = NULL; struct net_device *old_real_dev, *cur_real_dev; u16 min_xid = FCOE_MIN_XID; u16 max_xid = FCOE_MAX_XID; @@ -454,20 +461,20 @@ static inline int fcoe_em_config(struct fc_lport *lp) * Reuse existing offload em instance in case * it is already allocated on real eth device */ - if (port->netdev->priv_flags & IFF_802_1Q_VLAN) - cur_real_dev = vlan_dev_real_dev(port->netdev); + if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN) + cur_real_dev = vlan_dev_real_dev(fcoe->netdev); else - cur_real_dev = port->netdev; + 
cur_real_dev = fcoe->netdev; - list_for_each_entry(fcoe, &fcoe_hostlist, list) { - oldfc = fcoe->priv; - if (oldfc->netdev->priv_flags & IFF_802_1Q_VLAN) - old_real_dev = vlan_dev_real_dev(oldfc->netdev); + list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { + oldport = oldfcoe->priv; + if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN) + old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); else - old_real_dev = oldfc->netdev; + old_real_dev = oldfcoe->netdev; if (cur_real_dev == old_real_dev) { - port->oem = oldfc->oem; + port->oem = oldport->oem; break; } } @@ -476,7 +483,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) if (!fc_exch_mgr_add(lp, port->oem, fcoe_oem_match)) { printk(KERN_ERR "fcoe_em_config: failed to add " "offload em:%p on interface:%s\n", - port->oem, port->netdev->name); + port->oem, fcoe->netdev->name); return -ENOMEM; } } else { @@ -486,7 +493,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) if (!port->oem) { printk(KERN_ERR "fcoe_em_config: failed to allocate " "em for offload exches on interface:%s\n", - port->netdev->name); + fcoe->netdev->name); return -ENOMEM; } } @@ -499,7 +506,7 @@ static inline int fcoe_em_config(struct fc_lport *lp) skip_oem: if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) { printk(KERN_ERR "fcoe_em_config: failed to " - "allocate em on interface %s\n", port->netdev->name); + "allocate em on interface %s\n", fcoe->netdev->name); return -ENOMEM; } @@ -514,7 +521,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct fcoe_interface *fcoe = port->fcoe; - struct net_device *netdev = port->netdev; + struct net_device *netdev = fcoe->netdev; FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); @@ -1194,7 +1201,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = port->netdev; + skb->dev = port->fcoe->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); @@ -1480,7 +1487,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, read_lock(&fcoe_hostlist_lock); list_for_each_entry(fcoe, &fcoe_hostlist, list) { port = fcoe->priv; - if (port->netdev == netdev) { + if (fcoe->netdev == netdev) { lp = port->ctlr.lp; break; } @@ -1709,7 +1716,7 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); int fcoe_link_ok(struct fc_lport *lp) { struct fcoe_port *port = lport_priv(lp); - struct net_device *dev = port->netdev; + struct net_device *dev = port->fcoe->netdev; struct ethtool_cmd ecmd = { ETHTOOL_GSET }; if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && @@ -1810,7 +1817,7 @@ fcoe_hostlist_lookup_port(const struct net_device *dev) struct fcoe_interface *fcoe; list_for_each_entry(fcoe, &fcoe_hostlist, list) { - if (fcoe->priv->netdev == dev) + if (fcoe->netdev == dev) return fcoe; } return NULL; diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 060a6dce6580..3b3886e99b48 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -82,6 +82,7 @@ struct fcoe_interface { /* This will be removed once all the shared values are * moved out of fcoe_port */ struct fcoe_port *priv; + struct net_device *netdev; }; /* @@ -90,7 +91,6 @@ struct fcoe_interface { */ struct fcoe_port { struct fcoe_interface *fcoe; - struct net_device *netdev; struct fc_exch_mgr *oem; /* offload exchange manger */ struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; @@ -100,12 +100,11 @@ struct fcoe_port { struct 
fcoe_ctlr ctlr; }; -#define fcoe_from_ctlr(port) container_of(port, struct fcoe_port, ctlr) +#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_port, ctlr) -static inline struct net_device *fcoe_netdev( - const struct fc_lport *lp) +static inline struct net_device *fcoe_netdev(const struct fc_lport *lp) { - return ((struct fcoe_port *)lport_priv(lp))->netdev; + return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev; } #endif /* _FCOE_H_ */ From 259ad85d8dbbcd508e3dad29a36e3e76365853b7 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:41 -0700 Subject: [PATCH 137/209] [SCSI] fcoe: move packet handlers from fcoe_port to fcoe_interface The packet handlers need to be tracked in fcoe_interface so there is only one set per net_device. When NPIV is enabled there will be multiple SCSI hosts and multiple fcoe_port structures on a single net_device. The packet handlers match by ethertype and netdev. If the same handler gets registered on a single netdev multiple times, the receive function will be called multiple times for each frame. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 28 ++++++++++++++++------------ drivers/scsi/fcoe/fcoe.h | 4 ++-- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 44c963c8bc7d..c215235ee39f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -151,9 +151,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { + struct fcoe_interface *fcoe; struct fcoe_port *port; - port = container_of(ptype, struct fcoe_port, fip_packet_type); + fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); + port = fcoe->priv; fcoe_ctlr_recv(&port->ctlr, skb); return 0; } @@ -235,8 +237,8 @@ void fcoe_netdev_cleanup(struct fcoe_port *port) struct fcoe_interface *fcoe = port->fcoe; /* Don't listen for Ethernet packets anymore */ - dev_remove_pack(&port->fcoe_packet_type); - dev_remove_pack(&port->fip_packet_type); + dev_remove_pack(&fcoe->fcoe_packet_type); + dev_remove_pack(&fcoe->fip_packet_type); /* Delete secondary MAC addresses */ rtnl_lock(); @@ -368,15 +370,15 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) * setup the receive function from ethernet driver * on the ethertype for the given device */ - port->fcoe_packet_type.func = fcoe_rcv; - port->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - port->fcoe_packet_type.dev = netdev; - dev_add_pack(&port->fcoe_packet_type); + fcoe->fcoe_packet_type.func = fcoe_rcv; + fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + fcoe->fcoe_packet_type.dev = netdev; + dev_add_pack(&fcoe->fcoe_packet_type); - port->fip_packet_type.func = fcoe_fip_recv; - port->fip_packet_type.type = htons(ETH_P_FIP); - port->fip_packet_type.dev = netdev; - dev_add_pack(&port->fip_packet_type); + fcoe->fip_packet_type.func = fcoe_fip_recv; + fcoe->fip_packet_type.type = htons(ETH_P_FIP); + fcoe->fip_packet_type.dev = netdev; + dev_add_pack(&fcoe->fip_packet_type); return 0; } @@ -926,12 +928,14 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, { struct fc_lport *lp; struct fcoe_rcv_info *fr; + struct fcoe_interface *fcoe; struct fcoe_port *port; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; unsigned int cpu; - port = container_of(ptype, struct fcoe_port, fcoe_packet_type); + fcoe = container_of(ptype, struct 
fcoe_interface, fcoe_packet_type); + port = fcoe->priv; lp = port->ctlr.lp; if (unlikely(lp == NULL)) { FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 3b3886e99b48..685aa9d02226 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -83,6 +83,8 @@ struct fcoe_interface { * moved out of fcoe_port */ struct fcoe_port *priv; struct net_device *netdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; }; /* @@ -92,8 +94,6 @@ struct fcoe_interface { struct fcoe_port { struct fcoe_interface *fcoe; struct fc_exch_mgr *oem; /* offload exchange manger */ - struct packet_type fcoe_packet_type; - struct packet_type fip_packet_type; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; struct timer_list timer; /* queue timer */ From 3fe9a0badae7fa2eb35eff4f07e851fbd25e3d4f Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:46 -0700 Subject: [PATCH 138/209] [SCSI] fcoe: move FIP controller from fcoe_port to fcoe_interface There is only one FIP state per net_device, so the FIP controller needs to be moved from the per-SCSI-host fcoe_port to the per-net_device fcoe_interface structure. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 83 ++++++++++++++++++---------------------- drivers/scsi/fcoe/fcoe.h | 4 +- 2 files changed, 39 insertions(+), 48 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c215235ee39f..01519c722edf 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -152,11 +152,9 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, struct net_device *orig_dev) { struct fcoe_interface *fcoe; - struct fcoe_port *port; fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); - port = fcoe->priv; - fcoe_ctlr_recv(&port->ctlr, skb); + fcoe_ctlr_recv(&fcoe->ctlr, skb); return 0; } @@ -167,7 +165,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, */ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { - skb->dev = fcoe_from_ctlr(fip)->fcoe->netdev; + skb->dev = fcoe_from_ctlr(fip)->netdev; dev_queue_xmit(skb); } @@ -183,11 +181,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new) { struct fcoe_interface *fcoe; - struct fcoe_port *port; - - port = fcoe_from_ctlr(fip); - fcoe = port->fcoe; + fcoe = fcoe_from_ctlr(fip); rtnl_lock(); if (!is_zero_ether_addr(old)) dev_unicast_delete(fcoe->netdev, old); @@ -244,10 +239,10 @@ void fcoe_netdev_cleanup(struct fcoe_port *port) rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_delete(fcoe->netdev, flogi_maddr); - if (!is_zero_ether_addr(port->ctlr.data_src_addr)) - dev_unicast_delete(fcoe->netdev, port->ctlr.data_src_addr); - if (port->ctlr.spma) - dev_unicast_delete(fcoe->netdev, port->ctlr.ctl_src_addr); + if (!is_zero_ether_addr(fcoe->ctlr.data_src_addr)) + dev_unicast_delete(fcoe->netdev, fcoe->ctlr.data_src_addr); + if (fcoe->ctlr.spma) + dev_unicast_delete(fcoe->netdev, fcoe->ctlr.ctl_src_addr); dev_mc_delete(fcoe->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); } @@ -285,7 +280,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) /* Setup lport private data to point to fcoe softc */ port = lport_priv(lp); fcoe = port->fcoe; - port->ctlr.lp = lp; + 
fcoe->ctlr.lp = lp; fcoe->netdev = netdev; /* Do not support for bonding device */ @@ -334,17 +329,17 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) rcu_read_lock(); for_each_dev_addr(netdev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && - (is_valid_ether_addr(port->ctlr.ctl_src_addr))) { - memcpy(port->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); - port->ctlr.spma = 1; + (is_valid_ether_addr(fcoe->ctlr.ctl_src_addr))) { + memcpy(fcoe->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); + fcoe->ctlr.spma = 1; break; } } rcu_read_unlock(); /* setup Source Mac Address */ - if (!port->ctlr.spma) - memcpy(port->ctlr.ctl_src_addr, netdev->dev_addr, + if (!fcoe->ctlr.spma) + memcpy(fcoe->ctlr.ctl_src_addr, netdev->dev_addr, netdev->addr_len); wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); @@ -361,8 +356,8 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_add(netdev, flogi_maddr); - if (port->ctlr.spma) - dev_unicast_add(netdev, port->ctlr.ctl_src_addr); + if (fcoe->ctlr.spma) + dev_unicast_add(netdev, fcoe->ctlr.ctl_src_addr); dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); rtnl_unlock(); @@ -537,7 +532,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) fcoe_netdev_cleanup(port); /* tear-down the FCoE controller */ - fcoe_ctlr_destroy(&port->ctlr); + fcoe_ctlr_destroy(&fcoe->ctlr); /* Free queued packets for the per-CPU receive threads */ fcoe_percpu_clean(lport); @@ -662,9 +657,9 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, /* * Initialize FIP. */ - fcoe_ctlr_init(&port->ctlr); - port->ctlr.send = fcoe_fip_send; - port->ctlr.update_mac = fcoe_update_src_mac; + fcoe_ctlr_init(&fcoe->ctlr); + fcoe->ctlr.send = fcoe_fip_send; + fcoe->ctlr.update_mac = fcoe_update_src_mac; /* configure lport network properties */ rc = fcoe_netdev_config(lport, netdev); @@ -714,7 +709,7 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, fc_fabric_login(lport); if (!fcoe_link_ok(lport)) - fcoe_ctlr_link_up(&port->ctlr); + fcoe_ctlr_link_up(&fcoe->ctlr); dev_hold(netdev); @@ -929,14 +924,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, struct fc_lport *lp; struct fcoe_rcv_info *fr; struct fcoe_interface *fcoe; - struct fcoe_port *port; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; unsigned int cpu; fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); - port = fcoe->priv; - lp = port->ctlr.lp; + lp = fcoe->ctlr.lp; if (unlikely(lp == NULL)) { FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); goto err2; @@ -1137,13 +1130,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) unsigned int hlen; /* header length implies the version */ unsigned int tlen; /* trailer length */ unsigned int elen; /* eth header, may include vlan */ - struct fcoe_port *port; + struct fcoe_port *port = lport_priv(lp); + struct fcoe_interface *fcoe = port->fcoe; u8 sof, eof; struct fcoe_hdr *hp; WARN_ON((fr_len(fp) % sizeof(u32)) != 0); - port = lport_priv(lp); fh = fc_frame_header_get(fp); skb = fp_skb(fp); wlen = skb->len / FCOE_WORD_TO_BYTE; @@ -1154,7 +1147,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&port->ctlr, skb)) + fcoe_ctlr_els_send(&fcoe->ctlr, skb)) return 0; sof = fr_sof(fp); @@ -1205,21 +1198,21 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) skb_reset_network_header(skb); skb->mac_len = elen; 
skb->protocol = htons(ETH_P_FCOE); - skb->dev = port->fcoe->netdev; + skb->dev = fcoe->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); - if (port->ctlr.map_dest) + if (fcoe->ctlr.map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ - memcpy(eh->h_dest, port->ctlr.dest_addr, ETH_ALEN); + memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); - if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN)) - memcpy(eh->h_source, port->ctlr.ctl_src_addr, ETH_ALEN); + if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); else - memcpy(eh->h_source, port->ctlr.data_src_addr, ETH_ALEN); + memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); @@ -1382,8 +1375,8 @@ int fcoe_percpu_receive_thread(void *arg) } fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; } - if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN) && - fcoe_ctlr_recv_flogi(&port->ctlr, fp, mac)) { + if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) && + fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) { fc_frame_free(fp); continue; } @@ -1482,7 +1475,6 @@ static int fcoe_device_notification(struct notifier_block *notifier, struct fc_lport *lp = NULL; struct net_device *netdev = ptr; struct fcoe_interface *fcoe; - struct fcoe_port *port = NULL; struct fcoe_dev_stats *stats; u32 link_possible = 1; u32 mfs; @@ -1490,9 +1482,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, read_lock(&fcoe_hostlist_lock); list_for_each_entry(fcoe, &fcoe_hostlist, list) { - port = fcoe->priv; if (fcoe->netdev == netdev) { - lp = port->ctlr.lp; + lp = fcoe->ctlr.lp; break; } } @@ -1523,8 +1514,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, "from netdev netlink\n", event); } if (link_possible && !fcoe_link_ok(lp)) - fcoe_ctlr_link_up(&port->ctlr); - else if (fcoe_ctlr_link_down(&port->ctlr)) { + fcoe_ctlr_link_up(&fcoe->ctlr); + else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { stats = fc_lport_get_stats(lp); stats->LinkFailureCount++; fcoe_clean_pending_queue(lp); @@ -1841,7 +1832,7 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) fcoe = fcoe_hostlist_lookup_port(netdev); read_unlock(&fcoe_hostlist_lock); - return (fcoe) ? fcoe->priv->ctlr.lp : NULL; + return (fcoe) ? 
fcoe->ctlr.lp : NULL; } /** @@ -1941,7 +1932,7 @@ static void __exit fcoe_exit(void) /* releases the associated fcoe hosts */ list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) - fcoe_if_destroy(fcoe->priv->ctlr.lp); + fcoe_if_destroy(fcoe->ctlr.lp); unregister_hotcpu_notifier(&fcoe_cpu_notifier); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 685aa9d02226..5b190b5fea3f 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -85,6 +85,7 @@ struct fcoe_interface { struct net_device *netdev; struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; + struct fcoe_ctlr ctlr; }; /* @@ -97,10 +98,9 @@ struct fcoe_port { struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; struct timer_list timer; /* queue timer */ - struct fcoe_ctlr ctlr; }; -#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_port, ctlr) +#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) static inline struct net_device *fcoe_netdev(const struct fc_lport *lp) { From 991cbb6082db3025bd82908eb9ee2d2920be2114 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:51 -0700 Subject: [PATCH 139/209] [SCSI] fcoe: move offload exchange manager pointer from fcoe_port to fcoe_interface The offload EM pointer is only used when setting up a new libfc instance, but as it's designed to be shared among NPIV VN_Ports it should be tracked in fcoe_interface. With the host-list changed to track fcoe_interfaces as well, this is needed before we can remove the priv pointer from that structure (which is only there to help in the transition, and stops making sense once NPIV is enabled). Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 14 ++++++-------- drivers/scsi/fcoe/fcoe.h | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 01519c722edf..bb59a7a04f23 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -438,7 +438,6 @@ bool fcoe_oem_match(struct fc_frame *fp) static inline int fcoe_em_config(struct fc_lport *lp) { struct fcoe_port *port = lport_priv(lp); - struct fcoe_port *oldport = NULL; struct fcoe_interface *fcoe = port->fcoe; struct fcoe_interface *oldfcoe = NULL; struct net_device *old_real_dev, *cur_real_dev; @@ -464,30 +463,29 @@ static inline int fcoe_em_config(struct fc_lport *lp) cur_real_dev = fcoe->netdev; list_for_each_entry(oldfcoe, &fcoe_hostlist, list) { - oldport = oldfcoe->priv; if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN) old_real_dev = vlan_dev_real_dev(oldfcoe->netdev); else old_real_dev = oldfcoe->netdev; if (cur_real_dev == old_real_dev) { - port->oem = oldport->oem; + fcoe->oem = oldfcoe->oem; break; } } - if (port->oem) { - if (!fc_exch_mgr_add(lp, port->oem, fcoe_oem_match)) { + if (fcoe->oem) { + if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) { printk(KERN_ERR "fcoe_em_config: failed to add " "offload em:%p on interface:%s\n", - port->oem, fcoe->netdev->name); + fcoe->oem, fcoe->netdev->name); return -ENOMEM; } } else { - port->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, + fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3, FCOE_MIN_XID, lp->lro_xid, fcoe_oem_match); - if (!port->oem) { + if (!fcoe->oem) { printk(KERN_ERR "fcoe_em_config: failed to allocate " "em for offload exches on interface:%s\n", fcoe->netdev->name); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 5b190b5fea3f..26e859574604 100644 --- 
a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -86,6 +86,7 @@ struct fcoe_interface { struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; struct fcoe_ctlr ctlr; + struct fc_exch_mgr *oem; /* offload exchange manager */ }; /* @@ -94,7 +95,6 @@ struct fcoe_interface { */ struct fcoe_port { struct fcoe_interface *fcoe; - struct fc_exch_mgr *oem; /* offload exchange manger */ struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; struct timer_list timer; /* queue timer */ From cb0a6ca81439a9f113d3b46de0953da168a06f6a Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 13:59:56 -0700 Subject: [PATCH 140/209] [SCSI] fcoe: remove fcoe_interface->priv pointer The priv pointer is no longer needed, and once NPIV is enabled fcoe_interface:fc_lport becomes a one-to-many relationship. Remove the single pointer. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 -- drivers/scsi/fcoe/fcoe.h | 3 --- 2 files changed, 5 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index bb59a7a04f23..7f14c633da6f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -640,9 +640,7 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, } lport = shost_priv(shost); port = lport_priv(lport); - port->fcoe = fcoe; - fcoe->priv = port; /* configure fc_lport, e.g., em */ rc = fcoe_lport_config(lport); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 26e859574604..673e70e1f8bb 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -79,9 +79,6 @@ struct fcoe_percpu_s { */ struct fcoe_interface { struct list_head list; - /* This will be removed once all the shared values are - * moved out of fcoe_port */ - struct fcoe_port *priv; struct net_device *netdev; struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; From 030f4e001f13e0ee80bac1e756013341b1674d10 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:02 -0700 Subject: [PATCH 141/209] [SCSI] fcoe: fcoe_interface create, destroy and refcounting Up to this point the fcoe_instance structure was simply kzalloc/kfreed. This patch introduces create and destroy functions as well as kref based reference counting. The create function will grow as the initialization code is moved there. 
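For readers less familiar with the pattern: kref embeds the reference count in the object it protects, and the release callback uses container_of() to recover the enclosing structure before freeing it. Below is a minimal user-space sketch of the same get/put discipline; the struct ref, ref_get() and ref_put() names are made up for the example and are not the kernel kref API, though the shape is the same.

/* Sketch of an embedded refcount released via container_of();
 * illustrative names only, not the kernel kref implementation. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref {
	atomic_int count;
};

struct demo_interface {
	char name[16];
	struct ref kref;	/* embedded, like fcoe_interface::kref */
};

static void demo_release(struct ref *r)
{
	/* Recover the enclosing object from the embedded member. */
	struct demo_interface *di = container_of(r, struct demo_interface, kref);

	printf("releasing %s\n", di->name);
	free(di);
}

static void ref_get(struct ref *r)
{
	atomic_fetch_add(&r->count, 1);
}

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	/* The last put runs the release callback, mirroring kref_put(). */
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);
}

int main(void)
{
	struct demo_interface *di = calloc(1, sizeof(*di));

	if (!di)
		return 1;
	snprintf(di->name, sizeof(di->name), "eth0");
	atomic_init(&di->kref.count, 1);	/* like kref_init() */

	ref_get(&di->kref);			/* e.g. fcoe_if_create() */
	ref_put(&di->kref, demo_release);	/* e.g. fcoe_if_destroy() */
	ref_put(&di->kref, demo_release);	/* creator's initial reference */
	return 0;
}

In this patch, fcoe_interface_create() starts the count at one, fcoe_if_create() takes an additional reference, fcoe_if_destroy() drops one, and fcoe_interface_release() runs only when the last reference goes away.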
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 97 ++++++++++++++++++++++++++++++++-------- drivers/scsi/fcoe/fcoe.h | 1 + 2 files changed, 79 insertions(+), 19 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 7f14c633da6f..d1d6b3b8bf5b 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -138,6 +138,58 @@ static struct scsi_host_template fcoe_shost_template = { .max_sectors = 0xffff, }; +/** + * fcoe_interface_create() + * @netdev: network interface + * + * Returns: pointer to a struct fcoe_interface or NULL on error + */ +static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) +{ + struct fcoe_interface *fcoe; + + fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); + if (!fcoe) { + FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); + return NULL; + } + + kref_init(&fcoe->kref); + fcoe->netdev = netdev; + + return fcoe; +} + +/** + * fcoe_interface_release() - fcoe_port kref release function + * @kref: embedded reference count in an fcoe_interface struct + */ +static void fcoe_interface_release(struct kref *kref) +{ + struct fcoe_interface *fcoe; + + fcoe = container_of(kref, struct fcoe_interface, kref); + kfree(fcoe); +} + +/** + * fcoe_interface_get() + * @fcoe: + */ +static inline void fcoe_interface_get(struct fcoe_interface *fcoe) +{ + kref_get(&fcoe->kref); +} + +/** + * fcoe_interface_put() + * @fcoe: + */ +static inline void fcoe_interface_put(struct fcoe_interface *fcoe) +{ + kref_put(&fcoe->kref, fcoe_interface_release); +} + /** * fcoe_fip_recv - handle a received FIP frame. * @skb: the receive skb @@ -558,7 +610,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Release the net_device and Scsi_Host */ dev_put(netdev); scsi_host_put(lport->host); - kfree(fcoe); /* TODO, should be refcounted */ + fcoe_interface_put(fcoe); } /* @@ -604,8 +656,8 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { }; /** - * fcoe_if_create() - this function creates the fcoe interface - * @netdev: pointer the associated netdevice + * fcoe_if_create() - this function creates the fcoe port + * @fcoe: fcoe_interface structure to create an fc_lport instance on * @parent: device pointer to be the parent in sysfs for the SCSI host * * Creates fc_lport struct and scsi_host for lport, configures lport @@ -613,30 +665,23 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { * * Returns : The allocated fc_lport or an error pointer */ -static struct fc_lport *fcoe_if_create(struct net_device *netdev, +static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, struct device *parent) { int rc; struct fc_lport *lport = NULL; struct fcoe_port *port; - struct fcoe_interface *fcoe; struct Scsi_Host *shost; + struct net_device *netdev = fcoe->netdev; FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); - if (!fcoe) { - FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); - rc = -ENOMEM; - goto out; - } - shost = libfc_host_alloc(&fcoe_shost_template, sizeof(struct fcoe_port)); if (!shost) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); rc = -ENOMEM; - goto out_kfree_port; + goto out; } lport = shost_priv(shost); port = lport_priv(lport); @@ -708,7 +753,7 @@ static struct fc_lport *fcoe_if_create(struct net_device *netdev, fcoe_ctlr_link_up(&fcoe->ctlr); dev_hold(netdev); - + fcoe_interface_get(fcoe); return lport; out_lp_destroy: @@ -717,8 +762,6 @@ static 
struct fc_lport *fcoe_if_create(struct net_device *netdev, fcoe_netdev_cleanup(port); out_host_put: scsi_host_put(lport->host); -out_kfree_port: - kfree(fcoe); out: return ERR_PTR(rc); } @@ -1620,6 +1663,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) static int fcoe_destroy(const char *buffer, struct kernel_param *kp) { struct net_device *netdev; + struct fcoe_interface *fcoe; + struct fcoe_port *port; struct fc_lport *lport; int rc; @@ -1634,6 +1679,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) rc = -ENODEV; goto out_putdev; } + port = lport_priv(lport); + fcoe = port->fcoe; fcoe_if_destroy(lport); fcoe_ethdrv_put(netdev); rc = 0; @@ -1653,6 +1700,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) static int fcoe_create(const char *buffer, struct kernel_param *kp) { int rc; + struct fcoe_interface *fcoe; struct fc_lport *lport; struct net_device *netdev; @@ -1668,15 +1716,26 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) } fcoe_ethdrv_get(netdev); - lport = fcoe_if_create(netdev, &netdev->dev); + fcoe = fcoe_interface_create(netdev); + if (!fcoe) { + rc = -ENOMEM; + goto out_putdev; + } + + lport = fcoe_if_create(fcoe, &netdev->dev); if (IS_ERR(lport)) { printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); fcoe_ethdrv_put(netdev); rc = -EIO; - goto out_putdev; + goto out_free; } - rc = 0; + + dev_put(netdev); + return 0; + +out_free: + fcoe_interface_put(fcoe); out_putdev: dev_put(netdev); out_nodev: diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 673e70e1f8bb..ff229288b7f0 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -84,6 +84,7 @@ struct fcoe_interface { struct packet_type fip_packet_type; struct fcoe_ctlr ctlr; struct fc_exch_mgr *oem; /* offload exchange manager */ + struct kref kref; }; /* From 54b649f88eb17a29687bece4b8ad7d72d99e2d95 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:07 -0700 Subject: [PATCH 142/209] [SCSI] fcoe: split out per interface setup fcoe_netdev_config() is called during initialization of a libfc instance. Much of what was there only needs to be done once for each net_device. The same goes for the corresponding cleanup. The FIP controller initialization is moved to interface creation time. Otherwise it will keep getting re-initialized for every VN_Port once NPIV is enabled. fcoe_if_destroy() has some reordering to deal with the changes. Receives are not stopped until after fcoe_interface_put() is called, but transmits must be stopped before. So there is some care to stop libfc transmits and the transmit backlog timer, then call fcoe_interface_put which will stop receives and cleanup the FIP controller, then the receive queues can be cleaned and the port freed. 
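The resulting teardown order is worth spelling out, since it is the reason receives keep flowing until the interface reference is dropped. The following is a condensed outline of the reworked fcoe_if_destroy(), comments added for this description; it is not a verbatim or compilable excerpt, the full hunk appears below.

static void fcoe_if_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;

	fc_fabric_logoff(lport);		/* stop generating new traffic */
	fcoe_hostlist_remove(lport);		/* drop from the host list */
	fc_lport_destroy(lport);
	fc_fcp_destroy(lport);

	del_timer_sync(&port->timer);		/* stop the transmit backlog timer */
	fcoe_clean_pending_queue(lport);	/* free queued transmit skbs */

	fcoe_interface_put(fcoe);		/* on the last reference this unhooks
						 * the packet handlers and destroys
						 * the FIP controller, so receives
						 * stop no later than here */

	fcoe_percpu_clean(lport);		/* only now flush per-CPU rx queues */

	/* ... fc_remove_host(), scsi_remove_host(), fc_exch_mgr_free(),
	 * fc_lport_free_stats(), dev_put() and scsi_host_put() follow ... */
}

The key point is that fcoe_percpu_clean() runs only after fcoe_interface_put(), because it is the interface release path, via fcoe_interface_cleanup(), that removes the ETH_P_FCOE and ETH_P_FIP packet handlers and waits with synchronize_net(); flushing the per-CPU queues any earlier could race with frames still being queued.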
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 269 ++++++++++++++++++++++----------------- 1 file changed, 149 insertions(+), 120 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d1d6b3b8bf5b..63aeeca384ba 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -138,6 +138,82 @@ static struct scsi_host_template fcoe_shost_template = { .max_sectors = 0xffff, }; +static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev); +/** + * fcoe_interface_setup() + * @fcoe: new fcoe_interface + * @netdev : ptr to the associated netdevice struct + * + * Returns : 0 for success + */ +static int fcoe_interface_setup(struct fcoe_interface *fcoe, + struct net_device *netdev) +{ + struct fcoe_ctlr *fip = &fcoe->ctlr; + struct netdev_hw_addr *ha; + u8 flogi_maddr[ETH_ALEN]; + + fcoe->netdev = netdev; + + /* Do not support for bonding device */ + if ((netdev->priv_flags & IFF_MASTER_ALB) || + (netdev->priv_flags & IFF_SLAVE_INACTIVE) || + (netdev->priv_flags & IFF_MASTER_8023AD)) { + return -EOPNOTSUPP; + } + + /* look for SAN MAC address, if multiple SAN MACs exist, only + * use the first one for SPMA */ + rcu_read_lock(); + for_each_dev_addr(netdev, ha) { + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(fip->ctl_src_addr))) { + memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); + fip->spma = 1; + break; + } + } + rcu_read_unlock(); + + /* setup Source Mac Address */ + if (!fip->spma) + memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len); + + /* + * Add FCoE MAC address as second unicast MAC address + * or enter promiscuous mode if not capable of listening + * for multiple unicast MACs. + */ + rtnl_lock(); + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); + dev_unicast_add(netdev, flogi_maddr); + if (fip->spma) + dev_unicast_add(netdev, fip->ctl_src_addr); + dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + rtnl_unlock(); + + /* + * setup the receive function from ethernet driver + * on the ethertype for the given device + */ + fcoe->fcoe_packet_type.func = fcoe_rcv; + fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + fcoe->fcoe_packet_type.dev = netdev; + dev_add_pack(&fcoe->fcoe_packet_type); + + fcoe->fip_packet_type.func = fcoe_fip_recv; + fcoe->fip_packet_type.type = htons(ETH_P_FIP); + fcoe->fip_packet_type.dev = netdev; + dev_add_pack(&fcoe->fip_packet_type); + + return 0; +} + +static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); +static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new); + /** * fcoe_interface_create() * @netdev: network interface @@ -155,11 +231,54 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) } kref_init(&fcoe->kref); - fcoe->netdev = netdev; + + /* + * Initialize FIP. + */ + fcoe_ctlr_init(&fcoe->ctlr); + fcoe->ctlr.send = fcoe_fip_send; + fcoe->ctlr.update_mac = fcoe_update_src_mac; + + fcoe_interface_setup(fcoe, netdev); return fcoe; } +/** + * fcoe_interface_cleanup() - clean up netdev configurations + * @fcoe: + */ +void fcoe_interface_cleanup(struct fcoe_interface *fcoe) +{ + struct net_device *netdev = fcoe->netdev; + struct fcoe_ctlr *fip = &fcoe->ctlr; + u8 flogi_maddr[ETH_ALEN]; + + /* + * Don't listen for Ethernet packets anymore. + * synchronize_net() ensures that the packet handlers are not running + * on another CPU. 
dev_remove_pack() would do that, this calls the + * unsyncronized version __dev_remove_pack() to avoid multiple delays. + */ + __dev_remove_pack(&fcoe->fcoe_packet_type); + __dev_remove_pack(&fcoe->fip_packet_type); + synchronize_net(); + + /* tear-down the FCoE controller */ + fcoe_ctlr_destroy(&fcoe->ctlr); + + /* Delete secondary MAC addresses */ + rtnl_lock(); + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); + dev_unicast_delete(netdev, flogi_maddr); + if (!is_zero_ether_addr(fip->data_src_addr)) + dev_unicast_delete(netdev, fip->data_src_addr); + if (fip->spma) + dev_unicast_delete(netdev, fip->ctl_src_addr); + dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + rtnl_unlock(); +} + /** * fcoe_interface_release() - fcoe_port kref release function * @kref: embedded reference count in an fcoe_interface struct @@ -169,6 +288,7 @@ static void fcoe_interface_release(struct kref *kref) struct fcoe_interface *fcoe; fcoe = container_of(kref, struct fcoe_interface, kref); + fcoe_interface_cleanup(fcoe); kfree(fcoe); } @@ -274,31 +394,6 @@ static int fcoe_lport_config(struct fc_lport *lp) return 0; } -/** - * fcoe_netdev_cleanup() - clean up netdev configurations - * @port: ptr to the fcoe_port - */ -void fcoe_netdev_cleanup(struct fcoe_port *port) -{ - u8 flogi_maddr[ETH_ALEN]; - struct fcoe_interface *fcoe = port->fcoe; - - /* Don't listen for Ethernet packets anymore */ - dev_remove_pack(&fcoe->fcoe_packet_type); - dev_remove_pack(&fcoe->fip_packet_type); - - /* Delete secondary MAC addresses */ - rtnl_lock(); - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(fcoe->netdev, flogi_maddr); - if (!is_zero_ether_addr(fcoe->ctlr.data_src_addr)) - dev_unicast_delete(fcoe->netdev, fcoe->ctlr.data_src_addr); - if (fcoe->ctlr.spma) - dev_unicast_delete(fcoe->netdev, fcoe->ctlr.ctl_src_addr); - dev_mc_delete(fcoe->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); - rtnl_unlock(); -} - /** * fcoe_queue_timer() - fcoe queue timer * @lp: the fc_lport pointer @@ -326,21 +421,10 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) u64 wwnn, wwpn; struct fcoe_interface *fcoe; struct fcoe_port *port; - u8 flogi_maddr[ETH_ALEN]; - struct netdev_hw_addr *ha; /* Setup lport private data to point to fcoe softc */ port = lport_priv(lp); fcoe = port->fcoe; - fcoe->ctlr.lp = lp; - fcoe->netdev = netdev; - - /* Do not support for bonding device */ - if ((netdev->priv_flags & IFF_MASTER_ALB) || - (netdev->priv_flags & IFF_SLAVE_INACTIVE) || - (netdev->priv_flags & IFF_MASTER_8023AD)) { - return -EOPNOTSUPP; - } /* * Determine max frame size based on underlying device and optional @@ -376,57 +460,12 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) port->fcoe_pending_queue_active = 0; setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp); - /* look for SAN MAC address, if multiple SAN MACs exist, only - * use the first one for SPMA */ - rcu_read_lock(); - for_each_dev_addr(netdev, ha) { - if ((ha->type == NETDEV_HW_ADDR_T_SAN) && - (is_valid_ether_addr(fcoe->ctlr.ctl_src_addr))) { - memcpy(fcoe->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); - fcoe->ctlr.spma = 1; - break; - } - } - rcu_read_unlock(); - - /* setup Source Mac Address */ - if (!fcoe->ctlr.spma) - memcpy(fcoe->ctlr.ctl_src_addr, netdev->dev_addr, - netdev->addr_len); - wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0); fc_set_wwnn(lp, wwnn); /* XXX - 3rd arg needs to be vlan id */ wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0); fc_set_wwpn(lp, wwpn); - /* 
- * Add FCoE MAC address as second unicast MAC address - * or enter promiscuous mode if not capable of listening - * for multiple unicast MACs. - */ - rtnl_lock(); - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_add(netdev, flogi_maddr); - if (fcoe->ctlr.spma) - dev_unicast_add(netdev, fcoe->ctlr.ctl_src_addr); - dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); - rtnl_unlock(); - - /* - * setup the receive function from ethernet driver - * on the ethertype for the given device - */ - fcoe->fcoe_packet_type.func = fcoe_rcv; - fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - fcoe->fcoe_packet_type.dev = netdev; - dev_add_pack(&fcoe->fcoe_packet_type); - - fcoe->fip_packet_type.func = fcoe_fip_recv; - fcoe->fip_packet_type.type = htons(ETH_P_FIP); - fcoe->fip_packet_type.dev = netdev; - dev_add_pack(&fcoe->fip_packet_type); - return 0; } @@ -578,19 +617,22 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Remove the instance from fcoe's list */ fcoe_hostlist_remove(lport); - /* clean up netdev configurations */ - fcoe_netdev_cleanup(port); - - /* tear-down the FCoE controller */ - fcoe_ctlr_destroy(&fcoe->ctlr); - - /* Free queued packets for the per-CPU receive threads */ - fcoe_percpu_clean(lport); - /* Cleanup the fc_lport */ fc_lport_destroy(lport); fc_fcp_destroy(lport); + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + /* receives may not be stopped until after this */ + fcoe_interface_put(fcoe); + + /* Free queued packets for the per-CPU receive threads */ + fcoe_percpu_clean(lport); + /* Detach from the scsi-ml */ fc_remove_host(lport->host); scsi_remove_host(lport->host); @@ -598,19 +640,12 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* There are no more rports or I/O, free the EM */ fc_exch_mgr_free(lport); - /* Free existing skbs */ - fcoe_clean_pending_queue(lport); - - /* Stop the timer */ - del_timer_sync(&port->timer); - /* Free memory used by statistical counters */ fc_lport_free_stats(lport); /* Release the net_device and Scsi_Host */ dev_put(netdev); scsi_host_put(lport->host); - fcoe_interface_put(fcoe); } /* @@ -660,8 +695,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { * @fcoe: fcoe_interface structure to create an fc_lport instance on * @parent: device pointer to be the parent in sysfs for the SCSI host * - * Creates fc_lport struct and scsi_host for lport, configures lport - * and starts fabric login. + * Creates fc_lport struct and scsi_host for lport, configures lport. * * Returns : The allocated fc_lport or an error pointer */ @@ -695,19 +729,12 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_host_put; } - /* - * Initialize FIP. 
- */ - fcoe_ctlr_init(&fcoe->ctlr); - fcoe->ctlr.send = fcoe_fip_send; - fcoe->ctlr.update_mac = fcoe_update_src_mac; - /* configure lport network properties */ rc = fcoe_netdev_config(lport, netdev); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " "interface\n"); - goto out_netdev_cleanup; + goto out_lp_destroy; } /* configure lport scsi host properties */ @@ -715,7 +742,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " "interface\n"); - goto out_netdev_cleanup; + goto out_lp_destroy; } /* Initialize the library */ @@ -745,21 +772,12 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, fcoe_hostlist_add(lport); write_unlock(&fcoe_hostlist_lock); - lport->boot_time = jiffies; - - fc_fabric_login(lport); - - if (!fcoe_link_ok(lport)) - fcoe_ctlr_link_up(&fcoe->ctlr); - dev_hold(netdev); fcoe_interface_get(fcoe); return lport; out_lp_destroy: fc_exch_mgr_free(lport); -out_netdev_cleanup: - fcoe_netdev_cleanup(port); out_host_put: scsi_host_put(lport->host); out: @@ -1731,10 +1749,21 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) goto out_free; } - dev_put(netdev); - return 0; + /* Make this the "master" N_Port */ + fcoe->ctlr.lp = lport; + /* start FIP Discovery and FLOGI */ + lport->boot_time = jiffies; + fc_fabric_login(lport); + if (!fcoe_link_ok(lport)) + fcoe_ctlr_link_up(&fcoe->ctlr); + + rc = 0; out_free: + /* + * Release from init in fcoe_interface_create(), on success lport + * should be holding a reference taken in fcoe_if_create(). + */ fcoe_interface_put(fcoe); out_putdev: dev_put(netdev); From dfc1d0fe3a8b2139295600ab519f24059493e6f6 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:13 -0700 Subject: [PATCH 143/209] [SCSI] fcoe: add mutex to protect create and destroy Rather than rely on the hostlist_lock to be held while creating exchange managers, serialize fcoe instance creation and destruction with a mutex. This will allow the hostlist addition to be moved out of fcoe_if_create(), which will simplify NPIV support. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 63aeeca384ba..43added0a17c 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -55,6 +55,8 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ "Direct Data Placement (DDP)."); +DEFINE_MUTEX(fcoe_config_mutex); + /* fcoe host list */ LIST_HEAD(fcoe_hostlist); DEFINE_RWLOCK(fcoe_hostlist_lock); @@ -811,6 +813,7 @@ static int __init fcoe_if_init(void) int __exit fcoe_if_exit(void) { fc_release_transport(scsi_transport_fcoe_sw); + scsi_transport_fcoe_sw = NULL; return 0; } @@ -1686,6 +1689,19 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) struct fc_lport *lport; int rc; + mutex_lock(&fcoe_config_mutex); +#ifdef CONFIG_FCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module paramter sysfs files are writable before the + * module_init function is called and after module_exit. 
+ */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto out_nodev; + } +#endif + netdev = fcoe_if_to_netdev(buffer); if (!netdev) { rc = -ENODEV; @@ -1705,6 +1721,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) out_putdev: dev_put(netdev); out_nodev: + mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1722,6 +1739,19 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) struct fc_lport *lport; struct net_device *netdev; + mutex_lock(&fcoe_config_mutex); +#ifdef CONFIG_FCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module paramter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto out_nodev; + } +#endif + netdev = fcoe_if_to_netdev(buffer); if (!netdev) { rc = -ENODEV; @@ -1768,6 +1798,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) out_putdev: dev_put(netdev); out_nodev: + mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1971,6 +2002,8 @@ static int __init fcoe_init(void) int rc = 0; struct fcoe_percpu_s *p; + mutex_lock(&fcoe_config_mutex); + for_each_possible_cpu(cpu) { p = &per_cpu(fcoe_percpu, cpu); skb_queue_head_init(&p->fcoe_rx_list); @@ -1991,13 +2024,14 @@ static int __init fcoe_init(void) if (rc) goto out_free; + mutex_unlock(&fcoe_config_mutex); return 0; out_free: for_each_online_cpu(cpu) { fcoe_percpu_thread_destroy(cpu); } - + mutex_unlock(&fcoe_config_mutex); return rc; } module_init(fcoe_init); @@ -2012,6 +2046,8 @@ static void __exit fcoe_exit(void) unsigned int cpu; struct fcoe_interface *fcoe, *tmp; + mutex_lock(&fcoe_config_mutex); + fcoe_dev_cleanup(); /* releases the associated fcoe hosts */ @@ -2025,5 +2061,7 @@ static void __exit fcoe_exit(void) /* detach from scsi transport */ fcoe_if_exit(); + + mutex_unlock(&fcoe_config_mutex); } module_exit(fcoe_exit); From c863df33bb784eecfb24090d2258fa0d3f653750 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:18 -0700 Subject: [PATCH 144/209] [SCSI] fcoe: move the host-list add/remove to keep out VN_Ports We only want the FCoE create and destroy routines to deal with top level N_Ports, the VN_Ports are tracked on the vport list (see scsi_transport_fc). Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 43added0a17c..c9a0346e493a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -616,9 +616,6 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Logout of the fabric */ fc_fabric_logoff(lport); - /* Remove the instance from fcoe's list */ - fcoe_hostlist_remove(lport); - /* Cleanup the fc_lport */ fc_lport_destroy(lport); fc_fcp_destroy(lport); @@ -757,11 +754,13 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, /* * fcoe_em_alloc() and fcoe_hostlist_add() both - * need to be atomic under fcoe_hostlist_lock + * need to be atomic with respect to other changes to the hostlist * since fcoe_em_alloc() looks for an existing EM * instance on host list updated by fcoe_hostlist_add(). + * + * This is currently handled through the fcoe_config_mutex begin held. 
*/ - write_lock(&fcoe_hostlist_lock); + /* lport exch manager allocation */ rc = fcoe_em_config(lport); if (rc) { @@ -770,10 +769,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_lp_destroy; } - /* add to lports list */ - fcoe_hostlist_add(lport); - write_unlock(&fcoe_hostlist_lock); - dev_hold(netdev); fcoe_interface_get(fcoe); return lport; @@ -1713,6 +1708,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) rc = -ENODEV; goto out_putdev; } + /* Remove the instance from fcoe's list */ + fcoe_hostlist_remove(lport); port = lport_priv(lport); fcoe = port->fcoe; fcoe_if_destroy(lport); @@ -1782,6 +1779,9 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) /* Make this the "master" N_Port */ fcoe->ctlr.lp = lport; + /* add to lports list */ + fcoe_hostlist_add(lport); + /* start FIP Discovery and FLOGI */ lport->boot_time = jiffies; fc_fabric_login(lport); @@ -1954,8 +1954,6 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) * fcoe_hostlist_add() - Add a lport to lports list * @lp: ptr to the fc_lport to be added * - * Called with write fcoe_hostlist_lock held. - * * Returns: 0 for success */ int fcoe_hostlist_add(const struct fc_lport *lport) @@ -1963,12 +1961,14 @@ int fcoe_hostlist_add(const struct fc_lport *lport) struct fcoe_interface *fcoe; struct fcoe_port *port; + write_lock_bh(&fcoe_hostlist_lock); fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); if (!fcoe) { port = lport_priv(lport); fcoe = port->fcoe; list_add_tail(&fcoe->list, &fcoe_hostlist); } + write_unlock_bh(&fcoe_hostlist_lock); return 0; } @@ -2045,14 +2045,21 @@ static void __exit fcoe_exit(void) { unsigned int cpu; struct fcoe_interface *fcoe, *tmp; + LIST_HEAD(local_list); mutex_lock(&fcoe_config_mutex); fcoe_dev_cleanup(); /* releases the associated fcoe hosts */ - list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) + write_lock_bh(&fcoe_hostlist_lock); + list_splice_init(&fcoe_hostlist, &local_list); + write_unlock_bh(&fcoe_hostlist_lock); + + list_for_each_entry_safe(fcoe, tmp, &local_list, list) { + list_del(&fcoe->list); fcoe_if_destroy(fcoe->ctlr.lp); + } unregister_hotcpu_notifier(&fcoe_cpu_notifier); From 2e70e2415193b84c1b79ec373af15c3f280ad7c4 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:23 -0700 Subject: [PATCH 145/209] [SCSI] fcoe: Fix module ref count bug by adding NETDEV UNREGISTER handling Fixes reference counting on fcoe_instance and net_device, and adds NETDEV_UNREGISTER notifier handling so that you can unload network drivers. FCoE no longer increments the module use count for the network driver. On an NETDEV_UNREGISTER event, destroying the FCoE instance is deferred to a workqueue context to avoid RTNL deadlocks. Based in part by an earlier patch from John Fastabend John's patch description: Currently, the netdev module ref count is not decremented with module_put() when the module is unloaded while fcoe instances are present. To fix this removed reference count on netdev module completely and added functionality to netdev event handling for NETDEV_UNREGISTER events. This allows fcoe to remove devices cleanly when the netdev module is unloaded so we no longer need to hold a reference count for the netdev module. 
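The deadlock-avoidance pattern here is a common one: the netdev notifier runs with the RTNL mutex held, so anything that might itself need RTNL, or that simply takes too long, is pushed to a workqueue. A condensed sketch of the shape added by this patch follows; handle_netdev_unregister() is a made-up helper name standing in for the NETDEV_UNREGISTER case of fcoe_device_notification(), and the real code is in the hunks below.

/* Hypothetical helper for the NETDEV_UNREGISTER case; the notifier is
 * invoked by the network core with the RTNL mutex already held. */
static void handle_netdev_unregister(struct fcoe_interface *fcoe)
{
	struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);

	list_del(&fcoe->list);			/* drop from the host list */
	fcoe_interface_cleanup(fcoe);		/* RTNL work done right here */
	schedule_work(&port->destroy_work);	/* defer the heavy teardown */
}

/* Work handler as added by this patch: runs in process context without
 * RTNL held, so fcoe_if_destroy() can take whatever locks it needs. */
static void fcoe_destroy_work(struct work_struct *work)
{
	struct fcoe_port *port =
		container_of(work, struct fcoe_port, destroy_work);

	mutex_lock(&fcoe_config_mutex);
	fcoe_if_destroy(port->lport);
	mutex_unlock(&fcoe_config_mutex);
}

The work item is set up with INIT_WORK(&port->destroy_work, fcoe_destroy_work) in fcoe_if_create(). Module unload uses the same deferral: fcoe_exit() schedules the destroy work for each interface and then calls flush_scheduled_work() before detaching from the SCSI transport, so no destroy work can still be pending when fcoe_if_exit() runs.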
Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 186 ++++++++++++++------------------------- drivers/scsi/fcoe/fcoe.h | 2 + 2 files changed, 67 insertions(+), 121 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c9a0346e493a..c0264a984394 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -74,12 +74,13 @@ static int fcoe_link_ok(struct fc_lport *lp); static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); -static int fcoe_hostlist_remove(const struct fc_lport *); static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); +static struct fcoe_interface * + fcoe_hostlist_lookup_port(const struct net_device *dev); /* notification function from net device */ static struct notifier_block fcoe_notifier = { @@ -149,6 +150,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev, * @netdev : ptr to the associated netdevice struct * * Returns : 0 for success + * Locking: must be called with the RTNL mutex held */ static int fcoe_interface_setup(struct fcoe_interface *fcoe, struct net_device *netdev) @@ -188,13 +190,11 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, * or enter promiscuous mode if not capable of listening * for multiple unicast MACs. */ - rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_add(netdev, flogi_maddr); if (fip->spma) dev_unicast_add(netdev, fip->ctl_src_addr); dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); - rtnl_unlock(); /* * setup the receive function from ethernet driver @@ -215,6 +215,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new); +static void fcoe_destroy_work(struct work_struct *work); /** * fcoe_interface_create() @@ -232,6 +233,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) return NULL; } + dev_hold(netdev); kref_init(&fcoe->kref); /* @@ -249,6 +251,8 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) /** * fcoe_interface_cleanup() - clean up netdev configurations * @fcoe: + * + * Caller must be holding the RTNL mutex */ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) { @@ -266,11 +270,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) __dev_remove_pack(&fcoe->fip_packet_type); synchronize_net(); - /* tear-down the FCoE controller */ - fcoe_ctlr_destroy(&fcoe->ctlr); - /* Delete secondary MAC addresses */ - rtnl_lock(); memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); dev_unicast_delete(netdev, flogi_maddr); if (!is_zero_ether_addr(fip->data_src_addr)) @@ -278,7 +278,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) if (fip->spma) dev_unicast_delete(netdev, fip->ctl_src_addr); dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); - rtnl_unlock(); } /** @@ -288,10 +287,14 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) static void fcoe_interface_release(struct kref *kref) { struct fcoe_interface *fcoe; + struct net_device *netdev; fcoe = container_of(kref, struct fcoe_interface, kref); - fcoe_interface_cleanup(fcoe); + netdev = fcoe->netdev; + /* tear-down the FCoE 
controller */ + fcoe_ctlr_destroy(&fcoe->ctlr); kfree(fcoe); + dev_put(netdev); } /** @@ -642,8 +645,7 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Free memory used by statistical counters */ fc_lport_free_stats(lport); - /* Release the net_device and Scsi_Host */ - dev_put(netdev); + /* Release the Scsi_Host */ scsi_host_put(lport->host); } @@ -718,7 +720,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, } lport = shost_priv(shost); port = lport_priv(lport); + port->lport = lport; port->fcoe = fcoe; + INIT_WORK(&port->destroy_work, fcoe_destroy_work); /* configure fc_lport, e.g., em */ rc = fcoe_lport_config(lport); @@ -769,7 +773,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, goto out_lp_destroy; } - dev_hold(netdev); fcoe_interface_get(fcoe); return lport; @@ -1530,19 +1533,19 @@ static int fcoe_device_notification(struct notifier_block *notifier, struct fc_lport *lp = NULL; struct net_device *netdev = ptr; struct fcoe_interface *fcoe; + struct fcoe_port *port; struct fcoe_dev_stats *stats; u32 link_possible = 1; u32 mfs; int rc = NOTIFY_OK; - read_lock(&fcoe_hostlist_lock); + write_lock(&fcoe_hostlist_lock); list_for_each_entry(fcoe, &fcoe_hostlist, list) { if (fcoe->netdev == netdev) { lp = fcoe->ctlr.lp; break; } } - read_unlock(&fcoe_hostlist_lock); if (lp == NULL) { rc = NOTIFY_DONE; goto out; @@ -1564,6 +1567,13 @@ static int fcoe_device_notification(struct notifier_block *notifier, break; case NETDEV_REGISTER: break; + case NETDEV_UNREGISTER: + list_del(&fcoe->list); + port = lport_priv(fcoe->ctlr.lp); + fcoe_interface_cleanup(fcoe); + schedule_work(&port->destroy_work); + goto out; + break; default: FCOE_NETDEV_DBG(netdev, "Unknown event %ld " "from netdev netlink\n", event); @@ -1576,6 +1586,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, fcoe_clean_pending_queue(lp); } out: + write_unlock(&fcoe_hostlist_lock); return rc; } @@ -1600,75 +1611,6 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) return NULL; } -/** - * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev - * @netdev: the target netdev - * - * Returns: ptr to the struct module, NULL for failure - */ -static struct module * -fcoe_netdev_to_module_owner(const struct net_device *netdev) -{ - struct device *dev; - - if (!netdev) - return NULL; - - dev = netdev->dev.parent; - if (!dev) - return NULL; - - if (!dev->driver) - return NULL; - - return dev->driver->owner; -} - -/** - * fcoe_ethdrv_get() - Hold the Ethernet driver - * @netdev: the target netdev - * - * Holds the Ethernet driver module by try_module_get() for - * the corresponding netdev. - * - * Returns: 0 for success - */ -static int fcoe_ethdrv_get(const struct net_device *netdev) -{ - struct module *owner; - - owner = fcoe_netdev_to_module_owner(netdev); - if (owner) { - FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n", - module_name(owner)); - return try_module_get(owner); - } - return -ENODEV; -} - -/** - * fcoe_ethdrv_put() - Release the Ethernet driver - * @netdev: the target netdev - * - * Releases the Ethernet driver module by module_put for - * the corresponding netdev. 
- * - * Returns: 0 for success - */ -static int fcoe_ethdrv_put(const struct net_device *netdev) -{ - struct module *owner; - - owner = fcoe_netdev_to_module_owner(netdev); - if (owner) { - FCOE_NETDEV_DBG(netdev, "Release driver module %s\n", - module_name(owner)); - module_put(owner); - return 0; - } - return -ENODEV; -} - /** * fcoe_destroy() - handles the destroy from sysfs * @buffer: expected to be an eth if name @@ -1678,10 +1620,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) */ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) { - struct net_device *netdev; struct fcoe_interface *fcoe; - struct fcoe_port *port; - struct fc_lport *lport; + struct net_device *netdev; int rc; mutex_lock(&fcoe_config_mutex); @@ -1702,19 +1642,20 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) rc = -ENODEV; goto out_nodev; } - /* look for existing lport */ - lport = fcoe_hostlist_lookup(netdev); - if (!lport) { + + write_lock(&fcoe_hostlist_lock); + fcoe = fcoe_hostlist_lookup_port(netdev); + if (!fcoe) { + write_unlock(&fcoe_hostlist_lock); rc = -ENODEV; goto out_putdev; } - /* Remove the instance from fcoe's list */ - fcoe_hostlist_remove(lport); - port = lport_priv(lport); - fcoe = port->fcoe; - fcoe_if_destroy(lport); - fcoe_ethdrv_put(netdev); - rc = 0; + list_del(&fcoe->list); + write_unlock(&fcoe_hostlist_lock); + rtnl_lock(); + fcoe_interface_cleanup(fcoe); + rtnl_unlock(); + fcoe_if_destroy(fcoe->ctlr.lp); out_putdev: dev_put(netdev); out_nodev: @@ -1722,6 +1663,16 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) return rc; } +static void fcoe_destroy_work(struct work_struct *work) +{ + struct fcoe_port *port; + + port = container_of(work, struct fcoe_port, destroy_work); + mutex_lock(&fcoe_config_mutex); + fcoe_if_destroy(port->lport); + mutex_unlock(&fcoe_config_mutex); +} + /** * fcoe_create() - Handles the create call from sysfs * @buffer: expected to be an eth if name @@ -1749,17 +1700,18 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) } #endif + rtnl_lock(); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { rc = -ENODEV; goto out_nodev; } + /* look for existing lport */ if (fcoe_hostlist_lookup(netdev)) { rc = -EEXIST; goto out_putdev; } - fcoe_ethdrv_get(netdev); fcoe = fcoe_interface_create(netdev); if (!fcoe) { @@ -1771,8 +1723,8 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) if (IS_ERR(lport)) { printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); - fcoe_ethdrv_put(netdev); rc = -EIO; + fcoe_interface_cleanup(fcoe); goto out_free; } @@ -1798,6 +1750,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) out_putdev: dev_put(netdev); out_nodev: + rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1972,25 +1925,6 @@ int fcoe_hostlist_add(const struct fc_lport *lport) return 0; } -/** - * fcoe_hostlist_remove() - remove a lport from lports list - * @lp: ptr to the fc_lport to be removed - * - * Returns: 0 for success - */ -int fcoe_hostlist_remove(const struct fc_lport *lport) -{ - struct fcoe_interface *fcoe; - - write_lock_bh(&fcoe_hostlist_lock); - fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); - BUG_ON(!fcoe); - list_del(&fcoe->list); - write_unlock_bh(&fcoe_hostlist_lock); - - return 0; -} - /** * fcoe_init() - fcoe module loading initialization * @@ -2046,6 +1980,7 @@ static void __exit fcoe_exit(void) unsigned int cpu; struct fcoe_interface *fcoe, *tmp; LIST_HEAD(local_list); + struct 
fcoe_port *port; mutex_lock(&fcoe_config_mutex); @@ -2058,7 +1993,11 @@ static void __exit fcoe_exit(void) list_for_each_entry_safe(fcoe, tmp, &local_list, list) { list_del(&fcoe->list); - fcoe_if_destroy(fcoe->ctlr.lp); + port = lport_priv(fcoe->ctlr.lp); + rtnl_lock(); + fcoe_interface_cleanup(fcoe); + rtnl_unlock(); + schedule_work(&port->destroy_work); } unregister_hotcpu_notifier(&fcoe_cpu_notifier); @@ -2066,9 +2005,14 @@ static void __exit fcoe_exit(void) for_each_online_cpu(cpu) fcoe_percpu_thread_destroy(cpu); - /* detach from scsi transport */ - fcoe_if_exit(); - mutex_unlock(&fcoe_config_mutex); + + /* flush any asyncronous interface destroys, + * this should happen after the netdev notifier is unregistered */ + flush_scheduled_work(); + + /* detach from scsi transport + * must happen after all destroys are done, therefor after the flush */ + fcoe_if_exit(); } module_exit(fcoe_exit); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index ff229288b7f0..ce7f60fb1bc0 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -93,9 +93,11 @@ struct fcoe_interface { */ struct fcoe_port { struct fcoe_interface *fcoe; + struct fc_lport *lport; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; struct timer_list timer; /* queue timer */ + struct work_struct destroy_work; /* to prevent rtnl deadlocks */ }; #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) From 090eb6c41aa74273d3f0721637cff738cfd80669 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 25 Aug 2009 14:00:28 -0700 Subject: [PATCH 146/209] [SCSI] fcoe: use rtnl mutex in place of hostlist lock This just cuts down on the number of locks we're dealing with, and eliminates the need to take another lock in the netdev notifier. Signed-off-by: Chris Leech Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c0264a984394..ac481ad112ad 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -58,8 +58,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ DEFINE_MUTEX(fcoe_config_mutex); /* fcoe host list */ +/* must only by accessed under the RTNL mutex */ LIST_HEAD(fcoe_hostlist); -DEFINE_RWLOCK(fcoe_hostlist_lock); DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ @@ -527,8 +527,6 @@ bool fcoe_oem_match(struct fc_frame *fp) * fcoe_em_config() - allocates em for this lport * @lp: the fcoe that em is to allocated for * - * Called with write fcoe_hostlist_lock held. 
- * * Returns : 0 on success */ static inline int fcoe_em_config(struct fc_lport *lp) @@ -1539,7 +1537,6 @@ static int fcoe_device_notification(struct notifier_block *notifier, u32 mfs; int rc = NOTIFY_OK; - write_lock(&fcoe_hostlist_lock); list_for_each_entry(fcoe, &fcoe_hostlist, list) { if (fcoe->netdev == netdev) { lp = fcoe->ctlr.lp; @@ -1586,7 +1583,6 @@ static int fcoe_device_notification(struct notifier_block *notifier, fcoe_clean_pending_queue(lp); } out: - write_unlock(&fcoe_hostlist_lock); return rc; } @@ -1643,16 +1639,14 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) goto out_nodev; } - write_lock(&fcoe_hostlist_lock); + rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); if (!fcoe) { - write_unlock(&fcoe_hostlist_lock); + rtnl_unlock(); rc = -ENODEV; goto out_putdev; } list_del(&fcoe->list); - write_unlock(&fcoe_hostlist_lock); - rtnl_lock(); fcoe_interface_cleanup(fcoe); rtnl_unlock(); fcoe_if_destroy(fcoe->ctlr.lp); @@ -1870,9 +1864,8 @@ int fcoe_reset(struct Scsi_Host *shost) * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device * @dev: this is currently ptr to net_device * - * Called with fcoe_hostlist_lock held. - * * Returns: NULL or the located fcoe_port + * Locking: must be called with the RNL mutex held */ static struct fcoe_interface * fcoe_hostlist_lookup_port(const struct net_device *dev) @@ -1891,15 +1884,13 @@ fcoe_hostlist_lookup_port(const struct net_device *dev) * @netdev: ptr to net_device * * Returns: 0 for success + * Locking: must be called with the RTNL mutex held */ -struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) +static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) { struct fcoe_interface *fcoe; - read_lock(&fcoe_hostlist_lock); fcoe = fcoe_hostlist_lookup_port(netdev); - read_unlock(&fcoe_hostlist_lock); - return (fcoe) ? 
fcoe->ctlr.lp : NULL; } @@ -1908,20 +1899,19 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) * @lp: ptr to the fc_lport to be added * * Returns: 0 for success + * Locking: must be called with the RTNL mutex held */ -int fcoe_hostlist_add(const struct fc_lport *lport) +static int fcoe_hostlist_add(const struct fc_lport *lport) { struct fcoe_interface *fcoe; struct fcoe_port *port; - write_lock_bh(&fcoe_hostlist_lock); fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); if (!fcoe) { port = lport_priv(lport); fcoe = port->fcoe; list_add_tail(&fcoe->list, &fcoe_hostlist); } - write_unlock_bh(&fcoe_hostlist_lock); return 0; } @@ -1979,7 +1969,6 @@ static void __exit fcoe_exit(void) { unsigned int cpu; struct fcoe_interface *fcoe, *tmp; - LIST_HEAD(local_list); struct fcoe_port *port; mutex_lock(&fcoe_config_mutex); @@ -1987,18 +1976,14 @@ static void __exit fcoe_exit(void) fcoe_dev_cleanup(); /* releases the associated fcoe hosts */ - write_lock_bh(&fcoe_hostlist_lock); - list_splice_init(&fcoe_hostlist, &local_list); - write_unlock_bh(&fcoe_hostlist_lock); - - list_for_each_entry_safe(fcoe, tmp, &local_list, list) { + rtnl_lock(); + list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); - rtnl_lock(); fcoe_interface_cleanup(fcoe); - rtnl_unlock(); schedule_work(&port->destroy_work); } + rtnl_unlock(); unregister_hotcpu_notifier(&fcoe_cpu_notifier); From ab28f1fd3b0d14c1bd693e640decd711d5e6642a Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:00:34 -0700 Subject: [PATCH 147/209] [SCSI] libfc: prepare to split off struct fc_rport_priv from fc_rport_libfc_priv While the I/O and LLD interfaces use fc_rport_libfc_priv, the disc and rport interfaces will use fc_rport_priv, which will be separately allocated. Change the disc and rport usage of fc_rport_libfc_priv to fc_rport_priv. Use #define temporarily to make both names equivalent until a subsequent patch splits them. 
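The temporary #define is the whole trick: it lets the bulk rename of rdata users land as a mechanical, easy-to-review patch while both names still refer to the same structure. A tiny standalone illustration of the technique, with made-up old_priv/new_priv names rather than the libfc types:

#include <stdio.h>

struct old_priv {			/* stands in for fc_rport_libfc_priv */
	int port_id;
};

/* Step 1 (this patch): the new name is only an alias for the old type. */
#define new_priv old_priv

static void show(struct new_priv *p)	/* already written against the new name */
{
	printf("port_id=%d\n", p->port_id);
}

int main(void)
{
	struct old_priv legacy = { .port_id = 7 };

	show(&legacy);	/* old and new names refer to the same type */
	return 0;
}

/* Step 2 (a later patch): drop the #define and give new_priv its own,
 * separately allocated definition; only then can the layouts diverge. */

Once every user has been converted, the alias is deleted and fc_rport_priv gets its own definition; that is when the separate allocation mentioned above actually happens.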
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 18 +++++------ drivers/scsi/libfc/fc_lport.c | 2 +- drivers/scsi/libfc/fc_rport.c | 60 +++++++++++++++++------------------ include/scsi/libfc.h | 10 +++++- 4 files changed, 49 insertions(+), 41 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 6fabf66972b9..4c8d893af7bd 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -65,7 +65,7 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport, { const struct fc_disc *disc = &lport->disc; struct fc_rport *rport, *found = NULL; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; int disc_found = 0; list_for_each_entry(rdata, &disc->rports, peers) { @@ -94,7 +94,7 @@ void fc_disc_stop_rports(struct fc_disc *disc) { struct fc_lport *lport; struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata, *next; + struct fc_rport_priv *rdata, *next; lport = disc->lport; @@ -126,7 +126,7 @@ static void fc_disc_rport_callback(struct fc_lport *lport, struct fc_rport *rport, enum fc_rport_event event) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_disc *disc = &lport->disc; FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, @@ -170,7 +170,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, { struct fc_lport *lport; struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; struct fc_seq_els_data rjt_data; @@ -309,7 +309,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, static void fc_disc_restart(struct fc_disc *disc) { struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata, *next; + struct fc_rport_priv *rdata, *next; struct fc_lport *lport = disc->lport; FC_DISC_DBG(disc, "Restarting discovery\n"); @@ -400,7 +400,7 @@ static int fc_disc_new_target(struct fc_disc *disc, struct fc_rport_identifiers *ids) { struct fc_lport *lport = disc->lport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; int error = 0; if (rport && ids->port_name) { @@ -458,7 +458,7 @@ static int fc_disc_new_target(struct fc_disc *disc, static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport) { struct fc_lport *lport = disc->lport; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; list_del(&rdata->peers); lport->tt.rport_logoff(rport); } @@ -580,7 +580,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) int error = 0; struct fc_disc_port dp; struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; lport = disc->lport; @@ -774,7 +774,7 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) { struct fc_lport *lport; struct fc_rport *new_rport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; lport = disc->lport; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ca8ea264b684..f7f328f952a5 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1306,7 +1306,7 @@ static struct fc_rport_operations fc_lport_rport_ops = { static void fc_lport_enter_dns(struct fc_lport *lport) { struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; struct fc_disc_port dp; dp.ids.port_id = 
FC_FID_DIR_SERV; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 90cc90dd3b5d..5f8f437e76b3 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -96,7 +96,7 @@ static void fc_rport_rogue_destroy(struct device *dev) struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) { struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata; + struct fc_rport_priv *rdata; rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL); if (!rport) @@ -144,7 +144,7 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) static const char *fc_rport_state(struct fc_rport *rport) { const char *cp; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; cp = fc_rport_state_names[rdata->rp_state]; if (!cp) @@ -199,7 +199,7 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, static void fc_rport_state_enter(struct fc_rport *rport, enum fc_rport_state new) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; if (rdata->rp_state != new) rdata->retries = 0; rdata->rp_state = new; @@ -208,8 +208,8 @@ static void fc_rport_state_enter(struct fc_rport *rport, static void fc_rport_work(struct work_struct *work) { u32 port_id; - struct fc_rport_libfc_priv *rdata = - container_of(work, struct fc_rport_libfc_priv, event_work); + struct fc_rport_priv *rdata = + container_of(work, struct fc_rport_priv, event_work); enum fc_rport_event event; enum fc_rport_trans_state trans_state; struct fc_lport *lport = rdata->local_port; @@ -222,7 +222,7 @@ static void fc_rport_work(struct work_struct *work) if (event == RPORT_EV_CREATED) { struct fc_rport *new_rport; - struct fc_rport_libfc_priv *new_rdata; + struct fc_rport_priv *new_rdata; struct fc_rport_identifiers ids; ids.port_id = rport->port_id; @@ -299,7 +299,7 @@ static void fc_rport_work(struct work_struct *work) */ int fc_rport_login(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; mutex_lock(&rdata->rp_mutex); @@ -329,7 +329,7 @@ int fc_rport_login(struct fc_rport *rport) static void fc_rport_enter_delete(struct fc_rport *rport, enum fc_rport_event event) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; if (rdata->rp_state == RPORT_ST_DELETE) return; @@ -353,7 +353,7 @@ static void fc_rport_enter_delete(struct fc_rport *rport, */ int fc_rport_logoff(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; mutex_lock(&rdata->rp_mutex); @@ -387,7 +387,7 @@ int fc_rport_logoff(struct fc_rport *rport) */ static void fc_rport_enter_ready(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; fc_rport_state_enter(rport, RPORT_ST_READY); @@ -400,7 +400,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport) /** * fc_rport_timeout() - Handler for the retry_work timer. - * @work: The work struct of the fc_rport_libfc_priv + * @work: The work struct of the fc_rport_priv * * Locking Note: Called without the rport lock held. 
This * function will hold the rport lock, call an _enter_* @@ -408,8 +408,8 @@ static void fc_rport_enter_ready(struct fc_rport *rport) */ static void fc_rport_timeout(struct work_struct *work) { - struct fc_rport_libfc_priv *rdata = - container_of(work, struct fc_rport_libfc_priv, retry_work.work); + struct fc_rport_priv *rdata = + container_of(work, struct fc_rport_priv, retry_work.work); struct fc_rport *rport = PRIV_TO_RPORT(rdata); mutex_lock(&rdata->rp_mutex); @@ -446,7 +446,7 @@ static void fc_rport_timeout(struct work_struct *work) */ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", PTR_ERR(fp), fc_rport_state(rport), rdata->retries); @@ -480,7 +480,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) */ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; unsigned long delay = FC_DEF_E_D_TOV; /* make sure this isn't an FC_EX_CLOSED error, never retry those */ @@ -515,7 +515,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg) { struct fc_rport *rport = rp_arg; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *plp = NULL; unsigned int tov; @@ -586,7 +586,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_rport_enter_plogi(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; @@ -624,7 +624,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg) { struct fc_rport *rport = rp_arg; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct { struct fc_els_prli prli; struct fc_els_spp spp; @@ -694,7 +694,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg) { struct fc_rport *rport = rp_arg; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; u8 op; mutex_lock(&rdata->rp_mutex); @@ -738,7 +738,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_rport_enter_prli(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct { struct fc_els_prli prli; @@ -780,7 +780,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg) { struct fc_rport *rport = rp_arg; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; u8 op; mutex_lock(&rdata->rp_mutex); @@ -841,7 +841,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, static void fc_rport_enter_rtv(struct fc_rport *rport) { struct fc_frame *fp; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", @@ -871,7 +871,7 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) */ static void 
fc_rport_enter_logo(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; @@ -907,7 +907,7 @@ static void fc_rport_enter_logo(struct fc_rport *rport) void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame_header *fh; @@ -967,7 +967,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, static void fc_rport_recv_plogi_req(struct fc_rport *rport, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp = rx_fp; struct fc_exch *ep; @@ -1090,7 +1090,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, static void fc_rport_recv_prli_req(struct fc_rport *rport, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_exch *ep; struct fc_frame *fp; @@ -1242,7 +1242,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, struct fc_frame *fp) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame_header *fh; @@ -1278,7 +1278,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_frame_header *fh; - struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; fh = fc_frame_header_get(fp); @@ -1342,8 +1342,8 @@ EXPORT_SYMBOL(fc_destroy_rport); void fc_rport_terminate_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; - struct fc_lport *lport = rdata->local_port; + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fc_lport *lport = rp->local_port; lport->tt.exch_mgr_reset(lport, 0, rport->port_id); lport->tt.exch_mgr_reset(lport, rport->port_id, 0); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 53b38814d38a..aa219514c96b 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -77,7 +77,7 @@ do { \ #define FC_RPORT_DBG(rport, fmt, args...) \ do { \ - struct fc_rport_libfc_priv *rdata = rport->dd_data; \ + struct fc_rport_priv *rdata = rport->dd_data; \ struct fc_lport *lport = rdata->local_port; \ FC_RPORT_ID_DBG(lport, rport->port_id, fmt, ##args); \ } while (0) @@ -177,6 +177,14 @@ enum fc_rport_event { RPORT_EV_LOGO }; +/* + * Temporary definition to prepare for split off from fc_rport_libfc_priv + * of a separately-allocated structure called fc_rport_priv. This will + * be the primary object for the discovery and rport state machines. + * This definition is just to make this patch series easier to review. 
+ */ +#define fc_rport_priv fc_rport_libfc_priv + struct fc_rport_operations { void (*event_callback)(struct fc_lport *, struct fc_rport *, enum fc_rport_event); From 795d86f55ec3bf6280dda368f208943f1fb7d366 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:00:39 -0700 Subject: [PATCH 148/209] [SCSI] libfc: change interface for rport_create The interface for lport->tt.rport_create() takes a fc_disc_port arg, which is unnatural for most calls. The only reason for this was to avoid passing in the local port as an argument, but otherwise added to complexity. Simplify by just using lport and fc_rport_identifiers. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 29 +++++++++++------------------ drivers/scsi/libfc/fc_lport.c | 26 ++++++++++++-------------- drivers/scsi/libfc/fc_rport.c | 17 +++++++++-------- include/scsi/libfc.h | 5 ++--- 4 files changed, 34 insertions(+), 43 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 4c8d893af7bd..ecc625c20520 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -428,13 +428,7 @@ static int fc_disc_new_target(struct fc_disc *disc, if (!rport) { rport = lport->tt.rport_lookup(lport, ids->port_id); if (!rport) { - struct fc_disc_port dp; - dp.lp = lport; - dp.ids.port_id = ids->port_id; - dp.ids.port_name = ids->port_name; - dp.ids.node_name = ids->node_name; - dp.ids.roles = ids->roles; - rport = lport->tt.rport_create(&dp); + rport = lport->tt.rport_create(lport, ids); } if (!rport) error = -ENOMEM; @@ -578,7 +572,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) size_t plen; size_t tlen; int error = 0; - struct fc_disc_port dp; + struct fc_rport_identifiers ids; struct fc_rport *rport; struct fc_rport_priv *rdata; @@ -621,15 +615,14 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) * After the first time through the loop, things return to "normal". 
*/ while (plen >= sizeof(*np)) { - dp.lp = lport; - dp.ids.port_id = ntoh24(np->fp_fid); - dp.ids.port_name = ntohll(np->fp_wwpn); - dp.ids.node_name = -1; - dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; + ids.port_id = ntoh24(np->fp_fid); + ids.port_name = ntohll(np->fp_wwpn); + ids.node_name = -1; + ids.roles = FC_RPORT_ROLE_UNKNOWN; - if ((dp.ids.port_id != fc_host_port_id(lport->host)) && - (dp.ids.port_name != lport->wwpn)) { - rport = lport->tt.rport_create(&dp); + if (ids.port_id != fc_host_port_id(lport->host) && + ids.port_name != lport->wwpn) { + rport = lport->tt.rport_create(lport, &ids); if (rport) { rdata = rport->dd_data; rdata->ops = &fc_disc_rport_ops; @@ -640,7 +633,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) } else printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " - "(%6x)\n", dp.ids.port_id); + "(%6x)\n", ids.port_id); } if (np->fp_flags & FC_NS_FID_LAST) { @@ -781,7 +774,7 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) if (dp->ids.port_id == fc_host_port_id(lport->host)) goto out; - new_rport = lport->tt.rport_create(dp); + new_rport = lport->tt.rport_create(lport, &dp->ids); if (new_rport) { rdata = new_rport->dd_data; rdata->ops = &fc_disc_rport_ops; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f7f328f952a5..a78161cf1811 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -211,20 +211,19 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, u32 remote_fid, u64 remote_wwpn, u64 remote_wwnn) { - struct fc_disc_port dp; + struct fc_rport_identifiers ids; - dp.lp = lport; - dp.ids.port_id = remote_fid; - dp.ids.port_name = remote_wwpn; - dp.ids.node_name = remote_wwnn; - dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; + ids.port_id = remote_fid; + ids.port_name = remote_wwpn; + ids.node_name = remote_wwnn; + ids.roles = FC_RPORT_ROLE_UNKNOWN; if (lport->ptp_rp) { lport->tt.rport_logoff(lport->ptp_rp); lport->ptp_rp = NULL; } - lport->ptp_rp = lport->tt.rport_create(&dp); + lport->ptp_rp = lport->tt.rport_create(lport, &ids); lport->tt.rport_login(lport->ptp_rp); @@ -1307,20 +1306,19 @@ static void fc_lport_enter_dns(struct fc_lport *lport) { struct fc_rport *rport; struct fc_rport_priv *rdata; - struct fc_disc_port dp; + struct fc_rport_identifiers ids; - dp.ids.port_id = FC_FID_DIR_SERV; - dp.ids.port_name = -1; - dp.ids.node_name = -1; - dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; - dp.lp = lport; + ids.port_id = FC_FID_DIR_SERV; + ids.port_name = -1; + ids.node_name = -1; + ids.roles = FC_RPORT_ROLE_UNKNOWN; FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_DNS); - rport = lport->tt.rport_create(&dp); + rport = lport->tt.rport_create(lport, &ids); if (!rport) goto err; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 5f8f437e76b3..2fbc94aaf343 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -93,7 +93,8 @@ static void fc_rport_rogue_destroy(struct device *dev) kfree(rport); } -struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) +struct fc_rport *fc_rport_rogue_create(struct fc_lport *lport, + struct fc_rport_identifiers *ids) { struct fc_rport *rport; struct fc_rport_priv *rdata; @@ -105,10 +106,10 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) rdata = RPORT_TO_PRIV(rport); rport->dd_data = rdata; - rport->port_id = dp->ids.port_id; - rport->port_name = 
dp->ids.port_name; - rport->node_name = dp->ids.node_name; - rport->roles = dp->ids.roles; + rport->port_id = ids->port_id; + rport->port_name = ids->port_name; + rport->node_name = ids->node_name; + rport->roles = ids->roles; rport->maxframe_size = FC_MIN_MAX_PAYLOAD; /* * Note: all this libfc rogue rport code will be removed for @@ -118,14 +119,14 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) rport->dev.release = fc_rport_rogue_destroy; mutex_init(&rdata->rp_mutex); - rdata->local_port = dp->lp; + rdata->local_port = lport; rdata->trans_state = FC_PORTSTATE_ROGUE; rdata->rp_state = RPORT_ST_INIT; rdata->event = RPORT_EV_NONE; rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; rdata->ops = NULL; - rdata->e_d_tov = dp->lp->e_d_tov; - rdata->r_a_tov = dp->lp->r_a_tov; + rdata->e_d_tov = lport->e_d_tov; + rdata->r_a_tov = lport->r_a_tov; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); /* diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index aa219514c96b..d888cbecd72e 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -227,8 +227,6 @@ struct fc_rport_libfc_priv { #define RPORT_TO_PRIV(x) \ (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport)); -struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *); - static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) { rport->node_name = wwnn; @@ -569,7 +567,8 @@ struct libfc_function_template { /* * Create a remote port */ - struct fc_rport *(*rport_create)(struct fc_disc_port *); + struct fc_rport *(*rport_create)(struct fc_lport *, + struct fc_rport_identifiers *); /* * Initiates the RP state machine. It is called from the LP module. From 922aa210bcad4b34a7bb98ec9d318b7e59e7a5ca Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:00:45 -0700 Subject: [PATCH 149/209] [SCSI] libfc: fix RPORT_TO_PRIV and PRIV_TO_RPORT() macros. These macros introduce extra undesirable semicolons that keep them from being used in expressions, and they don't protect against being passed an expression. Add parens and remove the semicolons. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/libfc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index d888cbecd72e..2fdd8ac12b89 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -223,9 +223,9 @@ struct fc_rport_libfc_priv { }; #define PRIV_TO_RPORT(x) \ - (struct fc_rport *)((void *)x - sizeof(struct fc_rport)); + ((struct fc_rport *)((void *)(x) - sizeof(struct fc_rport))) #define RPORT_TO_PRIV(x) \ - (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport)); + ((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport))) static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) { From 9fb9d32831fd687e427ec5b147bb690f468b99a0 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:00:50 -0700 Subject: [PATCH 150/209] [SCSI] libfc: make fc_rport_priv the primary rport interface. The rport and discovery modules deal with remote ports before fc_remote_port_add() can be done, because the full set of rport identifiers is not known at early stages. In preparation for splitting the fc_rport/fc_rport_priv allocation, make fc_rport_priv the primary interface for the remote port and discovery engines. The FCP / SCSI layers still deal with fc_rport and fc_rport_libfc_priv, however. 
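
Since the PRIV_TO_RPORT()/RPORT_TO_PRIV() conversions fixed in the previous patch are now used in expressions throughout this series, a minimal userspace sketch (not libfc code; the struct members are placeholders and the allocation is simplified) shows why the parenthesized, semicolon-free macro form is required and how the contiguous fc_rport/private-data layout is walked in both directions:

/*
 * Minimal userspace sketch, not libfc code: struct members are
 * placeholders and the allocation is simplified.  It shows why the
 * trailing semicolons and missing parentheses in the old macros broke
 * expression use, and how the rport and its private data convert back
 * and forth when allocated contiguously.  Arithmetic on void * is a
 * GCC extension that the kernel relies on.
 */
#include <stdio.h>
#include <stdlib.h>

struct fc_rport { unsigned int port_id; };
struct fc_rport_libfc_priv { int rp_state; };

/* Fixed form: outer parens, parenthesized argument, no trailing ';'. */
#define PRIV_TO_RPORT(x) \
	((struct fc_rport *)((void *)(x) - sizeof(struct fc_rport)))
#define RPORT_TO_PRIV(x) \
	((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport)))

int main(void)
{
	/* rport and its libfc private data live in one allocation. */
	struct fc_rport *rport = calloc(1, sizeof(struct fc_rport) +
					sizeof(struct fc_rport_libfc_priv));
	struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);

	rport->port_id = 0x0107a3;	/* example FC_ID only */

	/*
	 * With the old macros this line did not compile: the trailing
	 * ';' ended the expression early, and without the outer parens
	 * the '->' would have bound before the cast.
	 */
	printf("port_id %06x\n", PRIV_TO_RPORT(rdata)->port_id);

	free(rport);
	return 0;
}
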
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 95 ++++----- drivers/scsi/libfc/fc_elsct.c | 4 +- drivers/scsi/libfc/fc_fcp.c | 2 +- drivers/scsi/libfc/fc_lport.c | 26 +-- drivers/scsi/libfc/fc_rport.c | 364 ++++++++++++++++------------------ include/scsi/fc_encode.h | 7 +- include/scsi/libfc.h | 26 +-- 7 files changed, 245 insertions(+), 279 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index ecc625c20520..448ffc388656 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -49,7 +49,6 @@ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, struct fc_rport_identifiers *); -static void fc_disc_del_target(struct fc_disc *, struct fc_rport *); static void fc_disc_done(struct fc_disc *); static void fc_disc_timeout(struct work_struct *); static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); @@ -60,27 +59,19 @@ static void fc_disc_restart(struct fc_disc *); * @lport: Fibre Channel host port instance * @port_id: remote port port_id to match */ -struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport, - u32 port_id) +struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport, + u32 port_id) { const struct fc_disc *disc = &lport->disc; - struct fc_rport *rport, *found = NULL; + struct fc_rport *rport; struct fc_rport_priv *rdata; - int disc_found = 0; list_for_each_entry(rdata, &disc->rports, peers) { rport = PRIV_TO_RPORT(rdata); - if (rport->port_id == port_id) { - disc_found = 1; - found = rport; - break; - } + if (rport->port_id == port_id) + return rdata; } - - if (!disc_found) - found = NULL; - - return found; + return NULL; } /** @@ -93,21 +84,18 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport, void fc_disc_stop_rports(struct fc_disc *disc) { struct fc_lport *lport; - struct fc_rport *rport; struct fc_rport_priv *rdata, *next; lport = disc->lport; mutex_lock(&disc->disc_mutex); list_for_each_entry_safe(rdata, next, &disc->rports, peers) { - rport = PRIV_TO_RPORT(rdata); list_del(&rdata->peers); - lport->tt.rport_logoff(rport); + lport->tt.rport_logoff(rdata); } list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) { - rport = PRIV_TO_RPORT(rdata); - lport->tt.rport_logoff(rport); + lport->tt.rport_logoff(rdata); } mutex_unlock(&disc->disc_mutex); @@ -116,18 +104,18 @@ void fc_disc_stop_rports(struct fc_disc *disc) /** * fc_disc_rport_callback() - Event handler for rport events * @lport: The lport which is receiving the event - * @rport: The rport which the event has occured on + * @rdata: private remote port data * @event: The event that occured * * Locking Note: The rport lock should not be held when calling * this function. 
*/ static void fc_disc_rport_callback(struct fc_lport *lport, - struct fc_rport *rport, + struct fc_rport_priv *rdata, enum fc_rport_event event) { - struct fc_rport_priv *rdata = rport->dd_data; struct fc_disc *disc = &lport->disc; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, rport->port_id); @@ -169,7 +157,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_disc *disc) { struct fc_lport *lport; - struct fc_rport *rport; struct fc_rport_priv *rdata; struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; @@ -249,11 +236,10 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, redisc, lport->state, disc->pending); list_for_each_entry_safe(dp, next, &disc_ports, peers) { list_del(&dp->peers); - rport = lport->tt.rport_lookup(lport, dp->ids.port_id); - if (rport) { - rdata = rport->dd_data; + rdata = lport->tt.rport_lookup(lport, dp->ids.port_id); + if (rdata) { list_del(&rdata->peers); - lport->tt.rport_logoff(rport); + lport->tt.rport_logoff(rdata); } fc_disc_single(disc, dp); } @@ -308,16 +294,14 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_disc_restart(struct fc_disc *disc) { - struct fc_rport *rport; struct fc_rport_priv *rdata, *next; struct fc_lport *lport = disc->lport; FC_DISC_DBG(disc, "Restarting discovery\n"); list_for_each_entry_safe(rdata, next, &disc->rports, peers) { - rport = PRIV_TO_RPORT(rdata); list_del(&rdata->peers); - lport->tt.rport_logoff(rport); + lport->tt.rport_logoff(rdata); } disc->requested = 1; @@ -335,6 +319,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, enum fc_disc_event), struct fc_lport *lport) { + struct fc_rport_priv *rdata; struct fc_rport *rport; struct fc_rport_identifiers ids; struct fc_disc *disc = &lport->disc; @@ -362,8 +347,9 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, * Handle point-to-point mode as a simple discovery * of the remote port. Yucky, yucky, yuck, yuck! */ - rport = disc->lport->ptp_rp; - if (rport) { + rdata = disc->lport->ptp_rp; + if (rdata) { + rport = PRIV_TO_RPORT(rdata); ids.port_id = rport->port_id; ids.port_name = rport->port_name; ids.node_name = rport->node_name; @@ -418,7 +404,9 @@ static int fc_disc_new_target(struct fc_disc *disc, * assigned the same FCID. This should be rare. * Delete the old one and fall thru to re-create. 
*/ - fc_disc_del_target(disc, rport); + rdata = rport->dd_data; + list_del(&rdata->peers); + lport->tt.rport_logoff(rdata); rport = NULL; } } @@ -426,37 +414,26 @@ static int fc_disc_new_target(struct fc_disc *disc, ids->port_id != fc_host_port_id(lport->host) && ids->port_name != lport->wwpn) { if (!rport) { - rport = lport->tt.rport_lookup(lport, ids->port_id); + rdata = lport->tt.rport_lookup(lport, ids->port_id); if (!rport) { - rport = lport->tt.rport_create(lport, ids); + rdata = lport->tt.rport_create(lport, ids); } - if (!rport) + if (!rdata) error = -ENOMEM; + else + rport = PRIV_TO_RPORT(rdata); } if (rport) { rdata = rport->dd_data; rdata->ops = &fc_disc_rport_ops; rdata->rp_state = RPORT_ST_INIT; list_add_tail(&rdata->peers, &disc->rogue_rports); - lport->tt.rport_login(rport); + lport->tt.rport_login(rdata); } } return error; } -/** - * fc_disc_del_target() - Delete a target - * @disc: FC discovery context - * @rport: The remote port to be removed - */ -static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport) -{ - struct fc_lport *lport = disc->lport; - struct fc_rport_priv *rdata = rport->dd_data; - list_del(&rdata->peers); - lport->tt.rport_logoff(rport); -} - /** * fc_disc_done() - Discovery has been completed * @disc: FC discovery context @@ -573,7 +550,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) size_t tlen; int error = 0; struct fc_rport_identifiers ids; - struct fc_rport *rport; struct fc_rport_priv *rdata; lport = disc->lport; @@ -622,14 +598,13 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) if (ids.port_id != fc_host_port_id(lport->host) && ids.port_name != lport->wwpn) { - rport = lport->tt.rport_create(lport, &ids); - if (rport) { - rdata = rport->dd_data; + rdata = lport->tt.rport_create(lport, &ids); + if (rdata) { rdata->ops = &fc_disc_rport_ops; rdata->local_port = lport; list_add_tail(&rdata->peers, &disc->rogue_rports); - lport->tt.rport_login(rport); + lport->tt.rport_login(rdata); } else printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " @@ -766,7 +741,6 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) { struct fc_lport *lport; - struct fc_rport *new_rport; struct fc_rport_priv *rdata; lport = disc->lport; @@ -774,13 +748,12 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) if (dp->ids.port_id == fc_host_port_id(lport->host)) goto out; - new_rport = lport->tt.rport_create(lport, &dp->ids); - if (new_rport) { - rdata = new_rport->dd_data; + rdata = lport->tt.rport_create(lport, &dp->ids); + if (rdata) { rdata->ops = &fc_disc_rport_ops; kfree(dp); list_add_tail(&rdata->peers, &disc->rogue_rports); - lport->tt.rport_login(new_rport); + lport->tt.rport_login(rdata); } return; out: diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 5878b34bff18..2b8a3bbc0399 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -32,7 +32,7 @@ * fc_elsct_send - sends ELS/CT frame */ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, - struct fc_rport *rport, + struct fc_rport_priv *rdata, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, @@ -47,7 +47,7 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, /* ELS requests */ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) - rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type); + 
rc = fc_els_fill(lport, rdata, fp, op, &r_ctl, &did, &fh_type); else /* CT requests */ rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 7d5ffcbbf39b..a622096eb315 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1308,7 +1308,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp, + if (lp->tt.elsct_send(lp, rport->dd_data, fp, ELS_REC, fc_fcp_rec_resp, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index a78161cf1811..3c15abd35ffa 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -133,16 +133,18 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) /** * fc_lport_rport_callback() - Event handler for rport events * @lport: The lport which is receiving the event - * @rport: The rport which the event has occured on + * @rdata: private remote port data * @event: The event that occured * * Locking Note: The rport lock should not be held when calling * this function. */ static void fc_lport_rport_callback(struct fc_lport *lport, - struct fc_rport *rport, + struct fc_rport_priv *rdata, enum fc_rport_event event) { + struct fc_rport *rport = PRIV_TO_RPORT(rdata); + FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, rport->port_id); @@ -151,7 +153,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, if (rport->port_id == FC_FID_DIR_SERV) { mutex_lock(&lport->lp_mutex); if (lport->state == LPORT_ST_DNS) { - lport->dns_rp = rport; + lport->dns_rp = rdata; fc_lport_enter_rpn_id(lport); } else { FC_LPORT_DBG(lport, "Received an CREATED event " @@ -160,7 +162,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, "in the DNS state, it's in the " "%d state", rport->port_id, lport->state); - lport->tt.rport_logoff(rport); + lport->tt.rport_logoff(rdata); } mutex_unlock(&lport->lp_mutex); } else @@ -832,7 +834,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, { struct fc_frame_header *fh = fc_frame_header_get(fp); void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); - struct fc_rport *rport; + struct fc_rport_priv *rdata; u32 s_id; u32 d_id; struct fc_seq_els_data rjt_data; @@ -888,9 +890,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, s_id = ntoh24(fh->fh_s_id); d_id = ntoh24(fh->fh_d_id); - rport = lport->tt.rport_lookup(lport, s_id); - if (rport) - lport->tt.rport_recv_req(sp, fp, rport); + rdata = lport->tt.rport_lookup(lport, s_id); + if (rdata) + lport->tt.rport_recv_req(sp, fp, rdata); else { rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_UNAB; @@ -1304,7 +1306,6 @@ static struct fc_rport_operations fc_lport_rport_ops = { */ static void fc_lport_enter_dns(struct fc_lport *lport) { - struct fc_rport *rport; struct fc_rport_priv *rdata; struct fc_rport_identifiers ids; @@ -1318,13 +1319,12 @@ static void fc_lport_enter_dns(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_DNS); - rport = lport->tt.rport_create(lport, &ids); - if (!rport) + rdata = lport->tt.rport_create(lport, &ids); + if (!rdata) goto err; - rdata = rport->dd_data; rdata->ops = &fc_lport_rport_ops; - 
lport->tt.rport_login(rport); + lport->tt.rport_login(rdata); return; err: diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 2fbc94aaf343..13d3d758fb0e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -57,23 +57,23 @@ struct workqueue_struct *rport_event_queue; -static void fc_rport_enter_plogi(struct fc_rport *); -static void fc_rport_enter_prli(struct fc_rport *); -static void fc_rport_enter_rtv(struct fc_rport *); -static void fc_rport_enter_ready(struct fc_rport *); -static void fc_rport_enter_logo(struct fc_rport *); +static void fc_rport_enter_plogi(struct fc_rport_priv *); +static void fc_rport_enter_prli(struct fc_rport_priv *); +static void fc_rport_enter_rtv(struct fc_rport_priv *); +static void fc_rport_enter_ready(struct fc_rport_priv *); +static void fc_rport_enter_logo(struct fc_rport_priv *); -static void fc_rport_recv_plogi_req(struct fc_rport *, +static void fc_rport_recv_plogi_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_prli_req(struct fc_rport *, +static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_prlo_req(struct fc_rport *, +static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_logo_req(struct fc_rport *, +static void fc_rport_recv_logo_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); -static void fc_rport_error(struct fc_rport *, struct fc_frame *); -static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *); +static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { @@ -89,12 +89,14 @@ static const char *fc_rport_state_names[] = { static void fc_rport_rogue_destroy(struct device *dev) { struct fc_rport *rport = dev_to_rport(dev); - FC_RPORT_DBG(rport, "Destroying rogue rport\n"); + struct fc_rport_priv *rdata = RPORT_TO_PRIV(rport); + + FC_RPORT_DBG(rdata, "Destroying rogue rport\n"); kfree(rport); } -struct fc_rport *fc_rport_rogue_create(struct fc_lport *lport, - struct fc_rport_identifiers *ids) +struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, + struct fc_rport_identifiers *ids) { struct fc_rport *rport; struct fc_rport_priv *rdata; @@ -135,17 +137,16 @@ struct fc_rport *fc_rport_rogue_create(struct fc_lport *lport, */ INIT_LIST_HEAD(&rdata->peers); - return rport; + return rdata; } /** * fc_rport_state() - return a string for the state the rport is in - * @rport: The rport whose state we want to get a string for + * @rdata: remote port private data */ -static const char *fc_rport_state(struct fc_rport *rport) +static const char *fc_rport_state(struct fc_rport_priv *rdata) { const char *cp; - struct fc_rport_priv *rdata = rport->dd_data; cp = fc_rport_state_names[rdata->rp_state]; if (!cp) @@ -192,15 +193,14 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, /** * fc_rport_state_enter() - Change the rport's state - * @rport: The rport whose state should change + * @rdata: The rport whose state should change * @new: The new state of the rport * * Locking Note: Called with the rport lock held */ -static void fc_rport_state_enter(struct fc_rport *rport, +static void fc_rport_state_enter(struct fc_rport_priv *rdata, enum 
fc_rport_state new) { - struct fc_rport_priv *rdata = rport->dd_data; if (rdata->rp_state != new) rdata->retries = 0; rdata->rp_state = new; @@ -255,7 +255,7 @@ static void fc_rport_work(struct work_struct *work) INIT_LIST_HEAD(&new_rdata->peers); INIT_WORK(&new_rdata->event_work, fc_rport_work); - fc_rport_state_enter(new_rport, RPORT_ST_READY); + fc_rport_state_enter(new_rdata, RPORT_ST_READY); } else { printk(KERN_WARNING "libfc: Failed to allocate " " memory for rport (%6x)\n", ids.port_id); @@ -263,20 +263,20 @@ static void fc_rport_work(struct work_struct *work) } if (rport->port_id != FC_FID_DIR_SERV) if (rport_ops->event_callback) - rport_ops->event_callback(lport, rport, + rport_ops->event_callback(lport, rdata, RPORT_EV_FAILED); put_device(&rport->dev); rport = new_rport; rdata = new_rport->dd_data; if (rport_ops->event_callback) - rport_ops->event_callback(lport, rport, event); + rport_ops->event_callback(lport, rdata, event); } else if ((event == RPORT_EV_FAILED) || (event == RPORT_EV_LOGO) || (event == RPORT_EV_STOP)) { trans_state = rdata->trans_state; mutex_unlock(&rdata->rp_mutex); if (rport_ops->event_callback) - rport_ops->event_callback(lport, rport, event); + rport_ops->event_callback(lport, rdata, event); cancel_delayed_work_sync(&rdata->retry_work); if (trans_state == FC_PORTSTATE_ROGUE) put_device(&rport->dev); @@ -292,21 +292,19 @@ static void fc_rport_work(struct work_struct *work) /** * fc_rport_login() - Start the remote port login state machine - * @rport: Fibre Channel remote port + * @rdata: private remote port * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ -int fc_rport_login(struct fc_rport *rport) +int fc_rport_login(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Login to port\n"); + FC_RPORT_DBG(rdata, "Login to port\n"); - fc_rport_enter_plogi(rport); + fc_rport_enter_plogi(rdata); mutex_unlock(&rdata->rp_mutex); @@ -315,7 +313,7 @@ int fc_rport_login(struct fc_rport *rport) /** * fc_rport_enter_delete() - schedule a remote port to be deleted. - * @rport: Fibre Channel remote port + * @rdata: private remote port * @event: event to report as the reason for deletion * * Locking Note: Called with the rport lock held. @@ -327,17 +325,15 @@ int fc_rport_login(struct fc_rport *rport) * Since we have the mutex, even if fc_rport_work() is already started, * it'll see the new event. */ -static void fc_rport_enter_delete(struct fc_rport *rport, +static void fc_rport_enter_delete(struct fc_rport_priv *rdata, enum fc_rport_event event) { - struct fc_rport_priv *rdata = rport->dd_data; - if (rdata->rp_state == RPORT_ST_DELETE) return; - FC_RPORT_DBG(rport, "Delete port\n"); + FC_RPORT_DBG(rdata, "Delete port\n"); - fc_rport_state_enter(rport, RPORT_ST_DELETE); + fc_rport_state_enter(rdata, RPORT_ST_DELETE); if (rdata->event == RPORT_EV_NONE) queue_work(rport_event_queue, &rdata->event_work); @@ -346,33 +342,31 @@ static void fc_rport_enter_delete(struct fc_rport *rport, /** * fc_rport_logoff() - Logoff and remove an rport - * @rport: Fibre Channel remote port to be removed + * @rdata: private remote port * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. 
*/ -int fc_rport_logoff(struct fc_rport *rport) +int fc_rport_logoff(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Remove port\n"); + FC_RPORT_DBG(rdata, "Remove port\n"); if (rdata->rp_state == RPORT_ST_DELETE) { - FC_RPORT_DBG(rport, "Port in Delete state, not removing\n"); + FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); mutex_unlock(&rdata->rp_mutex); goto out; } - fc_rport_enter_logo(rport); + fc_rport_enter_logo(rdata); /* * Change the state to Delete so that we discard * the response. */ - fc_rport_enter_delete(rport, RPORT_EV_STOP); + fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); out: @@ -381,18 +375,16 @@ int fc_rport_logoff(struct fc_rport *rport) /** * fc_rport_enter_ready() - The rport is ready - * @rport: Fibre Channel remote port that is ready + * @rdata: private remote port * * Locking Note: The rport lock is expected to be held before calling * this routine. */ -static void fc_rport_enter_ready(struct fc_rport *rport) +static void fc_rport_enter_ready(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; + fc_rport_state_enter(rdata, RPORT_ST_READY); - fc_rport_state_enter(rport, RPORT_ST_READY); - - FC_RPORT_DBG(rport, "Port is Ready\n"); + FC_RPORT_DBG(rdata, "Port is Ready\n"); if (rdata->event == RPORT_EV_NONE) queue_work(rport_event_queue, &rdata->event_work); @@ -411,22 +403,21 @@ static void fc_rport_timeout(struct work_struct *work) { struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, retry_work.work); - struct fc_rport *rport = PRIV_TO_RPORT(rdata); mutex_lock(&rdata->rp_mutex); switch (rdata->rp_state) { case RPORT_ST_PLOGI: - fc_rport_enter_plogi(rport); + fc_rport_enter_plogi(rdata); break; case RPORT_ST_PRLI: - fc_rport_enter_prli(rport); + fc_rport_enter_prli(rdata); break; case RPORT_ST_RTV: - fc_rport_enter_rtv(rport); + fc_rport_enter_rtv(rdata); break; case RPORT_ST_LOGO: - fc_rport_enter_logo(rport); + fc_rport_enter_logo(rdata); break; case RPORT_ST_READY: case RPORT_ST_INIT: @@ -439,27 +430,25 @@ static void fc_rport_timeout(struct work_struct *work) /** * fc_rport_error() - Error handler, called once retries have been exhausted - * @rport: The fc_rport object + * @rdata: private remote port * @fp: The frame pointer * * Locking Note: The rport lock is expected to be held before * calling this routine */ -static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) +static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) { - struct fc_rport_priv *rdata = rport->dd_data; - - FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", - PTR_ERR(fp), fc_rport_state(rport), rdata->retries); + FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", + PTR_ERR(fp), fc_rport_state(rdata), rdata->retries); switch (rdata->rp_state) { case RPORT_ST_PLOGI: case RPORT_ST_PRLI: case RPORT_ST_LOGO: - fc_rport_enter_delete(rport, RPORT_EV_FAILED); + fc_rport_enter_delete(rdata, RPORT_EV_FAILED); break; case RPORT_ST_RTV: - fc_rport_enter_ready(rport); + fc_rport_enter_ready(rdata); break; case RPORT_ST_DELETE: case RPORT_ST_READY: @@ -470,7 +459,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) /** * fc_rport_error_retry() - Error handler when retries are desired - * @rport: The fc_rport object + * @rdata: private remote port data * @fp: The frame pointer * * If the error was an exchange timeout retry immediately, @@ -479,18 +468,18 @@ 
static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) * Locking Note: The rport lock is expected to be held before * calling this routine */ -static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) +static void fc_rport_error_retry(struct fc_rport_priv *rdata, + struct fc_frame *fp) { - struct fc_rport_priv *rdata = rport->dd_data; unsigned long delay = FC_DEF_E_D_TOV; /* make sure this isn't an FC_EX_CLOSED error, never retry those */ if (PTR_ERR(fp) == -FC_EX_CLOSED) - return fc_rport_error(rport, fp); + return fc_rport_error(rdata, fp); if (rdata->retries < rdata->local_port->max_rport_retry_count) { - FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", - PTR_ERR(fp), fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", + PTR_ERR(fp), fc_rport_state(rdata)); rdata->retries++; /* no additional delay on exchange timeouts */ if (PTR_ERR(fp) == -FC_EX_TIMEOUT) @@ -499,24 +488,24 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) return; } - return fc_rport_error(rport, fp); + return fc_rport_error(rdata, fp); } /** * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response * @sp: current sequence in the PLOGI exchange * @fp: response frame - * @rp_arg: Fibre Channel remote port + * @rdata_arg: private remote port data * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rp_arg) + void *rdata_arg) { - struct fc_rport *rport = rp_arg; - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *plp = NULL; unsigned int tov; @@ -526,18 +515,18 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Received a PLOGI response\n"); + FC_RPORT_DBG(rdata, "Received a PLOGI response\n"); if (rdata->rp_state != RPORT_ST_PLOGI) { - FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " - "%s\n", fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " + "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); goto err; } @@ -565,11 +554,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, * we skip PRLI and RTV and go straight to READY. */ if (rport->port_id >= FC_FID_DOM_MGR) - fc_rport_enter_ready(rport); + fc_rport_enter_ready(rdata); else - fc_rport_enter_prli(rport); + fc_rport_enter_prli(rdata); } else - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); out: fc_frame_free(fp); @@ -580,33 +569,33 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer - * @rport: Fibre Channel remote port to send PLOGI to + * @rdata: private remote port data * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ -static void fc_rport_enter_plogi(struct fc_rport *rport) +static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_frame *fp; - FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", + fc_rport_state(rdata)); - fc_rport_state_enter(rport, RPORT_ST_PLOGI); + fc_rport_state_enter(rdata, RPORT_ST_PLOGI); rport->maxframe_size = FC_MIN_MAX_PAYLOAD; fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); return; } rdata->e_d_tov = lport->e_d_tov; - if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, - fc_rport_plogi_resp, rport, lport->e_d_tov)) - fc_rport_error_retry(rport, fp); + if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PLOGI, + fc_rport_plogi_resp, rdata, lport->e_d_tov)) + fc_rport_error_retry(rdata, fp); else get_device(&rport->dev); } @@ -615,17 +604,17 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) * fc_rport_prli_resp() - Process Login (PRLI) response handler * @sp: current sequence in the PRLI exchange * @fp: response frame - * @rp_arg: Fibre Channel remote port + * @rdata_arg: private remote port data * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rp_arg) + void *rdata_arg) { - struct fc_rport *rport = rp_arg; - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct { struct fc_els_prli prli; struct fc_els_spp spp; @@ -636,18 +625,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Received a PRLI response\n"); + FC_RPORT_DBG(rdata, "Received a PRLI response\n"); if (rdata->rp_state != RPORT_ST_PRLI) { - FC_RPORT_DBG(rport, "Received a PRLI response, but in state " - "%s\n", fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " + "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); goto err; } @@ -667,11 +656,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, roles |= FC_RPORT_ROLE_FCP_TARGET; rport->roles = roles; - fc_rport_enter_rtv(rport); + fc_rport_enter_rtv(rdata); } else { - FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); - fc_rport_enter_delete(rport, RPORT_EV_FAILED); + FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n"); + fc_rport_enter_delete(rdata, RPORT_EV_FAILED); } out: @@ -685,42 +674,42 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, * fc_rport_logo_resp() - Logout (LOGO) response handler * @sp: current sequence in the LOGO exchange * @fp: response frame - * @rp_arg: Fibre Channel remote port + * @rdata_arg: private remote port data * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. 
*/ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rp_arg) + void *rdata_arg) { - struct fc_rport *rport = rp_arg; - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); u8 op; mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Received a LOGO response\n"); + FC_RPORT_DBG(rdata, "Received a LOGO response\n"); if (rdata->rp_state != RPORT_ST_LOGO) { - FC_RPORT_DBG(rport, "Received a LOGO response, but in state " - "%s\n", fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received a LOGO response, but in state " + "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { - fc_rport_enter_rtv(rport); + fc_rport_enter_rtv(rdata); } else { - FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); - fc_rport_enter_delete(rport, RPORT_EV_LOGO); + FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n"); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); } out: @@ -732,14 +721,14 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer - * @rport: Fibre Channel remote port to send PRLI to + * @rdata: private remote port data * * Locking Note: The rport lock is expected to be held before calling * this routine. */ -static void fc_rport_enter_prli(struct fc_rport *rport) +static void fc_rport_enter_prli(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct { struct fc_els_prli prli; @@ -747,20 +736,20 @@ static void fc_rport_enter_prli(struct fc_rport *rport) } *pp; struct fc_frame *fp; - FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", + fc_rport_state(rdata)); - fc_rport_state_enter(rport, RPORT_ST_PRLI); + fc_rport_state_enter(rdata, RPORT_ST_PRLI); fp = fc_frame_alloc(lport, sizeof(*pp)); if (!fp) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); return; } - if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, - fc_rport_prli_resp, rport, lport->e_d_tov)) - fc_rport_error_retry(rport, fp); + if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PRLI, + fc_rport_prli_resp, rdata, lport->e_d_tov)) + fc_rport_error_retry(rdata, fp); else get_device(&rport->dev); } @@ -769,7 +758,7 @@ static void fc_rport_enter_prli(struct fc_rport *rport) * fc_rport_els_rtv_resp() - Request Timeout Value response handler * @sp: current sequence in the RTV exchange * @fp: response frame - * @rp_arg: Fibre Channel remote port + * @rdata_arg: private remote port data * * Many targets don't seem to support this. * @@ -778,26 +767,26 @@ static void fc_rport_enter_prli(struct fc_rport *rport) * and then unlock the rport. 
*/ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rp_arg) + void *rdata_arg) { - struct fc_rport *rport = rp_arg; - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); u8 op; mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rport, "Received a RTV response\n"); + FC_RPORT_DBG(rdata, "Received a RTV response\n"); if (rdata->rp_state != RPORT_ST_RTV) { - FC_RPORT_DBG(rport, "Received a RTV response, but in state " - "%s\n", fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received a RTV response, but in state " + "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { - fc_rport_error(rport, fp); + fc_rport_error(rdata, fp); goto err; } @@ -823,7 +812,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, } } - fc_rport_enter_ready(rport); + fc_rport_enter_ready(rdata); out: fc_frame_free(fp); @@ -834,62 +823,62 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer - * @rport: Fibre Channel remote port to send RTV to + * @rdata: private remote port data * * Locking Note: The rport lock is expected to be held before calling * this routine. */ -static void fc_rport_enter_rtv(struct fc_rport *rport) +static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) { struct fc_frame *fp; - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); - FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", + fc_rport_state(rdata)); - fc_rport_state_enter(rport, RPORT_ST_RTV); + fc_rport_state_enter(rdata, RPORT_ST_RTV); fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); if (!fp) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); return; } - if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, - fc_rport_rtv_resp, rport, lport->e_d_tov)) - fc_rport_error_retry(rport, fp); + if (!lport->tt.elsct_send(lport, rdata, fp, ELS_RTV, + fc_rport_rtv_resp, rdata, lport->e_d_tov)) + fc_rport_error_retry(rdata, fp); else get_device(&rport->dev); } /** * fc_rport_enter_logo() - Send Logout (LOGO) request to peer - * @rport: Fibre Channel remote port to send LOGO to + * @rdata: private remote port data * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ -static void fc_rport_enter_logo(struct fc_rport *rport) +static void fc_rport_enter_logo(struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_frame *fp; - FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n", + fc_rport_state(rdata)); - fc_rport_state_enter(rport, RPORT_ST_LOGO); + fc_rport_state_enter(rdata, RPORT_ST_LOGO); fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); if (!fp) { - fc_rport_error_retry(rport, fp); + fc_rport_error_retry(rdata, fp); return; } - if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, - fc_rport_logo_resp, rport, lport->e_d_tov)) - fc_rport_error_retry(rport, fp); + if (!lport->tt.elsct_send(lport, rdata, fp, ELS_LOGO, + fc_rport_logo_resp, rdata, lport->e_d_tov)) + fc_rport_error_retry(rdata, fp); else get_device(&rport->dev); } @@ -899,16 +888,15 @@ static void fc_rport_enter_logo(struct fc_rport *rport) * fc_rport_recv_req() - Receive a request from a rport * @sp: current sequence in the PLOGI exchange * @fp: response frame - * @rp_arg: Fibre Channel remote port + * @rdata_arg: private remote port data * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_rport *rport) + struct fc_rport_priv *rdata) { - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame_header *fh; @@ -927,16 +915,16 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, op = fc_frame_payload_op(fp); switch (op) { case ELS_PLOGI: - fc_rport_recv_plogi_req(rport, sp, fp); + fc_rport_recv_plogi_req(rdata, sp, fp); break; case ELS_PRLI: - fc_rport_recv_prli_req(rport, sp, fp); + fc_rport_recv_prli_req(rdata, sp, fp); break; case ELS_PRLO: - fc_rport_recv_prlo_req(rport, sp, fp); + fc_rport_recv_prlo_req(rdata, sp, fp); break; case ELS_LOGO: - fc_rport_recv_logo_req(rport, sp, fp); + fc_rport_recv_logo_req(rdata, sp, fp); break; case ELS_RRQ: els_data.fp = fp; @@ -958,17 +946,17 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request - * @rport: Fibre Channel remote port that initiated PLOGI + * @rdata: private remote port data * @sp: current sequence in the PLOGI exchange * @fp: PLOGI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ -static void fc_rport_recv_plogi_req(struct fc_rport *rport, +static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_frame *fp = rx_fp; struct fc_exch *ep; @@ -984,13 +972,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, fh = fc_frame_header_get(fp); - FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n", + fc_rport_state(rdata)); sid = ntoh24(fh->fh_s_id); pl = fc_frame_payload_get(fp, sizeof(*pl)); if (!pl) { - FC_RPORT_DBG(rport, "Received PLOGI too short\n"); + FC_RPORT_DBG(rdata, "Received PLOGI too short\n"); WARN_ON(1); /* XXX TBD: send reject? */ fc_frame_free(fp); @@ -1012,25 +1000,25 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, */ switch (rdata->rp_state) { case RPORT_ST_INIT: - FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " + FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT " "- reject\n", (unsigned long long)wwpn); reject = ELS_RJT_UNSUP; break; case RPORT_ST_PLOGI: - FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", + FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n", rdata->rp_state); if (wwpn < lport->wwpn) reject = ELS_RJT_INPROG; break; case RPORT_ST_PRLI: case RPORT_ST_READY: - FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " + FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " "- ignored for now\n", rdata->rp_state); /* XXX TBD - should reset */ break; case RPORT_ST_DELETE: default: - FC_RPORT_DBG(rport, "Received PLOGI in unexpected " + FC_RPORT_DBG(rdata, "Received PLOGI in unexpected " "state %d\n", rdata->rp_state); fc_frame_free(fp); return; @@ -1074,24 +1062,24 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, FC_TYPE_ELS, f_ctl, 0); lport->tt.seq_send(lport, sp, fp); if (rdata->rp_state == RPORT_ST_PLOGI) - fc_rport_enter_prli(rport); + fc_rport_enter_prli(rdata); } } } /** * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request - * @rport: Fibre Channel remote port that initiated PRLI + * @rdata: private remote port data * @sp: current sequence in the PRLI exchange * @fp: PRLI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ -static void fc_rport_recv_prli_req(struct fc_rport *rport, +static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport_priv *rdata = rport->dd_data; + struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_exch *ep; struct fc_frame *fp; @@ -1115,8 +1103,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, fh = fc_frame_header_get(rx_fp); - FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", + fc_rport_state(rdata)); switch (rdata->rp_state) { case RPORT_ST_PRLI: @@ -1220,7 +1208,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, */ switch (rdata->rp_state) { case RPORT_ST_PRLI: - fc_rport_enter_ready(rport); + fc_rport_enter_ready(rdata); break; case RPORT_ST_READY: break; @@ -1233,17 +1221,17 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, /** * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request - * @rport: Fibre Channel remote port that initiated PRLO + * @rdata: private remote port data * @sp: current sequence in the PRLO exchange * @fp: PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. */ -static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, +static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, + struct fc_seq *sp, struct fc_frame *fp) { - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame_header *fh; @@ -1251,8 +1239,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, fh = fc_frame_header_get(fp); - FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", + fc_rport_state(rdata)); if (rdata->rp_state == RPORT_ST_DELETE) { fc_frame_free(fp); @@ -1268,24 +1256,24 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, /** * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request - * @rport: Fibre Channel remote port that initiated LOGO + * @rdata: private remote port data * @sp: current sequence in the LOGO exchange * @fp: LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ -static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, +static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata, + struct fc_seq *sp, struct fc_frame *fp) { struct fc_frame_header *fh; - struct fc_rport_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; fh = fc_frame_header_get(fp); - FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", - fc_rport_state(rport)); + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); if (rdata->rp_state == RPORT_ST_DELETE) { fc_frame_free(fp); @@ -1293,7 +1281,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, } rdata->event = RPORT_EV_LOGO; - fc_rport_state_enter(rport, RPORT_ST_DELETE); + fc_rport_state_enter(rdata, RPORT_ST_DELETE); queue_work(rport_event_queue, &rdata->event_work); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index a0ff61c3e935..3ede1abad4b7 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -249,10 +249,13 @@ static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) /** * fc_els_fill - Fill in an ELS request frame */ -static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport, +static inline int fc_els_fill(struct fc_lport *lport, + struct fc_rport_priv *rdata, struct fc_frame *fp, unsigned int op, enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type) { + struct fc_rport *rport = PRIV_TO_RPORT(rdata); + switch (op) { case ELS_PLOGI: fc_plogi_fill(lport, fp, ELS_PLOGI); @@ -272,7 +275,7 @@ static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport, * is port logo, therefore * set did to rport id. */ - if (rport) + if (rdata) *did = rport->port_id; break; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2fdd8ac12b89..df57cb762dc2 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -75,10 +75,10 @@ do { \ (lport)->host->host_no, \ (port_id), ##args)) -#define FC_RPORT_DBG(rport, fmt, args...) \ +#define FC_RPORT_DBG(rdata, fmt, args...) \ do { \ - struct fc_rport_priv *rdata = rport->dd_data; \ struct fc_lport *lport = rdata->local_port; \ + struct fc_rport *rport = PRIV_TO_RPORT(rdata); \ FC_RPORT_ID_DBG(lport, rport->port_id, fmt, ##args); \ } while (0) @@ -185,8 +185,10 @@ enum fc_rport_event { */ #define fc_rport_priv fc_rport_libfc_priv +struct fc_rport_priv; + struct fc_rport_operations { - void (*event_callback)(struct fc_lport *, struct fc_rport *, + void (*event_callback)(struct fc_lport *, struct fc_rport_priv *, enum fc_rport_event); }; @@ -422,7 +424,7 @@ struct libfc_function_template { * STATUS: OPTIONAL */ struct fc_seq *(*elsct_send)(struct fc_lport *lport, - struct fc_rport *rport, + struct fc_rport_priv *, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, @@ -567,8 +569,8 @@ struct libfc_function_template { /* * Create a remote port */ - struct fc_rport *(*rport_create)(struct fc_lport *, - struct fc_rport_identifiers *); + struct fc_rport_priv *(*rport_create)(struct fc_lport *, + struct fc_rport_identifiers *); /* * Initiates the RP state machine. It is called from the LP module. 
@@ -581,7 +583,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - int (*rport_login)(struct fc_rport *rport); + int (*rport_login)(struct fc_rport_priv *); /* * Logoff, and remove the rport from the transport if @@ -589,7 +591,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - int (*rport_logoff)(struct fc_rport *rport); + int (*rport_logoff)(struct fc_rport_priv *); /* * Recieve a request from a remote port. @@ -597,14 +599,14 @@ struct libfc_function_template { * STATUS: OPTIONAL */ void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, - struct fc_rport *); + struct fc_rport_priv *); /* * lookup an rport by it's port ID. * * STATUS: OPTIONAL */ - struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); + struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32); /* * Send a fcp cmd from fsp pkt. @@ -694,8 +696,8 @@ struct fc_lport { /* Associations */ struct Scsi_Host *host; struct list_head ema_list; - struct fc_rport *dns_rp; - struct fc_rport *ptp_rp; + struct fc_rport_priv *dns_rp; + struct fc_rport_priv *ptp_rp; void *scsi_priv; struct fc_disc disc; From a46f327aa5caf2cce138e98ddd863b6cca0e71e2 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:00:55 -0700 Subject: [PATCH 151/209] [SCSI] libfc: change elsct to use FC_ID instead of rdata tt.elsct_send is used by both FCP and by the rport state machine. After further patches, these two modules will use different structures for the remote port. So, change elsct_send to use the FC_ID instead of the fc_rport_priv as its argument. It currently only uses the FC_ID anyway. For CT requests the destination FC_ID is still implicitly 0xfffffc. After further patches the did arg on CT requests will be used to specify the FC_ID being inquired about for GPN_ID or other queries. 
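
The new rule can be illustrated with a stand-alone sketch (not the kernel function; the opcode and well-known FC_ID constants follow the FC specs and the frame-fill helpers are omitted): ELS requests address the caller-supplied FC_ID, while CT requests are forced to the directory server.

/*
 * Stand-alone sketch of the new destination-ID rule in fc_elsct_send();
 * not the kernel code.  Opcode and FC_ID values follow the FC specs
 * (ELS_LS_RJT 0x01 .. ELS_AUTH_ELS 0x90, GPN_FT 0x0172); the frame
 * construction itself is omitted.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define FC_FID_DIR_SERV	0xfffffc	/* name/directory server */
#define ELS_LS_RJT	0x01
#define ELS_PLOGI	0x03
#define ELS_AUTH_ELS	0x90
#define FC_NS_GPN_FT	0x0172

static bool op_is_els(unsigned int op)
{
	return op >= ELS_LS_RJT && op <= ELS_AUTH_ELS;
}

/* Return the FC_ID the frame header would be addressed to. */
static uint32_t elsct_dest_id(unsigned int op, uint32_t did)
{
	if (op_is_els(op))
		return did;		/* caller supplies the peer FC_ID */
	return FC_FID_DIR_SERV;		/* CT always goes to the dir server */
}

int main(void)
{
	/* A PLOGI keeps the remote port ID (example value) passed in... */
	printf("PLOGI  -> %06x\n", (unsigned)elsct_dest_id(ELS_PLOGI, 0x0107a3));
	/* ...while GPN_FT ignores the 0 passed by the discovery code. */
	printf("GPN_FT -> %06x\n", (unsigned)elsct_dest_id(FC_NS_GPN_FT, 0));
	return 0;
}
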
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 2 +- drivers/scsi/libfc/fc_elsct.c | 11 ++++++----- drivers/scsi/libfc/fc_fcp.c | 2 +- drivers/scsi/libfc/fc_lport.c | 12 ++++++------ drivers/scsi/libfc/fc_rport.c | 8 ++++---- include/scsi/fc_encode.h | 26 +++++--------------------- include/scsi/libfc.h | 2 +- 7 files changed, 24 insertions(+), 39 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 448ffc388656..4b1f9faf639a 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -526,7 +526,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) if (!fp) goto err; - if (lport->tt.elsct_send(lport, NULL, fp, + if (lport->tt.elsct_send(lport, 0, fp, FC_NS_GPN_FT, fc_disc_gpn_ft_resp, disc, lport->e_d_tov)) diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 2b8a3bbc0399..5e8b011af50e 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -32,7 +32,7 @@ * fc_elsct_send - sends ELS/CT frame */ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, - struct fc_rport_priv *rdata, + u32 did, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, @@ -41,16 +41,17 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, void *arg, u32 timer_msec) { enum fc_rctl r_ctl; - u32 did = FC_FID_NONE; enum fc_fh_type fh_type; int rc; /* ELS requests */ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) - rc = fc_els_fill(lport, rdata, fp, op, &r_ctl, &did, &fh_type); - else + rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type); + else { /* CT requests */ - rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type); + rc = fc_ct_fill(lport, fp, op, &r_ctl, &fh_type); + did = FC_FID_DIR_SERV; + } if (rc) return NULL; diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index a622096eb315..59a4408b27b5 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1308,7 +1308,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (lp->tt.elsct_send(lp, rport->dd_data, fp, ELS_REC, fc_fcp_rec_resp, + if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 3c15abd35ffa..aa605d2012e0 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1217,7 +1217,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport) return; } - if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR, + if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, fc_lport_scr_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1258,7 +1258,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) return; } - if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID, + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, fc_lport_rft_id_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); @@ -1287,7 +1287,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) return; } - if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID, + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, fc_lport_rpn_id_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); @@ 
-1443,8 +1443,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) return; } - if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp, - lport, lport->e_d_tov)) + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, + fc_lport_logo_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1567,7 +1567,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) if (!fp) return fc_lport_error(lport, fp); - if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI, + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, fc_lport_flogi_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 13d3d758fb0e..20371b445bb1 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -593,7 +593,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) } rdata->e_d_tov = lport->e_d_tov; - if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PLOGI, + if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PLOGI, fc_rport_plogi_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else @@ -747,7 +747,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PRLI, + if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PRLI, fc_rport_prli_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else @@ -845,7 +845,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rdata, fp, ELS_RTV, + if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_RTV, fc_rport_rtv_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else @@ -876,7 +876,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rdata, fp, ELS_LOGO, + if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_LOGO, fc_rport_logo_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 3ede1abad4b7..24bf764f9884 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -79,8 +79,9 @@ static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, /** * fc_ct_fill - Fill in a name service request frame */ -static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp, - unsigned int op, enum fc_rctl *r_ctl, u32 *did, +static inline int fc_ct_fill(struct fc_lport *lport, + struct fc_frame *fp, + unsigned int op, enum fc_rctl *r_ctl, enum fc_fh_type *fh_type) { struct fc_ct_req *ct; @@ -110,7 +111,6 @@ static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp, return -EINVAL; } *r_ctl = FC_RCTL_DD_UNSOL_CTL; - *did = FC_FID_DIR_SERV; *fh_type = FC_TYPE_CT; return 0; } @@ -250,53 +250,37 @@ static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) * fc_els_fill - Fill in an ELS request frame */ static inline int fc_els_fill(struct fc_lport *lport, - struct fc_rport_priv *rdata, + u32 did, struct fc_frame *fp, unsigned int op, - enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type) + enum fc_rctl *r_ctl, enum fc_fh_type *fh_type) { - struct fc_rport *rport = PRIV_TO_RPORT(rdata); - switch (op) { case ELS_PLOGI: fc_plogi_fill(lport, fp, ELS_PLOGI); - *did = rport->port_id; break; case ELS_FLOGI: fc_flogi_fill(lport, fp); - *did = FC_FID_FLOGI; break; case ELS_LOGO: fc_logo_fill(lport, fp); - *did = FC_FID_FLOGI; - /* - * if rport is valid then it - * is 
port logo, therefore - * set did to rport id. - */ - if (rdata) - *did = rport->port_id; break; case ELS_RTV: fc_rtv_fill(lport, fp); - *did = rport->port_id; break; case ELS_REC: fc_rec_fill(lport, fp); - *did = rport->port_id; break; case ELS_PRLI: fc_prli_fill(lport, fp); - *did = rport->port_id; break; case ELS_SCR: fc_scr_fill(lport, fp); - *did = FC_FID_FCTRL; break; default: diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index df57cb762dc2..2473167464c2 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -424,7 +424,7 @@ struct libfc_function_template { * STATUS: OPTIONAL */ struct fc_seq *(*elsct_send)(struct fc_lport *lport, - struct fc_rport_priv *, + u32 did, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, From f211fa514a07326c0f9364c0e6ed17e38860172f Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:01 -0700 Subject: [PATCH 152/209] [SCSI] libfc: make rport structure optional Allow a struct fc_rport_priv to have no fc_rport associated with it. This sets up to remove the need for "rogue" rports. Add a few fields to fc_rport_priv that are needed before the fc_rport is created. These are the ids, maxframe_size, classes, and rport pointer. Remove the macro PRIV_TO_RPORT(). Just use rdata->rport where appropriate. To take the place of the get_device()/put_device ops that were used to hold both the rport and rdata, add a reference count to rdata structures using kref. When kref_put() drops the refcount to zero, a new template function releasing the rdata should be called. This will take care of freeing the rdata and releasing the hold on the rport (for now). After subsequent patches make the rport truly optional, this release function will simply free the rdata. Remove the simple inline function fc_rport_set_name(), which becomes semantically ambiguous otherwise. The caller will set the port_name and node_name in the rdata->ids, which will later be copied to the rport when it is created.
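The reference counting added here follows the standard kref pattern: kref_init() happens once when the rdata is created, kref_get() pins it while an asynchronous response is outstanding, and the final kref_put() invokes the rport_destroy template function. A minimal sketch (illustrative only; the example_* names are made up):

/* Take a reference while an exchange or callback is outstanding ... */
static void example_hold(struct fc_rport_priv *rdata)
{
        kref_get(&rdata->kref);
}

/* ... and drop it afterwards; the last put calls rport_destroy(),
 * which frees the rdata (and, for now, releases the rport hold).
 */
static void example_release(struct fc_rport_priv *rdata)
{
        kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}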
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 54 +++++++----------- drivers/scsi/libfc/fc_lport.c | 14 ++--- drivers/scsi/libfc/fc_rport.c | 102 +++++++++++++++++++--------------- include/scsi/libfc.h | 29 +++++----- 4 files changed, 98 insertions(+), 101 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 4b1f9faf639a..5f839b625e50 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -47,7 +47,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); -static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, +static int fc_disc_new_target(struct fc_disc *, struct fc_rport_priv *, struct fc_rport_identifiers *); static void fc_disc_done(struct fc_disc *); static void fc_disc_timeout(struct work_struct *); @@ -63,12 +63,10 @@ struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport, u32 port_id) { const struct fc_disc *disc = &lport->disc; - struct fc_rport *rport; struct fc_rport_priv *rdata; list_for_each_entry(rdata, &disc->rports, peers) { - rport = PRIV_TO_RPORT(rdata); - if (rport->port_id == port_id) + if (rdata->ids.port_id == port_id) return rdata; } return NULL; @@ -115,10 +113,9 @@ static void fc_disc_rport_callback(struct fc_lport *lport, enum fc_rport_event event) { struct fc_disc *disc = &lport->disc; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, - rport->port_id); + rdata->ids.port_id); switch (event) { case RPORT_EV_CREATED: @@ -320,8 +317,6 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, struct fc_lport *lport) { struct fc_rport_priv *rdata; - struct fc_rport *rport; - struct fc_rport_identifiers ids; struct fc_disc *disc = &lport->disc; /* @@ -349,18 +344,12 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, */ rdata = disc->lport->ptp_rp; if (rdata) { - rport = PRIV_TO_RPORT(rdata); - ids.port_id = rport->port_id; - ids.port_name = rport->port_name; - ids.node_name = rport->node_name; - ids.roles = FC_RPORT_ROLE_UNKNOWN; - get_device(&rport->dev); - - if (!fc_disc_new_target(disc, rport, &ids)) { + kref_get(&rdata->kref); + if (!fc_disc_new_target(disc, rdata, &rdata->ids)) { disc->event = DISC_EV_SUCCESS; fc_disc_done(disc); } - put_device(&rport->dev); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } else { fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */ } @@ -375,28 +364,27 @@ static struct fc_rport_operations fc_disc_rport_ops = { /** * fc_disc_new_target() - Handle new target found by discovery * @lport: FC local port - * @rport: The previous FC remote port (NULL if new remote port) + * @rdata: The previous FC remote port priv (NULL if new remote port) * @ids: Identifiers for the new FC remote port * * Locking Note: This function expects that the disc_mutex is locked * before it is called. */ static int fc_disc_new_target(struct fc_disc *disc, - struct fc_rport *rport, + struct fc_rport_priv *rdata, struct fc_rport_identifiers *ids) { struct fc_lport *lport = disc->lport; - struct fc_rport_priv *rdata; int error = 0; - if (rport && ids->port_name) { - if (rport->port_name == -1) { + if (rdata && ids->port_name) { + if (rdata->ids.port_name == -1) { /* * Set WWN and fall through to notify of create. 
*/ - fc_rport_set_name(rport, ids->port_name, - rport->node_name); - } else if (rport->port_name != ids->port_name) { + rdata->ids.port_name = ids->port_name; + rdata->ids.node_name = ids->node_name; + } else if (rdata->ids.port_name != ids->port_name) { /* * This is a new port with the same FCID as * a previously-discovered port. Presumably the old @@ -404,27 +392,23 @@ static int fc_disc_new_target(struct fc_disc *disc, * assigned the same FCID. This should be rare. * Delete the old one and fall thru to re-create. */ - rdata = rport->dd_data; list_del(&rdata->peers); lport->tt.rport_logoff(rdata); - rport = NULL; + rdata = NULL; } } if (((ids->port_name != -1) || (ids->port_id != -1)) && ids->port_id != fc_host_port_id(lport->host) && ids->port_name != lport->wwpn) { - if (!rport) { + if (!rdata) { rdata = lport->tt.rport_lookup(lport, ids->port_id); - if (!rport) { + if (!rdata) { rdata = lport->tt.rport_create(lport, ids); + if (!rdata) + error = -ENOMEM; } - if (!rdata) - error = -ENOMEM; - else - rport = PRIV_TO_RPORT(rdata); } - if (rport) { - rdata = rport->dd_data; + if (rdata) { rdata->ops = &fc_disc_rport_ops; rdata->rp_state = RPORT_ST_INIT; list_add_tail(&rdata->peers, &disc->rogue_rports); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index aa605d2012e0..a7fe6b8d38b8 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -143,14 +143,12 @@ static void fc_lport_rport_callback(struct fc_lport *lport, struct fc_rport_priv *rdata, enum fc_rport_event event) { - struct fc_rport *rport = PRIV_TO_RPORT(rdata); - FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, - rport->port_id); + rdata->ids.port_id); switch (event) { case RPORT_EV_CREATED: - if (rport->port_id == FC_FID_DIR_SERV) { + if (rdata->ids.port_id == FC_FID_DIR_SERV) { mutex_lock(&lport->lp_mutex); if (lport->state == LPORT_ST_DNS) { lport->dns_rp = rdata; @@ -160,7 +158,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, "on port (%6x) for the directory " "server, but the lport is not " "in the DNS state, it's in the " - "%d state", rport->port_id, + "%d state", rdata->ids.port_id, lport->state); lport->tt.rport_logoff(rdata); } @@ -168,12 +166,12 @@ static void fc_lport_rport_callback(struct fc_lport *lport, } else FC_LPORT_DBG(lport, "Received an event for port (%6x) " "which is not the directory server\n", - rport->port_id); + rdata->ids.port_id); break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: - if (rport->port_id == FC_FID_DIR_SERV) { + if (rdata->ids.port_id == FC_FID_DIR_SERV) { mutex_lock(&lport->lp_mutex); lport->dns_rp = NULL; mutex_unlock(&lport->lp_mutex); @@ -181,7 +179,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, } else FC_LPORT_DBG(lport, "Received an event for port (%6x) " "which is not the directory server\n", - rport->port_id); + rdata->ids.port_id); break; case RPORT_EV_NONE: break; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 20371b445bb1..69f6e588d37b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -120,7 +120,10 @@ struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, device_initialize(&rport->dev); rport->dev.release = fc_rport_rogue_destroy; + rdata->ids = *ids; + kref_init(&rdata->kref); mutex_init(&rdata->rp_mutex); + rdata->rport = rport; rdata->local_port = lport; rdata->trans_state = FC_PORTSTATE_ROGUE; rdata->rp_state = RPORT_ST_INIT; @@ -129,6 +132,7 @@ struct fc_rport_priv 
*fc_rport_rogue_create(struct fc_lport *lport, rdata->ops = NULL; rdata->e_d_tov = lport->e_d_tov; rdata->r_a_tov = lport->r_a_tov; + rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); /* @@ -140,6 +144,20 @@ struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, return rdata; } +/** + * fc_rport_destroy() - free a remote port after last reference is released. + * @kref: pointer to kref inside struct fc_rport_priv + */ +static void fc_rport_destroy(struct kref *kref) +{ + struct fc_rport_priv *rdata; + struct fc_rport *rport; + + rdata = container_of(kref, struct fc_rport_priv, kref); + rport = rdata->rport; + put_device(&rport->dev); +} + /** * fc_rport_state() - return a string for the state the rport is in * @rdata: remote port private data @@ -215,22 +233,19 @@ static void fc_rport_work(struct work_struct *work) enum fc_rport_trans_state trans_state; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); + struct fc_rport *rport; mutex_lock(&rdata->rp_mutex); event = rdata->event; rport_ops = rdata->ops; + rport = rdata->rport; if (event == RPORT_EV_CREATED) { struct fc_rport *new_rport; struct fc_rport_priv *new_rdata; struct fc_rport_identifiers ids; - ids.port_id = rport->port_id; - ids.roles = rport->roles; - ids.port_name = rport->port_name; - ids.node_name = rport->node_name; - + ids = rdata->ids; rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); @@ -240,15 +255,20 @@ static void fc_rport_work(struct work_struct *work) * Switch from the rogue rport to the rport * returned by the FC class. */ - new_rport->maxframe_size = rport->maxframe_size; + new_rport->maxframe_size = rdata->maxframe_size; new_rdata = new_rport->dd_data; + new_rdata->rport = new_rport; + new_rdata->ids = ids; new_rdata->e_d_tov = rdata->e_d_tov; new_rdata->r_a_tov = rdata->r_a_tov; new_rdata->ops = rdata->ops; new_rdata->local_port = rdata->local_port; new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; new_rdata->trans_state = FC_PORTSTATE_REAL; + new_rdata->maxframe_size = rdata->maxframe_size; + new_rdata->supported_classes = rdata->supported_classes; + kref_init(&new_rdata->kref); mutex_init(&new_rdata->rp_mutex); INIT_DELAYED_WORK(&new_rdata->retry_work, fc_rport_timeout); @@ -261,12 +281,11 @@ static void fc_rport_work(struct work_struct *work) " memory for rport (%6x)\n", ids.port_id); event = RPORT_EV_FAILED; } - if (rport->port_id != FC_FID_DIR_SERV) + if (rdata->ids.port_id != FC_FID_DIR_SERV) if (rport_ops->event_callback) rport_ops->event_callback(lport, rdata, RPORT_EV_FAILED); - put_device(&rport->dev); - rport = new_rport; + kref_put(&rdata->kref, lport->tt.rport_destroy); rdata = new_rport->dd_data; if (rport_ops->event_callback) rport_ops->event_callback(lport, rdata, event); @@ -279,7 +298,7 @@ static void fc_rport_work(struct work_struct *work) rport_ops->event_callback(lport, rdata, event); cancel_delayed_work_sync(&rdata->retry_work); if (trans_state == FC_PORTSTATE_ROGUE) - put_device(&rport->dev); + kref_put(&rdata->kref, lport->tt.rport_destroy); else { port_id = rport->port_id; fc_remote_port_delete(rport); @@ -505,7 +524,6 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *plp = NULL; unsigned int tov; 
@@ -533,8 +551,8 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC && (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { - rport->port_name = get_unaligned_be64(&plp->fl_wwpn); - rport->node_name = get_unaligned_be64(&plp->fl_wwnn); + rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); + rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); tov = ntohl(plp->fl_csp.sp_e_d_tov); if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) @@ -546,14 +564,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (cssp_seq < csp_seq) csp_seq = cssp_seq; rdata->max_seq = csp_seq; - rport->maxframe_size = - fc_plogi_get_maxframe(plp, lport->mfs); + rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs); /* * If the rport is one of the well known addresses * we skip PRLI and RTV and go straight to READY. */ - if (rport->port_id >= FC_FID_DOM_MGR) + if (rdata->ids.port_id >= FC_FID_DOM_MGR) fc_rport_enter_ready(rdata); else fc_rport_enter_prli(rdata); @@ -564,7 +581,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - put_device(&rport->dev); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** @@ -577,7 +594,6 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_frame *fp; FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", @@ -585,7 +601,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) fc_rport_state_enter(rdata, RPORT_ST_PLOGI); - rport->maxframe_size = FC_MIN_MAX_PAYLOAD; + rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { fc_rport_error_retry(rdata, fp); @@ -593,11 +609,11 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) } rdata->e_d_tov = lport->e_d_tov; - if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PLOGI, + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, fc_rport_plogi_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else - get_device(&rport->dev); + kref_get(&rdata->kref); } /** @@ -614,7 +630,6 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct { struct fc_els_prli prli; struct fc_els_spp spp; @@ -649,13 +664,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, rdata->flags |= FC_RP_FLAGS_RETRY; } - rport->supported_classes = FC_COS_CLASS3; + rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcp_parm & FCP_SPPF_TARG_FCN) roles |= FC_RPORT_ROLE_FCP_TARGET; - rport->roles = roles; + rdata->ids.roles = roles; fc_rport_enter_rtv(rdata); } else { @@ -667,7 +682,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - put_device(&rport->dev); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** @@ -684,7 +699,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); u8 op; mutex_lock(&rdata->rp_mutex); @@ 
-716,7 +730,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - put_device(&rport->dev); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** @@ -728,7 +742,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) { - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct { struct fc_els_prli prli; @@ -747,11 +760,11 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PRLI, + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, fc_rport_prli_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else - get_device(&rport->dev); + kref_get(&rdata->kref); } /** @@ -770,7 +783,6 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); u8 op; mutex_lock(&rdata->rp_mutex); @@ -818,7 +830,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - put_device(&rport->dev); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** @@ -832,7 +844,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) { struct fc_frame *fp; struct fc_lport *lport = rdata->local_port; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", fc_rport_state(rdata)); @@ -845,11 +856,11 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_RTV, + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, fc_rport_rtv_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else - get_device(&rport->dev); + kref_get(&rdata->kref); } /** @@ -862,7 +873,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) static void fc_rport_enter_logo(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_frame *fp; FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n", @@ -876,11 +886,11 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_LOGO, + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, fc_rport_logo_resp, rdata, lport->e_d_tov)) fc_rport_error_retry(rdata, fp); else - get_device(&rport->dev); + kref_get(&rdata->kref); } @@ -956,7 +966,6 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_frame *fp = rx_fp; struct fc_exch *ep; @@ -1041,12 +1050,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, } else { sp = lport->tt.seq_start_next(sp); WARN_ON(!sp); - fc_rport_set_name(rport, wwpn, wwnn); + rdata->ids.port_name = wwpn; + rdata->ids.node_name = wwnn; /* * Get session payload size from incoming PLOGI. 
*/ - rport->maxframe_size = + rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); fc_frame_free(rx_fp); fc_plogi_fill(lport, fp, ELS_LS_ACC); @@ -1079,7 +1089,6 @@ static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_rport *rport = PRIV_TO_RPORT(rdata); struct fc_lport *lport = rdata->local_port; struct fc_exch *ep; struct fc_frame *fp; @@ -1173,12 +1182,12 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, fcp_parm = ntohl(rspp->spp_params); if (fcp_parm * FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; - rport->supported_classes = FC_COS_CLASS3; + rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcp_parm & FCP_SPPF_TARG_FCN) roles |= FC_RPORT_ROLE_FCP_TARGET; - rport->roles = roles; + rdata->ids.roles = roles; spp->spp_params = htonl(lport->service_params); @@ -1310,6 +1319,9 @@ int fc_rport_init(struct fc_lport *lport) if (!lport->tt.rport_flush_queue) lport->tt.rport_flush_queue = fc_rport_flush_queue; + if (!lport->tt.rport_destroy) + lport->tt.rport_destroy = fc_rport_destroy; + return 0; } EXPORT_SYMBOL(fc_rport_init); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2473167464c2..a94d216d2207 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -76,11 +76,7 @@ do { \ (port_id), ##args)) #define FC_RPORT_DBG(rdata, fmt, args...) \ -do { \ - struct fc_lport *lport = rdata->local_port; \ - struct fc_rport *rport = PRIV_TO_RPORT(rdata); \ - FC_RPORT_ID_DBG(lport, rport->port_id, fmt, ##args); \ -} while (0) + FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) #define FC_FCP_DBG(pkt, fmt, args...) 
\ FC_CHECK_LOGGING(FC_FCP_LOGGING, \ @@ -195,9 +191,13 @@ struct fc_rport_operations { /** * struct fc_rport_libfc_priv - libfc internal information about a remote port * @local_port: Fibre Channel host port instance + * @rport: transport remote port + * @kref: reference counter * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges + * @ids: remote port identifiers and roles * @flags: REC and RETRY supported flags * @max_seq: maximum number of concurrent sequences + * @maxframe_size: maximum frame size * @retries: retry count in current state * @e_d_tov: error detect timeout value (in msec) * @r_a_tov: resource allocation timeout value (in msec) @@ -207,11 +207,15 @@ struct fc_rport_operations { */ struct fc_rport_libfc_priv { struct fc_lport *local_port; + struct fc_rport *rport; + struct kref kref; enum fc_rport_state rp_state; + struct fc_rport_identifiers ids; u16 flags; #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) #define FC_RP_FLAGS_RETRY (1 << 1) u16 max_seq; + u16 maxframe_size; unsigned int retries; unsigned int e_d_tov; unsigned int r_a_tov; @@ -222,19 +226,12 @@ struct fc_rport_libfc_priv { struct fc_rport_operations *ops; struct list_head peers; struct work_struct event_work; + u32 supported_classes; }; -#define PRIV_TO_RPORT(x) \ - ((struct fc_rport *)((void *)(x) - sizeof(struct fc_rport))) #define RPORT_TO_PRIV(x) \ ((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport))) -static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) -{ - rport->node_name = wwnn; - rport->port_name = wwpn; -} - /* * fcoe stats structure */ @@ -608,6 +605,12 @@ struct libfc_function_template { */ struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32); + /* + * Destroy an rport after final kref_put(). + * The argument is a pointer to the kref inside the fc_rport_priv. + */ + void (*rport_destroy)(struct kref *); + /* * Send a fcp cmd from fsp pkt. * Called with the SCSI host lock unlocked and irqs disabled. From 629f44279d169f29b084d406e9f1c33314f220fa Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:06 -0700 Subject: [PATCH 153/209] [SCSI] libfc: rearrange code in fc_rport_work This is a cleanup without semantic changes to use a switch statement instead of a series of if-statements in fc_rport_work(), and to move some declarations up to the top. 
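Reduced to a skeleton, the new control flow looks like this (illustrative only; bodies and locking are elided, and example_dispatch() is a made-up name):

static void example_dispatch(struct fc_rport_priv *rdata,
                             enum fc_rport_event event)
{
        switch (event) {
        case RPORT_EV_CREATED:
                /* attach the real fc_rport and notify the callback */
                break;
        case RPORT_EV_FAILED:
        case RPORT_EV_LOGO:
        case RPORT_EV_STOP:
                /* notify the callback and tear the rport down */
                break;
        default:
                break;
        }
}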
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 69f6e588d37b..d0a45425cc3f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -233,6 +233,9 @@ static void fc_rport_work(struct work_struct *work) enum fc_rport_trans_state trans_state; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; + struct fc_rport *new_rport; + struct fc_rport_priv *new_rdata; + struct fc_rport_identifiers ids; struct fc_rport *rport; mutex_lock(&rdata->rp_mutex); @@ -240,11 +243,8 @@ static void fc_rport_work(struct work_struct *work) rport_ops = rdata->ops; rport = rdata->rport; - if (event == RPORT_EV_CREATED) { - struct fc_rport *new_rport; - struct fc_rport_priv *new_rdata; - struct fc_rport_identifiers ids; - + switch (event) { + case RPORT_EV_CREATED: ids = rdata->ids; rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); @@ -289,9 +289,11 @@ static void fc_rport_work(struct work_struct *work) rdata = new_rport->dd_data; if (rport_ops->event_callback) rport_ops->event_callback(lport, rdata, event); - } else if ((event == RPORT_EV_FAILED) || - (event == RPORT_EV_LOGO) || - (event == RPORT_EV_STOP)) { + break; + + case RPORT_EV_FAILED: + case RPORT_EV_LOGO: + case RPORT_EV_STOP: trans_state = rdata->trans_state; mutex_unlock(&rdata->rp_mutex); if (rport_ops->event_callback) @@ -305,8 +307,12 @@ static void fc_rport_work(struct work_struct *work) lport->tt.exch_mgr_reset(lport, 0, port_id); lport->tt.exch_mgr_reset(lport, port_id, 0); } - } else + break; + + default: mutex_unlock(&rdata->rp_mutex); + break; + } } /** From 4c0f62b5679321b2e5572cf541ffb9f7b344d47c Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:12 -0700 Subject: [PATCH 154/209] [SCSI] libfc: rename rport event CREATED to READY Remote ports will become READY more than once after ADISC is implemented in a later patch. The event callback that has been called "CREATED" will mean "READY". Rename it now in preparation for those changes. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 2 +- drivers/scsi/libfc/fc_lport.c | 4 ++-- drivers/scsi/libfc/fc_rport.c | 4 ++-- include/scsi/libfc.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 5f839b625e50..e5e5b260a572 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -118,7 +118,7 @@ static void fc_disc_rport_callback(struct fc_lport *lport, rdata->ids.port_id); switch (event) { - case RPORT_EV_CREATED: + case RPORT_EV_READY: if (disc) { mutex_lock(&disc->disc_mutex); list_add_tail(&rdata->peers, &disc->rports); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index a7fe6b8d38b8..016f771ebe69 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -147,14 +147,14 @@ static void fc_lport_rport_callback(struct fc_lport *lport, rdata->ids.port_id); switch (event) { - case RPORT_EV_CREATED: + case RPORT_EV_READY: if (rdata->ids.port_id == FC_FID_DIR_SERV) { mutex_lock(&lport->lp_mutex); if (lport->state == LPORT_ST_DNS) { lport->dns_rp = rdata; fc_lport_enter_rpn_id(lport); } else { - FC_LPORT_DBG(lport, "Received an CREATED event " + FC_LPORT_DBG(lport, "Received an READY event " "on port (%6x) for the directory " "server, but the lport is not " "in the DNS state, it's in the " diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index d0a45425cc3f..50959ba0a9a0 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -244,7 +244,7 @@ static void fc_rport_work(struct work_struct *work) rport = rdata->rport; switch (event) { - case RPORT_EV_CREATED: + case RPORT_EV_READY: ids = rdata->ids; rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); @@ -413,7 +413,7 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) if (rdata->event == RPORT_EV_NONE) queue_work(rport_event_queue, &rdata->event_work); - rdata->event = RPORT_EV_CREATED; + rdata->event = RPORT_EV_READY; } /** diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index a94d216d2207..d324df8c36e6 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -167,7 +167,7 @@ struct fc_disc_port { enum fc_rport_event { RPORT_EV_NONE = 0, - RPORT_EV_CREATED, + RPORT_EV_READY, RPORT_EV_FAILED, RPORT_EV_STOP, RPORT_EV_LOGO From 9e9d0452fe12115b1c1883c0d4d2ee509079791b Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:18 -0700 Subject: [PATCH 155/209] [SCSI] libfc: don't create dummy (rogue) remote ports Don't create a "dummy" remote port to go with fc_rport_priv. Make the rport truly optional by allocating fc_rport_priv separately and not requiring a dummy rport to be there if we haven't yet done fc_remote_port_add(). The fc_rport_libfc_priv remains as a structure attached to the rport for I/O purposes. Be sure to hold references on rdata when the lock is dropped in fc_rport_work(). 
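The reference rule mentioned above, shown as a minimal pattern (illustrative only; example_notify() is made up):

/* Pin rdata with a kref before dropping rp_mutex so it cannot be freed
 * while the unlocked work (fc_remote_port_add(), event callbacks) runs.
 */
static void example_notify(struct fc_rport_priv *rdata)
{
        struct fc_lport *lport = rdata->local_port;

        mutex_lock(&rdata->rp_mutex);
        kref_get(&rdata->kref);
        mutex_unlock(&rdata->rp_mutex);

        /* ... unlocked work using rdata goes here ... */

        kref_put(&rdata->kref, lport->tt.rport_destroy);
}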
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 40 +++------ drivers/scsi/libfc/fc_rport.c | 158 ++++++++++++++-------------------- include/scsi/libfc.h | 40 ++++----- 3 files changed, 94 insertions(+), 144 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index e5e5b260a572..bbea41e50a57 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -66,7 +66,8 @@ struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport, struct fc_rport_priv *rdata; list_for_each_entry(rdata, &disc->rports, peers) { - if (rdata->ids.port_id == port_id) + if (rdata->ids.port_id == port_id && + rdata->rp_state != RPORT_ST_DELETE) return rdata; } return NULL; @@ -87,15 +88,8 @@ void fc_disc_stop_rports(struct fc_disc *disc) lport = disc->lport; mutex_lock(&disc->disc_mutex); - list_for_each_entry_safe(rdata, next, &disc->rports, peers) { - list_del(&rdata->peers); + list_for_each_entry_safe(rdata, next, &disc->rports, peers) lport->tt.rport_logoff(rdata); - } - - list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) { - lport->tt.rport_logoff(rdata); - } - mutex_unlock(&disc->disc_mutex); } @@ -119,20 +113,12 @@ static void fc_disc_rport_callback(struct fc_lport *lport, switch (event) { case RPORT_EV_READY: - if (disc) { - mutex_lock(&disc->disc_mutex); - list_add_tail(&rdata->peers, &disc->rports); - mutex_unlock(&disc->disc_mutex); - } break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: mutex_lock(&disc->disc_mutex); - mutex_lock(&rdata->rp_mutex); - if (rdata->trans_state == FC_PORTSTATE_ROGUE) - list_del(&rdata->peers); - mutex_unlock(&rdata->rp_mutex); + list_del(&rdata->peers); mutex_unlock(&disc->disc_mutex); break; default: @@ -235,7 +221,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, list_del(&dp->peers); rdata = lport->tt.rport_lookup(lport, dp->ids.port_id); if (rdata) { - list_del(&rdata->peers); lport->tt.rport_logoff(rdata); } fc_disc_single(disc, dp); @@ -296,10 +281,8 @@ static void fc_disc_restart(struct fc_disc *disc) FC_DISC_DBG(disc, "Restarting discovery\n"); - list_for_each_entry_safe(rdata, next, &disc->rports, peers) { - list_del(&rdata->peers); + list_for_each_entry_safe(rdata, next, &disc->rports, peers) lport->tt.rport_logoff(rdata); - } disc->requested = 1; if (!disc->pending) @@ -392,7 +375,6 @@ static int fc_disc_new_target(struct fc_disc *disc, * assigned the same FCID. This should be rare. * Delete the old one and fall thru to re-create. 
*/ - list_del(&rdata->peers); lport->tt.rport_logoff(rdata); rdata = NULL; } @@ -406,12 +388,13 @@ static int fc_disc_new_target(struct fc_disc *disc, rdata = lport->tt.rport_create(lport, ids); if (!rdata) error = -ENOMEM; + else + list_add_tail(&rdata->peers, + &disc->rports); } } if (rdata) { rdata->ops = &fc_disc_rport_ops; - rdata->rp_state = RPORT_ST_INIT; - list_add_tail(&rdata->peers, &disc->rogue_rports); lport->tt.rport_login(rdata); } } @@ -585,9 +568,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) rdata = lport->tt.rport_create(lport, &ids); if (rdata) { rdata->ops = &fc_disc_rport_ops; - rdata->local_port = lport; - list_add_tail(&rdata->peers, - &disc->rogue_rports); + list_add_tail(&rdata->peers, &disc->rports); lport->tt.rport_login(rdata); } else printk(KERN_WARNING "libfc: Failed to allocate " @@ -736,7 +717,7 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) if (rdata) { rdata->ops = &fc_disc_rport_ops; kfree(dp); - list_add_tail(&rdata->peers, &disc->rogue_rports); + list_add_tail(&rdata->peers, &disc->rports); lport->tt.rport_login(rdata); } return; @@ -798,7 +779,6 @@ int fc_disc_init(struct fc_lport *lport) INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); mutex_init(&disc->disc_mutex); INIT_LIST_HEAD(&disc->rports); - INIT_LIST_HEAD(&disc->rogue_rports); disc->lport = lport; disc->delay = FC_DISC_DELAY; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 50959ba0a9a0..a1794a39158e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -86,61 +86,35 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_DELETE] = "Delete", }; -static void fc_rport_rogue_destroy(struct device *dev) +/** + * fc_rport_create() - create remote port in INIT state. + * @lport: local port. + * @ids: remote port identifiers. + * + * Locking note: this may be called without locks held, but + * is usually called from discovery with the disc_mutex held. + */ +static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, + struct fc_rport_identifiers *ids) { - struct fc_rport *rport = dev_to_rport(dev); - struct fc_rport_priv *rdata = RPORT_TO_PRIV(rport); - - FC_RPORT_DBG(rdata, "Destroying rogue rport\n"); - kfree(rport); -} - -struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, - struct fc_rport_identifiers *ids) -{ - struct fc_rport *rport; struct fc_rport_priv *rdata; - rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL); - if (!rport) + rdata = kzalloc(sizeof(*rdata), GFP_KERNEL); + if (!rdata) return NULL; - rdata = RPORT_TO_PRIV(rport); - - rport->dd_data = rdata; - rport->port_id = ids->port_id; - rport->port_name = ids->port_name; - rport->node_name = ids->node_name; - rport->roles = ids->roles; - rport->maxframe_size = FC_MIN_MAX_PAYLOAD; - /* - * Note: all this libfc rogue rport code will be removed for - * upstream so it fine that this is really ugly and hacky right now. 
- */ - device_initialize(&rport->dev); - rport->dev.release = fc_rport_rogue_destroy; - rdata->ids = *ids; kref_init(&rdata->kref); mutex_init(&rdata->rp_mutex); - rdata->rport = rport; rdata->local_port = lport; - rdata->trans_state = FC_PORTSTATE_ROGUE; rdata->rp_state = RPORT_ST_INIT; rdata->event = RPORT_EV_NONE; rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; - rdata->ops = NULL; rdata->e_d_tov = lport->e_d_tov; rdata->r_a_tov = lport->r_a_tov; rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); - /* - * For good measure, but not necessary as we should only - * add REAL rport to the lport list. - */ - INIT_LIST_HEAD(&rdata->peers); - return rdata; } @@ -151,11 +125,9 @@ struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, static void fc_rport_destroy(struct kref *kref) { struct fc_rport_priv *rdata; - struct fc_rport *rport; rdata = container_of(kref, struct fc_rport_priv, kref); - rport = rdata->rport; - put_device(&rport->dev); + kfree(rdata); } /** @@ -229,12 +201,10 @@ static void fc_rport_work(struct work_struct *work) u32 port_id; struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, event_work); + struct fc_rport_libfc_priv *rp; enum fc_rport_event event; - enum fc_rport_trans_state trans_state; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; - struct fc_rport *new_rport; - struct fc_rport_priv *new_rdata; struct fc_rport_identifiers ids; struct fc_rport *rport; @@ -243,70 +213,72 @@ static void fc_rport_work(struct work_struct *work) rport_ops = rdata->ops; rport = rdata->rport; + FC_RPORT_DBG(rdata, "work event %u\n", event); + switch (event) { case RPORT_EV_READY: ids = rdata->ids; rdata->event = RPORT_EV_NONE; + kref_get(&rdata->kref); mutex_unlock(&rdata->rp_mutex); - new_rport = fc_remote_port_add(lport->host, 0, &ids); - if (new_rport) { - /* - * Switch from the rogue rport to the rport - * returned by the FC class. 
- */ - new_rport->maxframe_size = rdata->maxframe_size; - - new_rdata = new_rport->dd_data; - new_rdata->rport = new_rport; - new_rdata->ids = ids; - new_rdata->e_d_tov = rdata->e_d_tov; - new_rdata->r_a_tov = rdata->r_a_tov; - new_rdata->ops = rdata->ops; - new_rdata->local_port = rdata->local_port; - new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; - new_rdata->trans_state = FC_PORTSTATE_REAL; - new_rdata->maxframe_size = rdata->maxframe_size; - new_rdata->supported_classes = rdata->supported_classes; - kref_init(&new_rdata->kref); - mutex_init(&new_rdata->rp_mutex); - INIT_DELAYED_WORK(&new_rdata->retry_work, - fc_rport_timeout); - INIT_LIST_HEAD(&new_rdata->peers); - INIT_WORK(&new_rdata->event_work, fc_rport_work); - - fc_rport_state_enter(new_rdata, RPORT_ST_READY); - } else { - printk(KERN_WARNING "libfc: Failed to allocate " - " memory for rport (%6x)\n", ids.port_id); - event = RPORT_EV_FAILED; + if (!rport) + rport = fc_remote_port_add(lport->host, 0, &ids); + if (!rport) { + FC_RPORT_DBG(rdata, "Failed to add the rport\n"); + lport->tt.rport_logoff(rdata); + kref_put(&rdata->kref, lport->tt.rport_destroy); + return; } - if (rdata->ids.port_id != FC_FID_DIR_SERV) - if (rport_ops->event_callback) - rport_ops->event_callback(lport, rdata, - RPORT_EV_FAILED); - kref_put(&rdata->kref, lport->tt.rport_destroy); - rdata = new_rport->dd_data; - if (rport_ops->event_callback) + mutex_lock(&rdata->rp_mutex); + if (rdata->rport) + FC_RPORT_DBG(rdata, "rport already allocated\n"); + rdata->rport = rport; + rport->maxframe_size = rdata->maxframe_size; + rport->supported_classes = rdata->supported_classes; + + rp = rport->dd_data; + rp->local_port = lport; + rp->rp_state = rdata->rp_state; + rp->flags = rdata->flags; + rp->e_d_tov = rdata->e_d_tov; + rp->r_a_tov = rdata->r_a_tov; + mutex_unlock(&rdata->rp_mutex); + + if (rport_ops->event_callback) { + FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); + } + kref_put(&rdata->kref, lport->tt.rport_destroy); break; case RPORT_EV_FAILED: case RPORT_EV_LOGO: case RPORT_EV_STOP: - trans_state = rdata->trans_state; + port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); - if (rport_ops->event_callback) + + if (rport_ops->event_callback) { + FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); - cancel_delayed_work_sync(&rdata->retry_work); - if (trans_state == FC_PORTSTATE_ROGUE) - kref_put(&rdata->kref, lport->tt.rport_destroy); - else { - port_id = rport->port_id; - fc_remote_port_delete(rport); - lport->tt.exch_mgr_reset(lport, 0, port_id); - lport->tt.exch_mgr_reset(lport, port_id, 0); } + cancel_delayed_work_sync(&rdata->retry_work); + + /* + * Reset any outstanding exchanges before freeing rport. 
+ */ + lport->tt.exch_mgr_reset(lport, 0, port_id); + lport->tt.exch_mgr_reset(lport, port_id, 0); + + if (rport) { + rp = rport->dd_data; + rp->rp_state = RPORT_ST_DELETE; + mutex_lock(&rdata->rp_mutex); + rdata->rport = NULL; + mutex_unlock(&rdata->rp_mutex); + fc_remote_port_delete(rport); + } + kref_put(&rdata->kref, lport->tt.rport_destroy); break; default: @@ -1311,7 +1283,7 @@ static void fc_rport_flush_queue(void) int fc_rport_init(struct fc_lport *lport) { if (!lport->tt.rport_create) - lport->tt.rport_create = fc_rport_rogue_create; + lport->tt.rport_create = fc_rport_create; if (!lport->tt.rport_login) lport->tt.rport_login = fc_rport_login; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index d324df8c36e6..bf4b1c2ec600 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -146,11 +146,6 @@ enum fc_rport_state { RPORT_ST_DELETE, /* port being deleted */ }; -enum fc_rport_trans_state { - FC_PORTSTATE_ROGUE, - FC_PORTSTATE_REAL, -}; - /** * struct fc_disc_port - temporary discovery port to hold rport identifiers * @lp: Fibre Channel host port instance @@ -173,14 +168,6 @@ enum fc_rport_event { RPORT_EV_LOGO }; -/* - * Temporary definition to prepare for split off from fc_rport_libfc_priv - * of a separately-allocated structure called fc_rport_priv. This will - * be the primary object for the discovery and rport state machines. - * This definition is just to make this patch series easier to review. - */ -#define fc_rport_priv fc_rport_libfc_priv - struct fc_rport_priv; struct fc_rport_operations { @@ -191,6 +178,24 @@ struct fc_rport_operations { /** * struct fc_rport_libfc_priv - libfc internal information about a remote port * @local_port: Fibre Channel host port instance + * @rp_state: indicates READY for I/O or DELETE when blocked. 
+ * @flags: REC and RETRY supported flags + * @e_d_tov: error detect timeout value (in msec) + * @r_a_tov: resource allocation timeout value (in msec) + */ +struct fc_rport_libfc_priv { + struct fc_lport *local_port; + enum fc_rport_state rp_state; + u16 flags; + #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) + #define FC_RP_FLAGS_RETRY (1 << 1) + unsigned int e_d_tov; + unsigned int r_a_tov; +}; + +/** + * struct fc_rport_priv - libfc rport and discovery info about a remote port + * @local_port: Fibre Channel host port instance * @rport: transport remote port * @kref: reference counter * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges @@ -205,21 +210,18 @@ struct fc_rport_operations { * @retry_work: * @event_callback: Callback for rport READY, FAILED or LOGO */ -struct fc_rport_libfc_priv { +struct fc_rport_priv { struct fc_lport *local_port; struct fc_rport *rport; struct kref kref; enum fc_rport_state rp_state; struct fc_rport_identifiers ids; u16 flags; - #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) - #define FC_RP_FLAGS_RETRY (1 << 1) u16 max_seq; u16 maxframe_size; unsigned int retries; unsigned int e_d_tov; unsigned int r_a_tov; - enum fc_rport_trans_state trans_state; struct mutex rp_mutex; struct delayed_work retry_work; enum fc_rport_event event; @@ -229,9 +231,6 @@ struct fc_rport_libfc_priv { u32 supported_classes; }; -#define RPORT_TO_PRIV(x) \ - ((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport))) - /* * fcoe stats structure */ @@ -686,7 +685,6 @@ struct fc_disc { enum fc_disc_event); struct list_head rports; - struct list_head rogue_rports; struct fc_lport *lport; struct mutex disc_mutex; struct fc_gpn_ft_resp partial_buf; /* partial name buffer */ From 00fea930d404b9a9039291d5a61975e6c2ea974e Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:23 -0700 Subject: [PATCH 156/209] [SCSI] libfc: fix rport event race between READY and LOGO When a remote port becomes ready and a LOGO is received before the READY event is in rport_work waiting on the mutex, the event is changed to LOGO and the work queued, so both the calls to rport_work see the LOGO event, and both try to do the list_del(), causing a crash. Don't change the event if it is already set. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a1794a39158e..cbf6c9f233ca 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1267,9 +1267,7 @@ static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata, return; } - rdata->event = RPORT_EV_LOGO; - fc_rport_state_enter(rdata, RPORT_ST_DELETE); - queue_work(rport_event_queue, &rdata->event_work); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); fc_frame_free(fp); From 786681b96fc1a5b94d187160b7bf80bf6b4681ed Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:29 -0700 Subject: [PATCH 157/209] [SCSI] libfc: eliminate disc->event There was no need to have the discovery status stored in struct fc_disc. Change fc_disc_done() to take the discovery status as an argument and just pass it on to the discovery callback. 
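The resulting call shape, as a small sketch (illustrative only; example_finish() is made up and assumed to live in fc_disc.c next to fc_disc_done(), whose caller must hold the disc_mutex):

static void example_finish(struct fc_disc *disc, bool ok)
{
        /* the status flows straight to the callback; nothing is
         * staged in the disc structure any more
         */
        fc_disc_done(disc, ok ? DISC_EV_SUCCESS : DISC_EV_FAILED);
}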
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 31 ++++++++++++------------------- include/scsi/libfc.h | 1 - 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index bbea41e50a57..736f91742064 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -49,7 +49,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static int fc_disc_new_target(struct fc_disc *, struct fc_rport_priv *, struct fc_rport_identifiers *); -static void fc_disc_done(struct fc_disc *); +static void fc_disc_done(struct fc_disc *, enum fc_disc_event); static void fc_disc_timeout(struct work_struct *); static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); @@ -329,8 +329,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, if (rdata) { kref_get(&rdata->kref); if (!fc_disc_new_target(disc, rdata, &rdata->ids)) { - disc->event = DISC_EV_SUCCESS; - fc_disc_done(disc); + fc_disc_done(disc, DISC_EV_SUCCESS); } kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } else { @@ -404,20 +403,18 @@ static int fc_disc_new_target(struct fc_disc *disc, /** * fc_disc_done() - Discovery has been completed * @disc: FC discovery context + * @event: discovery completion status + * * Locking Note: This function expects that the disc mutex is locked before * it is called. The discovery callback is then made with the lock released, * and the lock is re-taken before returning from this function */ -static void fc_disc_done(struct fc_disc *disc) +static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) { struct fc_lport *lport = disc->lport; - enum fc_disc_event event; FC_DISC_DBG(disc, "Discovery complete\n"); - event = disc->event; - disc->event = DISC_EV_NONE; - if (disc->requested) fc_disc_gpn_ft_req(disc); else @@ -460,11 +457,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) } disc->retry_count++; schedule_delayed_work(&disc->disc_work, delay); - } else { - /* exceeded retries */ - disc->event = DISC_EV_FAILED; - fc_disc_done(disc); - } + } else + fc_disc_done(disc, DISC_EV_FAILED); } } @@ -503,10 +497,12 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) } /** - * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request + * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. * @lport: Fibre Channel host port instance * @buf: GPN_FT response buffer * @len: size of response buffer + * + * Goes through the list of IDs and names resulting from a request. 
*/ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) { @@ -577,8 +573,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) } if (np->fp_flags & FC_NS_FID_LAST) { - disc->event = DISC_EV_SUCCESS; - fc_disc_done(disc); + fc_disc_done(disc, DISC_EV_SUCCESS); len = 0; break; } @@ -669,8 +664,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " "(check zoning)\n", cp->ct_reason, cp->ct_explan); - disc->event = DISC_EV_FAILED; - fc_disc_done(disc); + fc_disc_done(disc, DISC_EV_FAILED); } else { FC_DISC_DBG(disc, "GPN_FT unexpected response code " "%x\n", ntohs(cp->ct_cmd)); @@ -782,7 +776,6 @@ int fc_disc_init(struct fc_lport *lport) disc->lport = lport; disc->delay = FC_DISC_DELAY; - disc->event = DISC_EV_NONE; return 0; } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index bf4b1c2ec600..f3f320f1d399 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -679,7 +679,6 @@ struct fc_disc { unsigned char requested; unsigned short seq_count; unsigned char buf_len; - enum fc_disc_event event; void (*disc_callback)(struct fc_lport *, enum fc_disc_event); From b84c7962653e4d04065d2603f0e1424ee0f455ae Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:34 -0700 Subject: [PATCH 158/209] [SCSI] libfc: remove unused disc->delay element Delete unused disc->delay element. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 3 --- include/scsi/libfc.h | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 736f91742064..8427396909e8 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -43,8 +43,6 @@ #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ -#define FC_DISC_DELAY 3 - static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static int fc_disc_new_target(struct fc_disc *, struct fc_rport_priv *, @@ -775,7 +773,6 @@ int fc_disc_init(struct fc_lport *lport) INIT_LIST_HEAD(&disc->rports); disc->lport = lport; - disc->delay = FC_DISC_DELAY; return 0; } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f3f320f1d399..093b0439a2cf 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -674,7 +674,6 @@ struct libfc_function_template { /* information used by the discovery layer */ struct fc_disc { unsigned char retry_count; - unsigned char delay; unsigned char pending; unsigned char requested; unsigned short seq_count; From cdbe6dfece038e75214735d56cd06319b9a6df46 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:39 -0700 Subject: [PATCH 159/209] [SCSI] libfc: rport debug messages were printing pointer values Don't print large negative decimal numbers for frame pointers in the debug messages from fc_rport_error(). Just print 0 if its a frame pointer, and print the error numbers as positive. 
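The idiom behind the one-line fix, in isolation (illustrative only; example_log() is made up and assumed to sit in fc_rport.c where fc_rport_state() is visible): fp is either a real frame pointer or an ERR_PTR()-encoded negative errno.

#include <linux/err.h>

/* Print 0 for a genuine frame pointer, or the positive errno otherwise. */
static void example_log(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
        FC_RPORT_DBG(rdata, "Error %ld in state %s\n",
                     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
                     fc_rport_state(rdata));
}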
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index cbf6c9f233ca..6ecf36d4708c 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -436,7 +436,8 @@ static void fc_rport_timeout(struct work_struct *work) static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) { FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", - PTR_ERR(fp), fc_rport_state(rdata), rdata->retries); + IS_ERR(fp) ? -PTR_ERR(fp) : 0, + fc_rport_state(rdata), rdata->retries); switch (rdata->rp_state) { case RPORT_ST_PLOGI: From b5cbf083736e14911c32fca2a93c540a92e8413d Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:44 -0700 Subject: [PATCH 160/209] [SCSI] libfc: simplify fc_lport_rport_callback The lport rport callback can only be called for the dNS rport, since its the only rport who's ops point to that function. Remove unnecessary checking and debug messages. Put the locking outside the switch statement as a simplification. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 43 ++++++++++++----------------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 016f771ebe69..22c0f7bc004f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -146,44 +146,31 @@ static void fc_lport_rport_callback(struct fc_lport *lport, FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, rdata->ids.port_id); + mutex_lock(&lport->lp_mutex); switch (event) { case RPORT_EV_READY: - if (rdata->ids.port_id == FC_FID_DIR_SERV) { - mutex_lock(&lport->lp_mutex); - if (lport->state == LPORT_ST_DNS) { - lport->dns_rp = rdata; - fc_lport_enter_rpn_id(lport); - } else { - FC_LPORT_DBG(lport, "Received an READY event " - "on port (%6x) for the directory " - "server, but the lport is not " - "in the DNS state, it's in the " - "%d state", rdata->ids.port_id, - lport->state); - lport->tt.rport_logoff(rdata); - } - mutex_unlock(&lport->lp_mutex); - } else - FC_LPORT_DBG(lport, "Received an event for port (%6x) " - "which is not the directory server\n", - rdata->ids.port_id); + if (lport->state == LPORT_ST_DNS) { + lport->dns_rp = rdata; + fc_lport_enter_rpn_id(lport); + } else { + FC_LPORT_DBG(lport, "Received an READY event " + "on port (%6x) for the directory " + "server, but the lport is not " + "in the DNS state, it's in the " + "%d state", rdata->ids.port_id, + lport->state); + lport->tt.rport_logoff(rdata); + } break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: - if (rdata->ids.port_id == FC_FID_DIR_SERV) { - mutex_lock(&lport->lp_mutex); - lport->dns_rp = NULL; - mutex_unlock(&lport->lp_mutex); - - } else - FC_LPORT_DBG(lport, "Received an event for port (%6x) " - "which is not the directory server\n", - rdata->ids.port_id); + lport->dns_rp = NULL; break; case RPORT_EV_NONE: break; } + mutex_unlock(&lport->lp_mutex); } /** From 48f00902ba40d3e4467782a42258b952437a89f9 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:50 -0700 Subject: [PATCH 161/209] [SCSI] libfc: make rport module maintain the rport list The list of remote ports (struct fc_rport_priv) has been maintained by the discovery module. 
In preparation for having lport->tt.rport_create() do a lookup first, maintain the rports list in the rport module. It will still be protected by the disc_mutex. The DNS rport is an exception for until after further patches. For now, do not add it to the list. The point-to-point rport will be in the discovery list. So at shutdown, it doesn't need to be separately logged out. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 8 -------- drivers/scsi/libfc/fc_lport.c | 14 ++++++-------- drivers/scsi/libfc/fc_rport.c | 11 +++++++++-- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8427396909e8..e6b13bfecf73 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -115,9 +115,6 @@ static void fc_disc_rport_callback(struct fc_lport *lport, case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: - mutex_lock(&disc->disc_mutex); - list_del(&rdata->peers); - mutex_unlock(&disc->disc_mutex); break; default: break; @@ -385,9 +382,6 @@ static int fc_disc_new_target(struct fc_disc *disc, rdata = lport->tt.rport_create(lport, ids); if (!rdata) error = -ENOMEM; - else - list_add_tail(&rdata->peers, - &disc->rports); } } if (rdata) { @@ -562,7 +556,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) rdata = lport->tt.rport_create(lport, &ids); if (rdata) { rdata->ops = &fc_disc_rport_ops; - list_add_tail(&rdata->peers, &disc->rports); lport->tt.rport_login(rdata); } else printk(KERN_WARNING "libfc: Failed to allocate " @@ -709,7 +702,6 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) if (rdata) { rdata->ops = &fc_disc_rport_ops; kfree(dp); - list_add_tail(&rdata->peers, &disc->rports); lport->tt.rport_login(rdata); } return; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 22c0f7bc004f..f33e5732e3fc 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -205,12 +205,11 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, ids.node_name = remote_wwnn; ids.roles = FC_RPORT_ROLE_UNKNOWN; - if (lport->ptp_rp) { + mutex_lock(&lport->disc.disc_mutex); + if (lport->ptp_rp) lport->tt.rport_logoff(lport->ptp_rp); - lport->ptp_rp = NULL; - } - lport->ptp_rp = lport->tt.rport_create(lport, &ids); + mutex_unlock(&lport->disc.disc_mutex); lport->tt.rport_login(lport->ptp_rp); @@ -931,10 +930,7 @@ static void fc_lport_reset_locked(struct fc_lport *lport) if (lport->dns_rp) lport->tt.rport_logoff(lport->dns_rp); - if (lport->ptp_rp) { - lport->tt.rport_logoff(lport->ptp_rp); - lport->ptp_rp = NULL; - } + lport->ptp_rp = NULL; lport->tt.disc_stop(lport); @@ -1304,7 +1300,9 @@ static void fc_lport_enter_dns(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_DNS); + mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_create(lport, &ids); + mutex_unlock(&lport->disc.disc_mutex); if (!rdata) goto err; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 6ecf36d4708c..a9087b31bce9 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -91,8 +91,7 @@ static const char *fc_rport_state_names[] = { * @lport: local port. * @ids: remote port identifiers. * - * Locking note: this may be called without locks held, but - * is usually called from discovery with the disc_mutex held. + * Locking note: must be called with the disc_mutex held. 
*/ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, struct fc_rport_identifiers *ids) @@ -115,6 +114,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); + if (ids->port_id != FC_FID_DIR_SERV) + list_add(&rdata->peers, &lport->disc.rports); return rdata; } @@ -258,6 +259,12 @@ static void fc_rport_work(struct work_struct *work) port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); + if (port_id != FC_FID_DIR_SERV) { + mutex_lock(&lport->disc.disc_mutex); + list_del(&rdata->peers); + mutex_unlock(&lport->disc.disc_mutex); + } + if (rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); From 19f97e3c0acc5eb03486044f5428395b7690a01a Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:01:55 -0700 Subject: [PATCH 162/209] [SCSI] libfc: have rport_create do a lookup for pre-existing rports first For future discovery patches, change rport_create to return a previously created rport_priv that has the FC_ID as long as it isn't in deleted state. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 9 +++------ drivers/scsi/libfc/fc_rport.c | 4 ++++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index e6b13bfecf73..266aa1ea01e4 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -377,12 +377,9 @@ static int fc_disc_new_target(struct fc_disc *disc, ids->port_id != fc_host_port_id(lport->host) && ids->port_name != lport->wwpn) { if (!rdata) { - rdata = lport->tt.rport_lookup(lport, ids->port_id); - if (!rdata) { - rdata = lport->tt.rport_create(lport, ids); - if (!rdata) - error = -ENOMEM; - } + rdata = lport->tt.rport_create(lport, ids); + if (!rdata) + error = -ENOMEM; } if (rdata) { rdata->ops = &fc_disc_rport_ops; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a9087b31bce9..29bb6fd10036 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -98,6 +98,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, { struct fc_rport_priv *rdata; + rdata = lport->tt.rport_lookup(lport, ids->port_id); + if (rdata) + return rdata; + rdata = kzalloc(sizeof(*rdata), GFP_KERNEL); if (!rdata) return NULL; From 8345592b8388b51e0f52f63e94a5a5d3f07fda9a Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:01 -0700 Subject: [PATCH 163/209] [SCSI] libfc: change to make remote port callback optional Since the rport list maintenance is now done in the rport module, the callback (and ops) are usually not necessary. Allow rdata->ops to be left NULL if nothing needs to be done in an event callback. 
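Condensed from the fc_rport.c hunks below, the guarded invocation that makes the ops table optional; a module with nothing to do on rport events can simply leave rdata->ops NULL:

	if (rport_ops && rport_ops->event_callback) {
		FC_RPORT_DBG(rdata, "callback ev %d\n", event);
		rport_ops->event_callback(lport, rdata, event);
	}
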
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 45 +++-------------------------------- drivers/scsi/libfc/fc_rport.c | 4 ++-- 2 files changed, 5 insertions(+), 44 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 266aa1ea01e4..3fcbba171863 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -91,37 +91,6 @@ void fc_disc_stop_rports(struct fc_disc *disc) mutex_unlock(&disc->disc_mutex); } -/** - * fc_disc_rport_callback() - Event handler for rport events - * @lport: The lport which is receiving the event - * @rdata: private remote port data - * @event: The event that occured - * - * Locking Note: The rport lock should not be held when calling - * this function. - */ -static void fc_disc_rport_callback(struct fc_lport *lport, - struct fc_rport_priv *rdata, - enum fc_rport_event event) -{ - struct fc_disc *disc = &lport->disc; - - FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, - rdata->ids.port_id); - - switch (event) { - case RPORT_EV_READY: - break; - case RPORT_EV_LOGO: - case RPORT_EV_FAILED: - case RPORT_EV_STOP: - break; - default: - break; - } - -} - /** * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) * @sp: Current sequence of the RSCN exchange @@ -334,10 +303,6 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, mutex_unlock(&disc->disc_mutex); } -static struct fc_rport_operations fc_disc_rport_ops = { - .event_callback = fc_disc_rport_callback, -}; - /** * fc_disc_new_target() - Handle new target found by discovery * @lport: FC local port @@ -381,10 +346,8 @@ static int fc_disc_new_target(struct fc_disc *disc, if (!rdata) error = -ENOMEM; } - if (rdata) { - rdata->ops = &fc_disc_rport_ops; + if (rdata) lport->tt.rport_login(rdata); - } } return error; } @@ -551,10 +514,9 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) if (ids.port_id != fc_host_port_id(lport->host) && ids.port_name != lport->wwpn) { rdata = lport->tt.rport_create(lport, &ids); - if (rdata) { - rdata->ops = &fc_disc_rport_ops; + if (rdata) lport->tt.rport_login(rdata); - } else + else printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " "(%6x)\n", ids.port_id); @@ -697,7 +659,6 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) rdata = lport->tt.rport_create(lport, &dp->ids); if (rdata) { - rdata->ops = &fc_disc_rport_ops; kfree(dp); lport->tt.rport_login(rdata); } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 29bb6fd10036..406049c13a0e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -250,7 +250,7 @@ static void fc_rport_work(struct work_struct *work) rp->r_a_tov = rdata->r_a_tov; mutex_unlock(&rdata->rp_mutex); - if (rport_ops->event_callback) { + if (rport_ops && rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } @@ -269,7 +269,7 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&lport->disc.disc_mutex); } - if (rport_ops->event_callback) { + if (rport_ops && rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } From 8025b5db7e10cd90cadec940cc766be3bbda65e8 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:06 -0700 Subject: [PATCH 164/209] [SCSI] libfc: move 
rport_lookup into fc_rport.c Move the libfc remote port lookup function into fc_rport.c. This seems like the best place for it. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 22 ---------------------- drivers/scsi/libfc/fc_rport.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 3fcbba171863..f6762a52147d 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -52,25 +52,6 @@ static void fc_disc_timeout(struct work_struct *); static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); -/** - * fc_disc_lookup_rport() - lookup a remote port by port_id - * @lport: Fibre Channel host port instance - * @port_id: remote port port_id to match - */ -struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport, - u32 port_id) -{ - const struct fc_disc *disc = &lport->disc; - struct fc_rport_priv *rdata; - - list_for_each_entry(rdata, &disc->rports, peers) { - if (rdata->ids.port_id == port_id && - rdata->rp_state != RPORT_ST_DELETE) - return rdata; - } - return NULL; -} - /** * fc_disc_stop_rports() - delete all the remote ports associated with the lport * @disc: The discovery job to stop rports on @@ -714,9 +695,6 @@ int fc_disc_init(struct fc_lport *lport) if (!lport->tt.disc_recv_req) lport->tt.disc_recv_req = fc_disc_recv_req; - if (!lport->tt.rport_lookup) - lport->tt.rport_lookup = fc_disc_lookup_rport; - disc = &lport->disc; INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); mutex_init(&disc->disc_mutex); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 406049c13a0e..99ac056293f5 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -86,6 +86,23 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_DELETE] = "Delete", }; +/** + * fc_rport_lookup() - lookup a remote port by port_id + * @lport: Fibre Channel host port instance + * @port_id: remote port port_id to match + */ +static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, + u32 port_id) +{ + struct fc_rport_priv *rdata; + + list_for_each_entry(rdata, &lport->disc.rports, peers) + if (rdata->ids.port_id == port_id && + rdata->rp_state != RPORT_ST_DELETE) + return rdata; + return NULL; +} + /** * fc_rport_create() - create remote port in INIT state. * @lport: local port. @@ -1292,6 +1309,9 @@ static void fc_rport_flush_queue(void) int fc_rport_init(struct fc_lport *lport) { + if (!lport->tt.rport_lookup) + lport->tt.rport_lookup = fc_rport_lookup; + if (!lport->tt.rport_create) lport->tt.rport_create = fc_rport_create; From 0f6c6149870e03c722af6eae406758b28cb71320 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:11 -0700 Subject: [PATCH 165/209] [SCSI] libfc: do not log off rports before or after discovery When receiving an RSCN, do not log off all rports. This is extremely disruptive. If, after the GPN_FT response, some rports haven't been listed, delete them. Add field disc_id to structs fc_rport_priv and fc_disc. disc_id is an arbitrary serial number used to identify the rports found by the latest discovery. This eliminates the need to go through the rport list when restarting discovery. 
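Pulled together from the hunks below, a condensed sketch of how the disc_id generation number is used: each pass stamps a fresh odd, non-zero value, GPN_FT results are marked with it, and completion reconciles the rport list against it.

	/* Start of a discovery pass: advance to a new non-zero generation. */
	disc->disc_id = (disc->disc_id + 2) | 1;

	/* Completion: log in ports seen this pass, log out stale ones. */
	list_for_each_entry(rdata, &disc->rports, peers) {
		if (!rdata->disc_id)
			continue;	/* dNS or PLOGI-created, never discovered */
		if (rdata->disc_id == disc->disc_id)
			lport->tt.rport_login(rdata);
		else
			lport->tt.rport_logoff(rdata);
	}
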
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 47 ++++++++++++++++++++++++++---------- include/scsi/libfc.h | 3 +++ 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index f6762a52147d..ddf494424ff4 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -221,17 +221,19 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_disc_restart(struct fc_disc *disc) { - struct fc_rport_priv *rdata, *next; - struct fc_lport *lport = disc->lport; - FC_DISC_DBG(disc, "Restarting discovery\n"); - list_for_each_entry_safe(rdata, next, &disc->rports, peers) - lport->tt.rport_logoff(rdata); - disc->requested = 1; - if (!disc->pending) - fc_disc_gpn_ft_req(disc); + if (disc->pending) + return; + + /* + * Advance disc_id. This is an arbitrary non-zero number that will + * match the value in the fc_rport_priv after discovery for all + * freshly-discovered remote ports. Avoid wrapping to zero. + */ + disc->disc_id = (disc->disc_id + 2) | 1; + fc_disc_gpn_ft_req(disc); } /** @@ -278,6 +280,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, } kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } else { + disc->disc_id = (disc->disc_id + 2) | 1; fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */ } @@ -345,13 +348,30 @@ static int fc_disc_new_target(struct fc_disc *disc, static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) { struct fc_lport *lport = disc->lport; + struct fc_rport_priv *rdata; FC_DISC_DBG(disc, "Discovery complete\n"); - if (disc->requested) - fc_disc_gpn_ft_req(disc); - else - disc->pending = 0; + disc->pending = 0; + if (disc->requested) { + fc_disc_restart(disc); + return; + } + + /* + * Go through all remote ports. If they were found in the latest + * discovery, reverify or log them in. Otherwise, log them out. + * Skip ports which were never discovered. These are the dNS port + * and ports which were created by PLOGI. 
+ */ + list_for_each_entry(rdata, &disc->rports, peers) { + if (!rdata->disc_id) + continue; + if (rdata->disc_id == disc->disc_id) + lport->tt.rport_login(rdata); + else + lport->tt.rport_logoff(rdata); + } mutex_unlock(&disc->disc_mutex); disc->disc_callback(lport, event); @@ -496,7 +516,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) ids.port_name != lport->wwpn) { rdata = lport->tt.rport_create(lport, &ids); if (rdata) - lport->tt.rport_login(rdata); + rdata->disc_id = disc->disc_id; else printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " @@ -640,6 +660,7 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) rdata = lport->tt.rport_create(lport, &dp->ids); if (rdata) { + rdata->disc_id = disc->disc_id; kfree(dp); lport->tt.rport_login(rdata); } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 093b0439a2cf..517dce5c8d0d 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -202,6 +202,7 @@ struct fc_rport_libfc_priv { * @ids: remote port identifiers and roles * @flags: REC and RETRY supported flags * @max_seq: maximum number of concurrent sequences + * @disc_id: discovery identifier * @maxframe_size: maximum frame size * @retries: retry count in current state * @e_d_tov: error detect timeout value (in msec) @@ -218,6 +219,7 @@ struct fc_rport_priv { struct fc_rport_identifiers ids; u16 flags; u16 max_seq; + u16 disc_id; u16 maxframe_size; unsigned int retries; unsigned int e_d_tov; @@ -678,6 +680,7 @@ struct fc_disc { unsigned char requested; unsigned short seq_count; unsigned char buf_len; + u16 disc_id; void (*disc_callback)(struct fc_lport *, enum fc_disc_event); From c356afd48690775097507c37780ee17c0b0ea375 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:17 -0700 Subject: [PATCH 166/209] [SCSI] libfc: discovery restart sequence error fix When an RSCN is received during fabric discovery, it restarts. After the restart, disc->seq_count was incremented, so when the first frame was received, it was considered "out of sequence". That left the state disc->active, preventing further discoveries. Change to advance the sequence count before parsing, so that it won't be changed after a potential restart. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index ddf494424ff4..fd3a6b287e98 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -629,11 +629,10 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); } if (buf) { + disc->seq_count++; error = fc_disc_gpn_ft_parse(disc, buf, len); if (error) fc_disc_error(disc, fp); - else - disc->seq_count++; } fc_frame_free(fp); From a1c1e4e76c68f564cddc0b43842b895464f7865f Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:22 -0700 Subject: [PATCH 167/209] [SCSI] libfc: rearrange code in fc_disc_gpn_ft_resp() Code cleanup for fc_disc_gpn_ft_resp(). Some of the fc_disc.c code was poorly formatted. For example, some lines in fc_disc.c were unnecessarily truncated and the buf variable could be eliminated. Also moved the increment of seq_count into fc_disc_gpn_ft_parse(), to avoid doing it separately before each call. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index fd3a6b287e98..819ec6256a53 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -469,6 +469,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) struct fc_rport_priv *rdata; lport = disc->lport; + disc->seq_count++; /* * Handle partial name record left over from previous call. @@ -582,10 +583,10 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_disc *disc = disc_arg; struct fc_ct_hdr *cp; struct fc_frame_header *fh; + enum fc_disc_event event = DISC_EV_NONE; unsigned int seq_cnt; - void *buf = NULL; unsigned int len; - int error; + int error = 0; mutex_lock(&disc->disc_mutex); FC_DISC_DBG(disc, "Received a GPN_FT response\n"); @@ -600,8 +601,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, fh = fc_frame_header_get(fp); len = fr_len(fp) - sizeof(*fh); seq_cnt = ntohs(fh->fh_seq_cnt); - if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && - disc->seq_count == 0) { + if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) { cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) { FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", @@ -609,33 +609,29 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { /* Accepted, parse the response. */ - buf = cp + 1; len -= sizeof(*cp); + error = fc_disc_gpn_ft_parse(disc, cp + 1, len); } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " "(check zoning)\n", cp->ct_reason, cp->ct_explan); - fc_disc_done(disc, DISC_EV_FAILED); + event = DISC_EV_FAILED; } else { FC_DISC_DBG(disc, "GPN_FT unexpected response code " "%x\n", ntohs(cp->ct_cmd)); } - } else if (fr_sof(fp) == FC_SOF_N3 && - seq_cnt == disc->seq_count) { - buf = fh + 1; + } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) { + error = fc_disc_gpn_ft_parse(disc, fh + 1, len); } else { FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " "seq_cnt %x expected %x sof %x eof %x\n", seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); } - if (buf) { - disc->seq_count++; - error = fc_disc_gpn_ft_parse(disc, buf, len); - if (error) - fc_disc_error(disc, fp); - } + if (error) + fc_disc_error(disc, fp); + else if (event != DISC_EV_NONE) + fc_disc_done(disc, event); fc_frame_free(fp); - mutex_unlock(&disc->disc_mutex); } From 883a337cf8969b2906ffd8aeb838d875f7338190 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:27 -0700 Subject: [PATCH 168/209] [SCSI] libfc: handle discovery failure more correctly. Abhijeet Joglekar wrote: "In gpn_ft_resp, if the payload is short, or unexpected response or out of sequence frame, then we just return and do nothing. We should either enter fc_disc_done() with DISC_EV_FAIL which will then restart any queued discovery requests or call lport module which will reset local port, or we should call fc_disc_error() so that the gpn_ft is retried. The situation as is causes discovery to remain pending and never get restarted, in these rare cases. We saw this due to a coding bug in fc_disc before. The only ways it could happen would be bugs, packet corruption or an FC fabric problem. Change it to fail discovery. 
The local port will restart discovery, although it probably should just give up until the next link flap. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 819ec6256a53..844376c1d8dc 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -606,6 +606,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, if (!cp) { FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", fr_len(fp)); + event = DISC_EV_FAILED; } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { /* Accepted, parse the response. */ @@ -619,6 +620,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, } else { FC_DISC_DBG(disc, "GPN_FT unexpected response code " "%x\n", ntohs(cp->ct_cmd)); + event = DISC_EV_FAILED; } } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) { error = fc_disc_gpn_ft_parse(disc, fh + 1, len); @@ -626,6 +628,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " "seq_cnt %x expected %x sof %x eof %x\n", seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); + event = DISC_EV_FAILED; } if (error) fc_disc_error(disc, fp); From c762608bf75f792dcaf3658338189b9970951704 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:33 -0700 Subject: [PATCH 169/209] [SCSI] libfc: fix: empty zone causes endless discovery retries. On some switches, an empty zone causes GPN_FT to be rejected with reason 9 (unable) explanation 7 (FC-4 types not registered), which causes discovery to be retried endlessly. Treat this as just an empty response and consider discovery complete. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 3 +++ include/scsi/fc/fc_gs.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 844376c1d8dc..9b8043bdeddb 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -617,6 +617,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, "(check zoning)\n", cp->ct_reason, cp->ct_explan); event = DISC_EV_FAILED; + if (cp->ct_reason == FC_FS_RJT_UNABL && + cp->ct_explan == FC_FS_EXP_FTNR) + event = DISC_EV_SUCCESS; } else { FC_DISC_DBG(disc, "GPN_FT unexpected response code " "%x\n", ntohs(cp->ct_cmd)); diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h index ffab0272c65a..324dd0e3c622 100644 --- a/include/scsi/fc/fc_gs.h +++ b/include/scsi/fc/fc_gs.h @@ -87,6 +87,7 @@ enum fc_ct_explan { FC_FS_EXP_PNAM = 0x02, /* port name not registered */ FC_FS_EXP_NNAM = 0x03, /* node name not registered */ FC_FS_EXP_COS = 0x04, /* class of service not registered */ + FC_FS_EXP_FTNR = 0x07, /* FC-4 types not registered */ /* definitions not complete */ }; From 3667d7e7f7bb63a394f73a671fdaf4a187aa858e Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:38 -0700 Subject: [PATCH 170/209] [SCSI] libfc: discovery retry should clear pending first. Currently fc_disc_timeout() restarts discovery only if it is not pending. When the timer is scheduled, the discovery is left pending, so the timeout never restarts it. Fix by not checking for pending in the timeout handler. If discovery is stopped and restarted in the meantime, the timeout will be canceled. 
Also, when a new discovery is started, the retry count wasn't cleared. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 9b8043bdeddb..3efdbbab9b2b 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -233,6 +233,7 @@ static void fc_disc_restart(struct fc_disc *disc) * freshly-discovered remote ports. Avoid wrapping to zero. */ disc->disc_id = (disc->disc_id + 2) | 1; + disc->retry_count = 0; fc_disc_gpn_ft_req(disc); } @@ -563,8 +564,7 @@ static void fc_disc_timeout(struct work_struct *work) struct fc_disc, disc_work.work); mutex_lock(&disc->disc_mutex); - if (disc->requested && !disc->pending) - fc_disc_gpn_ft_req(disc); + fc_disc_gpn_ft_req(disc); mutex_unlock(&disc->disc_mutex); } From 81a67b9717d9e4f81bed7d1f2df6ba86aaab9ad9 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:43 -0700 Subject: [PATCH 171/209] [SCSI] libfc: discovery gpn_ft parse bug In fc_disc_gpn_ft_parse(), after fc_disc_done() is called, the disc state is changed by setting buf_len = 0. This is wrong since the discovery may have restarted. Instead, return after calling fc_disc_done. Also, return an error on memory allocation failure. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 3efdbbab9b2b..a2410dc74441 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -479,6 +479,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) plen = len; np = (struct fc_gpn_ft_resp *)bp; tlen = disc->buf_len; + disc->buf_len = 0; if (tlen) { WARN_ON(tlen >= sizeof(*np)); plen = sizeof(*np) - tlen; @@ -519,10 +520,12 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) rdata = lport->tt.rport_create(lport, &ids); if (rdata) rdata->disc_id = disc->disc_id; - else + else { printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " "(%6x)\n", ids.port_id); + error = -ENOMEM; + } } if (np->fp_flags & FC_NS_FID_LAST) { @@ -546,8 +549,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) memcpy(&disc->partial_buf, np, len); } disc->buf_len = (unsigned char) len; - } else { - disc->buf_len = 0; } return error; } From 29d898e909e3d086055e2649f5a24d4c2c1ca884 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:49 -0700 Subject: [PATCH 172/209] [SCSI] libfc: clean up point-to-point discovery code. The discovery code had a special-case for the point-to-point mode, which used a bunch of code that wasn't really needed. Now that rport_create adds the rport to the discovery list, completely skip discovery for the point-to-point case. 
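The net effect, condensed from the fc_lport.c hunk below: once the local port reaches READY, fabric discovery is started only when there is no point-to-point peer; the point-to-point rport already sits on the discovery list, so it is logged off with the rest at shutdown.

	if (!lport->ptp_rp)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
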
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 84 +---------------------------------- drivers/scsi/libfc/fc_lport.c | 13 +----- 2 files changed, 4 insertions(+), 93 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index a2410dc74441..428421842f3a 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -45,8 +45,6 @@ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); -static int fc_disc_new_target(struct fc_disc *, struct fc_rport_priv *, - struct fc_rport_identifiers *); static void fc_disc_done(struct fc_disc *, enum fc_disc_event); static void fc_disc_timeout(struct work_struct *); static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); @@ -240,14 +238,12 @@ static void fc_disc_restart(struct fc_disc *disc) /** * fc_disc_start() - Fibre Channel Target discovery * @lport: FC local port - * - * Returns non-zero if discovery cannot be started. + * @disc_callback: function to be called when discovery is complete */ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, enum fc_disc_event), struct fc_lport *lport) { - struct fc_rport_priv *rdata; struct fc_disc *disc = &lport->disc; /* @@ -256,87 +252,11 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, * and send the GPN_FT request. */ mutex_lock(&disc->disc_mutex); - disc->disc_callback = disc_callback; - - /* - * If not ready, or already running discovery, just set request flag. - */ - disc->requested = 1; - - if (disc->pending) { - mutex_unlock(&disc->disc_mutex); - return; - } - - /* - * Handle point-to-point mode as a simple discovery - * of the remote port. Yucky, yucky, yuck, yuck! - */ - rdata = disc->lport->ptp_rp; - if (rdata) { - kref_get(&rdata->kref); - if (!fc_disc_new_target(disc, rdata, &rdata->ids)) { - fc_disc_done(disc, DISC_EV_SUCCESS); - } - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); - } else { - disc->disc_id = (disc->disc_id + 2) | 1; - fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */ - } - + fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } -/** - * fc_disc_new_target() - Handle new target found by discovery - * @lport: FC local port - * @rdata: The previous FC remote port priv (NULL if new remote port) - * @ids: Identifiers for the new FC remote port - * - * Locking Note: This function expects that the disc_mutex is locked - * before it is called. - */ -static int fc_disc_new_target(struct fc_disc *disc, - struct fc_rport_priv *rdata, - struct fc_rport_identifiers *ids) -{ - struct fc_lport *lport = disc->lport; - int error = 0; - - if (rdata && ids->port_name) { - if (rdata->ids.port_name == -1) { - /* - * Set WWN and fall through to notify of create. - */ - rdata->ids.port_name = ids->port_name; - rdata->ids.node_name = ids->node_name; - } else if (rdata->ids.port_name != ids->port_name) { - /* - * This is a new port with the same FCID as - * a previously-discovered port. Presumably the old - * port logged out and a new port logged in and was - * assigned the same FCID. This should be rare. - * Delete the old one and fall thru to re-create. 
- */ - lport->tt.rport_logoff(rdata); - rdata = NULL; - } - } - if (((ids->port_name != -1) || (ids->port_id != -1)) && - ids->port_id != fc_host_port_id(lport->host) && - ids->port_name != lport->wwpn) { - if (!rdata) { - rdata = lport->tt.rport_create(lport, ids); - if (!rdata) - error = -ENOMEM; - } - if (rdata) - lport->tt.rport_login(rdata); - } - return error; -} - /** * fc_disc_done() - Discovery has been completed * @disc: FC discovery context diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f33e5732e3fc..7000df573691 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -708,7 +708,8 @@ static void fc_lport_enter_ready(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_READY); - lport->tt.disc_start(fc_lport_disc_callback, lport); + if (!lport->ptp_rp) + lport->tt.disc_start(fc_lport_disc_callback, lport); } /** @@ -794,8 +795,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, get_unaligned_be64(&flp->fl_wwnn)); - lport->tt.disc_start(fc_lport_disc_callback, lport); - out: sp = fr_seq(rx_fp); fc_frame_free(rx_fp); @@ -1512,14 +1511,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_enter_dns(lport); } } - - if (flp) { - csp_flags = ntohs(flp->fl_csp.sp_features); - if ((csp_flags & FC_SP_FT_FPORT) == 0) { - lport->tt.disc_start(fc_lport_disc_callback, - lport); - } - } } else { FC_LPORT_DBG(lport, "Bad FLOGI response\n"); } From 935d0fce44b906268b8a29de4e72ebb57a3a06d8 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:02:54 -0700 Subject: [PATCH 173/209] [SCSI] libfc: don't do discovery before callback is set It's possible to "restart" discovery before it was started if an RSCN is received early enough. We were jumping to 0 due to the disc_callback function pointer not getting set. Don't restart discovery if disc_callback is NULL. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 428421842f3a..1a699f484c85 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -219,6 +219,9 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, */ static void fc_disc_restart(struct fc_disc *disc) { + if (!disc->disc_callback) + return; + FC_DISC_DBG(disc, "Restarting discovery\n"); disc->requested = 1; From 9737e6a7b5b8af48f983cd565df93493597c565b Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 25 Aug 2009 14:02:59 -0700 Subject: [PATCH 174/209] [SCSI] libfc: Initialize fc_rport_identifiers inside fc_rport_create Currently these values are initialized by the callers. This was exposed by a later patch that adds PLOGI request support. The patch failed to initialize the new remote port's roles and it caused problems. This patch has the rport_create routine initialize the identifiers and then the callers can override them with real values. 
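Abridged from the hunks below, the new division of labour: fc_rport_create() fills in defaults keyed only by the port ID, and callers that know the real identifiers, such as the point-to-point setup, overwrite them afterwards.

	/* In fc_rport_create(): defaults for a port known only by its ID. */
	rdata->ids.port_id   = port_id;
	rdata->ids.port_name = -1;
	rdata->ids.node_name = -1;
	rdata->ids.roles     = FC_RPORT_ROLE_UNKNOWN;

	/* In fc_lport_ptp_setup(): the caller overrides the defaults. */
	lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
	lport->ptp_rp->ids.port_name = remote_wwpn;
	lport->ptp_rp->ids.node_name = remote_wwnn;
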
Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 20 ++++++++------------ drivers/scsi/libfc/fc_lport.c | 19 ++++--------------- drivers/scsi/libfc/fc_rport.c | 18 +++++++++++------- include/scsi/libfc.h | 17 +++++++++-------- 4 files changed, 32 insertions(+), 42 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 1a699f484c85..4242894cce7c 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -137,10 +137,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, break; } dp->lp = lport; - dp->ids.port_id = ntoh24(pp->rscn_fid); - dp->ids.port_name = -1; - dp->ids.node_name = -1; - dp->ids.roles = FC_RPORT_ROLE_UNKNOWN; + dp->port_id = ntoh24(pp->rscn_fid); list_add_tail(&dp->peers, &disc_ports); break; case ELS_ADDR_FMT_AREA: @@ -162,7 +159,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, redisc, lport->state, disc->pending); list_for_each_entry_safe(dp, next, &disc_ports, peers) { list_del(&dp->peers); - rdata = lport->tt.rport_lookup(lport, dp->ids.port_id); + rdata = lport->tt.rport_lookup(lport, dp->port_id); if (rdata) { lport->tt.rport_logoff(rdata); } @@ -435,15 +432,14 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) while (plen >= sizeof(*np)) { ids.port_id = ntoh24(np->fp_fid); ids.port_name = ntohll(np->fp_wwpn); - ids.node_name = -1; - ids.roles = FC_RPORT_ROLE_UNKNOWN; if (ids.port_id != fc_host_port_id(lport->host) && ids.port_name != lport->wwpn) { - rdata = lport->tt.rport_create(lport, &ids); - if (rdata) + rdata = lport->tt.rport_create(lport, ids.port_id); + if (rdata) { + rdata->ids.port_name = ids.port_name; rdata->disc_id = disc->disc_id; - else { + } else { printk(KERN_WARNING "libfc: Failed to allocate " "memory for the newly discovered port " "(%6x)\n", ids.port_id); @@ -580,10 +576,10 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) lport = disc->lport; - if (dp->ids.port_id == fc_host_port_id(lport->host)) + if (dp->port_id == fc_host_port_id(lport->host)) goto out; - rdata = lport->tt.rport_create(lport, &dp->ids); + rdata = lport->tt.rport_create(lport, dp->port_id); if (rdata) { rdata->disc_id = disc->disc_id; kfree(dp); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 7000df573691..caf68240bddf 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -198,17 +198,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, u32 remote_fid, u64 remote_wwpn, u64 remote_wwnn) { - struct fc_rport_identifiers ids; - - ids.port_id = remote_fid; - ids.port_name = remote_wwpn; - ids.node_name = remote_wwnn; - ids.roles = FC_RPORT_ROLE_UNKNOWN; - mutex_lock(&lport->disc.disc_mutex); if (lport->ptp_rp) lport->tt.rport_logoff(lport->ptp_rp); - lport->ptp_rp = lport->tt.rport_create(lport, &ids); + lport->ptp_rp = lport->tt.rport_create(lport, remote_fid); + lport->ptp_rp->ids.port_name = remote_wwpn; + lport->ptp_rp->ids.node_name = remote_wwnn; mutex_unlock(&lport->disc.disc_mutex); lport->tt.rport_login(lport->ptp_rp); @@ -1287,12 +1282,6 @@ static struct fc_rport_operations fc_lport_rport_ops = { static void fc_lport_enter_dns(struct fc_lport *lport) { struct fc_rport_priv *rdata; - struct fc_rport_identifiers ids; - - ids.port_id = FC_FID_DIR_SERV; - ids.port_name = -1; - ids.node_name = -1; - ids.roles = FC_RPORT_ROLE_UNKNOWN; FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", 
fc_lport_state(lport)); @@ -1300,7 +1289,7 @@ static void fc_lport_enter_dns(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_DNS); mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_create(lport, &ids); + rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV); mutex_unlock(&lport->disc.disc_mutex); if (!rdata) goto err; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 99ac056293f5..c667be879be6 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -104,18 +104,18 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, } /** - * fc_rport_create() - create remote port in INIT state. - * @lport: local port. - * @ids: remote port identifiers. + * fc_rport_create() - Create a new remote port + * @lport: The local port that the new remote port is for + * @port_id: The port ID for the new remote port * * Locking note: must be called with the disc_mutex held. */ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, - struct fc_rport_identifiers *ids) + u32 port_id) { struct fc_rport_priv *rdata; - rdata = lport->tt.rport_lookup(lport, ids->port_id); + rdata = lport->tt.rport_lookup(lport, port_id); if (rdata) return rdata; @@ -123,7 +123,11 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, if (!rdata) return NULL; - rdata->ids = *ids; + rdata->ids.node_name = -1; + rdata->ids.port_name = -1; + rdata->ids.port_id = port_id; + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + kref_init(&rdata->kref); mutex_init(&rdata->rp_mutex); rdata->local_port = lport; @@ -135,7 +139,7 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); - if (ids->port_id != FC_FID_DIR_SERV) + if (port_id != FC_FID_DIR_SERV) list_add(&rdata->peers, &lport->disc.rports); return rdata; } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 517dce5c8d0d..cd410c123b99 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -148,16 +148,16 @@ enum fc_rport_state { /** * struct fc_disc_port - temporary discovery port to hold rport identifiers - * @lp: Fibre Channel host port instance - * @peers: node for list management during discovery and RSCN processing - * @ids: identifiers structure to pass to fc_remote_port_add() - * @rport_work: work struct for starting the rport state machine + * @lp: Fibre Channel host port instance + * @peers: Node for list management during discovery and RSCN processing + * @rport_work: Work struct for starting the rport state machine + * @port_id: Port ID of the discovered port */ struct fc_disc_port { struct fc_lport *lp; struct list_head peers; - struct fc_rport_identifiers ids; struct work_struct rport_work; + u32 port_id; }; enum fc_rport_event { @@ -565,10 +565,11 @@ struct libfc_function_template { int (*lport_reset)(struct fc_lport *); /* - * Create a remote port + * Create a remote port with a given port ID + * + * STATUS: OPTIONAL */ - struct fc_rport_priv *(*rport_create)(struct fc_lport *, - struct fc_rport_identifiers *); + struct fc_rport_priv *(*rport_create)(struct fc_lport *, u32); /* * Initiates the RP state machine. It is called from the LP module. 
From 6bd054cbf3f7da3442f30a7d4eb7da4dd1c44f21 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 25 Aug 2009 14:03:04 -0700 Subject: [PATCH 175/209] [SCSI] libfc: Always reset remote port roles when receiving PRLI Don't trust previous roles, reset them when we receive a PRLI. Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c667be879be6..cb54115c26cb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -666,6 +666,9 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, goto err; } + /* reinitialize remote port roles */ + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { pp = fc_frame_payload_get(fp, sizeof(*pp)); @@ -1173,6 +1176,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, pp->prli.prli_len = htons(len); len -= sizeof(struct fc_els_prli); + /* reinitialize remote port roles */ + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + /* * Go through all the service parameter pages and build * response. If plen indicates longer SPP than standard, From 131203a1ef53f3a4deb3260031bc53c7e4db4a24 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:10 -0700 Subject: [PATCH 176/209] [SCSI] libfc: move remote port lookup for ELS requests into fc_rport.c. This moves the remote port lookup for incoming ELS requests into fc_rport.c, in preparation for handing PLOGI and LOGO from unknown rports. This changes the arg to rport_recv_req from an rdata to an lport. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 30 +------------- drivers/scsi/libfc/fc_rport.c | 78 ++++++++++++++++++----------------- include/scsi/libfc.h | 2 +- 3 files changed, 44 insertions(+), 66 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index caf68240bddf..d3f4e0c34f5c 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -812,10 +812,6 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, { struct fc_frame_header *fh = fc_frame_header_get(fp); void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); - struct fc_rport_priv *rdata; - u32 s_id; - u32 d_id; - struct fc_seq_els_data rjt_data; mutex_lock(&lport->lp_mutex); @@ -831,7 +827,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, /* * Check opcode. */ - recv = NULL; + recv = lport->tt.rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: recv = fc_lport_recv_flogi_req; @@ -858,29 +854,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, break; } - if (recv) - recv(sp, fp, lport); - else { - /* - * Find session. - * If this is a new incoming PLOGI, we won't find it. 
- */ - s_id = ntoh24(fh->fh_s_id); - d_id = ntoh24(fh->fh_d_id); - - rdata = lport->tt.rport_lookup(lport, s_id); - if (rdata) - lport->tt.rport_recv_req(sp, fp, rdata); - else { - rjt_data.fp = NULL; - rjt_data.reason = ELS_RJT_UNAB; - rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, - ELS_LS_RJT, - &rjt_data); - fc_frame_free(fp); - } - } + recv(sp, fp, lport); } else { FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", fr_eof(fp)); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index cb54115c26cb..acdc72d6b873 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -912,57 +912,61 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) * fc_rport_recv_req() - Receive a request from a rport * @sp: current sequence in the PLOGI exchange * @fp: response frame - * @rdata_arg: private remote port data + * @lport: Fibre Channel local port * - * Locking Note: Called without the rport lock held. This - * function will hold the rport lock, call an _enter_* - * function and then unlock the rport. + * Locking Note: Called with the lport lock held. */ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_rport_priv *rdata) + struct fc_lport *lport) { - struct fc_lport *lport = rdata->local_port; - + struct fc_rport_priv *rdata; struct fc_frame_header *fh; struct fc_seq_els_data els_data; + u32 s_id; u8 op; - mutex_lock(&rdata->rp_mutex); - els_data.fp = NULL; els_data.explan = ELS_EXPL_NONE; els_data.reason = ELS_RJT_NONE; fh = fc_frame_header_get(fp); + s_id = ntoh24(fh->fh_s_id); - if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { - op = fc_frame_payload_op(fp); - switch (op) { - case ELS_PLOGI: - fc_rport_recv_plogi_req(rdata, sp, fp); - break; - case ELS_PRLI: - fc_rport_recv_prli_req(rdata, sp, fp); - break; - case ELS_PRLO: - fc_rport_recv_prlo_req(rdata, sp, fp); - break; - case ELS_LOGO: - fc_rport_recv_logo_req(rdata, sp, fp); - break; - case ELS_RRQ: - els_data.fp = fp; - lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); - break; - case ELS_REC: - els_data.fp = fp; - lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); - break; - default: - els_data.reason = ELS_RJT_UNSUP; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); - break; - } + rdata = lport->tt.rport_lookup(lport, s_id); + if (!rdata) { + els_data.reason = ELS_RJT_UNAB; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); + return; + } + mutex_lock(&rdata->rp_mutex); + + op = fc_frame_payload_op(fp); + switch (op) { + case ELS_PLOGI: + fc_rport_recv_plogi_req(rdata, sp, fp); + break; + case ELS_PRLI: + fc_rport_recv_prli_req(rdata, sp, fp); + break; + case ELS_PRLO: + fc_rport_recv_prlo_req(rdata, sp, fp); + break; + case ELS_LOGO: + fc_rport_recv_logo_req(rdata, sp, fp); + break; + case ELS_RRQ: + els_data.fp = fp; + lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); + break; + case ELS_REC: + els_data.fp = fp; + lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); + break; + default: + els_data.reason = ELS_RJT_UNSUP; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + break; } mutex_unlock(&rdata->rp_mutex); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index cd410c123b99..265f106d9fd6 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -598,7 +598,7 @@ struct libfc_function_template { * STATUS: OPTIONAL */ void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, - struct fc_rport_priv *); + struct fc_lport *); /* * lookup an rport by it's 
port ID. From 25b37b981e706c6df72c28c94f7787c3ea0cd343 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:15 -0700 Subject: [PATCH 177/209] [SCSI] libfc: fix: rport_recv_req needs disc_mutex when calling rport_lookup The rport_lookup function must be called while holding the disc_mutex. Otherwise, the rdata could be deleted just after that by another thread. All callers now check the state after grabbing the rdata rp_mutex. Even though rport_lookup skips ports in DELETE state, it does that without holding the rdata rp_mutex, so that the state may change. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index acdc72d6b873..02200b26d897 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -932,14 +932,17 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, fh = fc_frame_header_get(fp); s_id = ntoh24(fh->fh_s_id); + mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, s_id); if (!rdata) { + mutex_unlock(&lport->disc.disc_mutex); els_data.reason = ELS_RJT_UNAB; lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); fc_frame_free(fp); return; } mutex_lock(&rdata->rp_mutex); + mutex_unlock(&lport->disc.disc_mutex); op = fc_frame_payload_op(fp); switch (op) { From f657d299cf05883e23e12a69e86842da1df378ad Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:21 -0700 Subject: [PATCH 178/209] [SCSI] libfc: improve debug messages for ELS response handlers Improve lport and rport debug messages to indicate whether the response is LS_ACC, LS_RJT, closed, or timeout. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_elsct.c | 38 +++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_lport.c | 20 +++++++++--------- drivers/scsi/libfc/fc_rport.c | 8 ++++---- include/scsi/libfc.h | 5 +++++ 4 files changed, 57 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 5e8b011af50e..d655924d46b6 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -70,3 +70,41 @@ int fc_elsct_init(struct fc_lport *lport) return 0; } EXPORT_SYMBOL(fc_elsct_init); + +/** + * fc_els_resp_type() - return string describing ELS response for debug. + * @fp: frame pointer with possible error code. 
+ */ +const char *fc_els_resp_type(struct fc_frame *fp) +{ + const char *msg; + if (IS_ERR(fp)) { + switch (-PTR_ERR(fp)) { + case FC_NO_ERR: + msg = "response no error"; + break; + case FC_EX_TIMEOUT: + msg = "response timeout"; + break; + case FC_EX_CLOSED: + msg = "response closed"; + break; + default: + msg = "response unknown error"; + break; + } + } else { + switch (fc_frame_payload_op(fp)) { + case ELS_LS_ACC: + msg = "accept"; + break; + case ELS_LS_RJT: + msg = "reject"; + break; + default: + msg = "response unknown ELS"; + break; + } + } + return msg; +} diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index d3f4e0c34f5c..3f2f72390145 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1006,13 +1006,13 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_frame_header *fh; struct fc_ct_hdr *ct; + FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - FC_LPORT_DBG(lport, "Received a RFT_ID response\n"); - if (lport->state != LPORT_ST_RFT_ID) { FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " "%s\n", fc_lport_state(lport)); @@ -1060,13 +1060,13 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_frame_header *fh; struct fc_ct_hdr *ct; + FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - FC_LPORT_DBG(lport, "Received a RPN_ID response\n"); - if (lport->state != LPORT_ST_RPN_ID) { FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " "%s\n", fc_lport_state(lport)); @@ -1112,13 +1112,13 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_lport *lport = lp_arg; u8 op; + FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - FC_LPORT_DBG(lport, "Received a SCR response\n"); - if (lport->state != LPORT_ST_SCR) { FC_LPORT_DBG(lport, "Received a SCR response, but in state " "%s\n", fc_lport_state(lport)); @@ -1333,13 +1333,13 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_lport *lport = lp_arg; u8 op; + FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - FC_LPORT_DBG(lport, "Received a LOGO response\n"); - if (lport->state != LPORT_ST_LOGO) { FC_LPORT_DBG(lport, "Received a LOGO response, but in state " "%s\n", fc_lport_state(lport)); @@ -1415,13 +1415,13 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, unsigned int e_d_tov; u16 mfs; + FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp)); + if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - FC_LPORT_DBG(lport, "Received a FLOGI response\n"); - if (lport->state != LPORT_ST_FLOGI) { FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " "%s\n", fc_lport_state(lport)); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 02200b26d897..d014b285cd1a 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -544,7 +544,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a PLOGI response\n"); + FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp)); if 
(rdata->rp_state != RPORT_ST_PLOGI) { FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " @@ -651,7 +651,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a PRLI response\n"); + FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_PRLI) { FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " @@ -717,7 +717,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a LOGO response\n"); + FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_LOGO) { FC_RPORT_DBG(rdata, "Received a LOGO response, but in state " @@ -801,7 +801,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Received a RTV response\n"); + FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_RTV) { FC_RPORT_DBG(rdata, "Received a RTV response, but in state " diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 265f106d9fd6..e18e5ce5af51 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1084,4 +1084,9 @@ void fc_destroy_exch_mgr(void); int fc_setup_rport(void); void fc_destroy_rport(void); +/* + * Internal libfc functions. + */ +const char *fc_els_resp_type(struct fc_frame *); + #endif /* _LIBFC_H_ */ From 3ac6f98f4113ec1c115cf9d443a9bff816e47c0b Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:26 -0700 Subject: [PATCH 179/209] [SCSI] libfc: correctly handle incoming PLOGI request. libfc receives PLOGIs from switches which are trying to discover what kind of devices are present, and from other initiators to find out if we're a target. As an initiator, some argue we don't need to handle incoming PLOGI requests, and we currently reject them from unknown remote ports, but accept them if we're in the middle of a PLOGI to the remote port. For eventual target implementations, we want to handle them always. For incoming PLOGI, don't fail if the rport_priv doesn't exist. Just create it and become READY without going through PRLI. If PRLI occurs, then our roles will be set and we'll become READY again. Also, allow incoming PRLI in RTV state. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 183 +++++++++++++++++----------------- 1 file changed, 92 insertions(+), 91 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index d014b285cd1a..e121ff92c8ea 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -63,7 +63,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *); static void fc_rport_enter_ready(struct fc_rport_priv *); static void fc_rport_enter_logo(struct fc_rport_priv *); -static void fc_rport_recv_plogi_req(struct fc_rport_priv *, +static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_seq *, struct fc_frame *); static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); @@ -576,15 +576,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, csp_seq = cssp_seq; rdata->max_seq = csp_seq; rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs); - - /* - * If the rport is one of the well known addresses - * we skip PRLI and RTV and go straight to READY.
- */ - if (rdata->ids.port_id >= FC_FID_DOM_MGR) - fc_rport_enter_ready(rdata); - else - fc_rport_enter_prli(rdata); + fc_rport_enter_prli(rdata); } else fc_rport_error_retry(rdata, fp); @@ -763,6 +755,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) } *pp; struct fc_frame *fp; + /* + * If the rport is one of the well known addresses + * we skip PRLI and RTV and go straight to READY. + */ + if (rdata->ids.port_id >= FC_FID_DOM_MGR) { + fc_rport_enter_ready(rdata); + return; + } + FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", fc_rport_state(rdata)); @@ -929,6 +930,15 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, els_data.explan = ELS_EXPL_NONE; els_data.reason = ELS_RJT_NONE; + op = fc_frame_payload_op(fp); + switch (op) { + case ELS_PLOGI: + fc_rport_recv_plogi_req(lport, sp, fp); + return; + default: + break; + } + fh = fc_frame_header_get(fp); s_id = ntoh24(fh->fh_s_id); @@ -944,11 +954,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); mutex_unlock(&lport->disc.disc_mutex); - op = fc_frame_payload_op(fp); switch (op) { - case ELS_PLOGI: - fc_rport_recv_plogi_req(rdata, sp, fp); - break; case ELS_PRLI: fc_rport_recv_prli_req(rdata, sp, fp); break; @@ -977,48 +983,56 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request - * @rdata: private remote port data + * @lport: local port * @sp: current sequence in the PLOGI exchange * @fp: PLOGI request frame * - * Locking Note: The rport lock is exected to be held before calling - * this function. + * Locking Note: The rport lock is held before calling this function. */ -static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, +static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *rx_fp) { - struct fc_lport *lport = rdata->local_port; + struct fc_disc *disc; + struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; struct fc_exch *ep; struct fc_frame_header *fh; struct fc_els_flogi *pl; struct fc_seq_els_data rjt_data; - u32 sid; - u64 wwpn; - u64 wwnn; - enum fc_els_rjt_reason reject = 0; - u32 f_ctl; + u32 sid, f_ctl; + rjt_data.fp = NULL; - fh = fc_frame_header_get(fp); - - FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n", - fc_rport_state(rdata)); - sid = ntoh24(fh->fh_s_id); + + FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); + pl = fc_frame_payload_get(fp, sizeof(*pl)); if (!pl) { - FC_RPORT_DBG(rdata, "Received PLOGI too short\n"); - WARN_ON(1); - /* XXX TBD: send reject? */ - fc_frame_free(fp); - return; + FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n"); + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto reject; } - wwpn = get_unaligned_be64(&pl->fl_wwpn); - wwnn = get_unaligned_be64(&pl->fl_wwnn); + + disc = &lport->disc; + mutex_lock(&disc->disc_mutex); + rdata = lport->tt.rport_create(lport, sid); + if (!rdata) { + mutex_unlock(&disc->disc_mutex); + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto reject; + } + + mutex_lock(&rdata->rp_mutex); + mutex_unlock(&disc->disc_mutex); + + rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn); + rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn); /* - * If the session was just created, possibly due to the incoming PLOGI, + * If the rport was just created, possibly due to the incoming PLOGI, * set the state appropriately and accept the PLOGI. 
* * If we had also sent a PLOGI, and if the received PLOGI is from a @@ -1030,72 +1044,58 @@ static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, */ switch (rdata->rp_state) { case RPORT_ST_INIT: - FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT " - "- reject\n", (unsigned long long)wwpn); - reject = ELS_RJT_UNSUP; + FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n"); break; case RPORT_ST_PLOGI: - FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n", - rdata->rp_state); - if (wwpn < lport->wwpn) - reject = ELS_RJT_INPROG; + FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n"); + if (rdata->ids.port_name < lport->wwpn) { + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_INPROG; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } break; case RPORT_ST_PRLI: case RPORT_ST_READY: - FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " - "- ignored for now\n", rdata->rp_state); - /* XXX TBD - should reset */ break; case RPORT_ST_DELETE: default: - FC_RPORT_DBG(rdata, "Received PLOGI in unexpected " - "state %d\n", rdata->rp_state); - fc_frame_free(fp); - return; - break; + FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", + rdata->rp_state); + fc_frame_free(rx_fp); + goto out; } - if (reject) { - rjt_data.reason = reject; - rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); - fc_frame_free(fp); - } else { - fp = fc_frame_alloc(lport, sizeof(*pl)); - if (fp == NULL) { - fp = rx_fp; - rjt_data.reason = ELS_RJT_UNAB; - rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); - fc_frame_free(fp); - } else { - sp = lport->tt.seq_start_next(sp); - WARN_ON(!sp); - rdata->ids.port_name = wwpn; - rdata->ids.node_name = wwnn; + /* + * Get session payload size from incoming PLOGI. + */ + rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); + fc_frame_free(rx_fp); - /* - * Get session payload size from incoming PLOGI. - */ - rdata->maxframe_size = - fc_plogi_get_maxframe(pl, lport->mfs); - fc_frame_free(rx_fp); - fc_plogi_fill(lport, fp, ELS_LS_ACC); + /* + * Send LS_ACC. If this fails, the originator should retry. + */ + sp = lport->tt.seq_start_next(sp); + if (!sp) + goto out; + fp = fc_frame_alloc(lport, sizeof(*pl)); + if (!fp) + goto out; - /* - * Send LS_ACC. If this fails, - * the originator should retry. 
- */ - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; - f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); - if (rdata->rp_state == RPORT_ST_PLOGI) - fc_rport_enter_prli(rdata); - } - } + fc_plogi_fill(lport, fp, ELS_LS_ACC); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); + fc_rport_enter_prli(rdata); +out: + mutex_unlock(&rdata->rp_mutex); + return; + +reject: + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); } /** @@ -1138,6 +1138,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, switch (rdata->rp_state) { case RPORT_ST_PRLI: + case RPORT_ST_RTV: case RPORT_ST_READY: reason = ELS_RJT_NONE; break; From 83fe6a93465750d1a20221aaa9a253d9ea7fe45c Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:31 -0700 Subject: [PATCH 180/209] [SCSI] libfc: fix rport error handling for login-required and invalid ops When receiving an ELS request, if the request isn't recognized, the unsupported operation error should be given even if the port is not found or not logged in. Also, the LOGO request shouldn't give the login-required explanation. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 137 ++++++++++++++++++++++------------ 1 file changed, 88 insertions(+), 49 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e121ff92c8ea..04e9846ad1b5 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -69,7 +69,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_logo_req(struct fc_rport_priv *, +static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_seq *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); @@ -908,62 +908,56 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) kref_get(&rdata->kref); } - /** - * fc_rport_recv_req() - Receive a request from a rport + * fc_rport_recv_els_req() - handle a validated ELS request. + * @lport: Fibre Channel local port * @sp: current sequence in the PLOGI exchange * @fp: response frame - * @lport: Fibre Channel local port + * + * Handle incoming ELS requests that require port login. + * The ELS opcode has already been validated by the caller. * * Locking Note: Called with the lport lock held. 
*/ -void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) +static void fc_rport_recv_els_req(struct fc_lport *lport, + struct fc_seq *sp, struct fc_frame *fp) { struct fc_rport_priv *rdata; struct fc_frame_header *fh; struct fc_seq_els_data els_data; - u32 s_id; - u8 op; els_data.fp = NULL; - els_data.explan = ELS_EXPL_NONE; - els_data.reason = ELS_RJT_NONE; - - op = fc_frame_payload_op(fp); - switch (op) { - case ELS_PLOGI: - fc_rport_recv_plogi_req(lport, sp, fp); - return; - default: - break; - } + els_data.reason = ELS_RJT_UNAB; + els_data.explan = ELS_EXPL_PLOGI_REQD; fh = fc_frame_header_get(fp); - s_id = ntoh24(fh->fh_s_id); mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_lookup(lport, s_id); + rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id)); if (!rdata) { mutex_unlock(&lport->disc.disc_mutex); - els_data.reason = ELS_RJT_UNAB; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); - fc_frame_free(fp); - return; + goto reject; } mutex_lock(&rdata->rp_mutex); mutex_unlock(&lport->disc.disc_mutex); - switch (op) { + switch (rdata->rp_state) { + case RPORT_ST_PRLI: + case RPORT_ST_RTV: + case RPORT_ST_READY: + break; + default: + mutex_unlock(&rdata->rp_mutex); + goto reject; + } + + switch (fc_frame_payload_op(fp)) { case ELS_PRLI: fc_rport_recv_prli_req(rdata, sp, fp); break; case ELS_PRLO: fc_rport_recv_prlo_req(rdata, sp, fp); break; - case ELS_LOGO: - fc_rport_recv_logo_req(rdata, sp, fp); - break; case ELS_RRQ: els_data.fp = fp; lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); @@ -973,12 +967,58 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); break; default: - els_data.reason = ELS_RJT_UNSUP; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); /* can't happen */ break; } mutex_unlock(&rdata->rp_mutex); + return; + +reject: + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); +} + +/** + * fc_rport_recv_req() - Handle a received ELS request from a rport + * @sp: current sequence in the PLOGI exchange + * @fp: response frame + * @lport: Fibre Channel local port + * + * Locking Note: Called with the lport lock held. + */ +void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, + struct fc_lport *lport) +{ + struct fc_seq_els_data els_data; + + /* + * Handle PLOGI and LOGO requests separately, since they + * don't require prior login. + * Check for unsupported opcodes first and reject them. + * For some ops, it would be incorrect to reject with "PLOGI required". 
+ */ + switch (fc_frame_payload_op(fp)) { + case ELS_PLOGI: + fc_rport_recv_plogi_req(lport, sp, fp); + break; + case ELS_LOGO: + fc_rport_recv_logo_req(lport, sp, fp); + break; + case ELS_PRLI: + case ELS_PRLO: + case ELS_RRQ: + case ELS_REC: + fc_rport_recv_els_req(lport, sp, fp); + break; + default: + fc_frame_free(fp); + els_data.fp = NULL; + els_data.reason = ELS_RJT_UNSUP; + els_data.explan = ELS_EXPL_NONE; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + break; + } } /** @@ -1276,11 +1316,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", fc_rport_state(rdata)); - if (rdata->rp_state == RPORT_ST_DELETE) { - fc_frame_free(fp); - return; - } - rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_UNAB; rjt_data.explan = ELS_EXPL_NONE; @@ -1290,32 +1325,36 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, /** * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request - * @rdata: private remote port data + * @lport: local port. * @sp: current sequence in the LOGO exchange * @fp: LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. */ -static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata, +static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_frame_header *fh; - struct fc_lport *lport = rdata->local_port; + struct fc_rport_priv *rdata; + u32 sid; fh = fc_frame_header_get(fp); + sid = ntoh24(fh->fh_s_id); - FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", - fc_rport_state(rdata)); - - if (rdata->rp_state == RPORT_ST_DELETE) { - fc_frame_free(fp); - return; - } - - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - + mutex_lock(&lport->disc.disc_mutex); + rdata = lport->tt.rport_lookup(lport, sid); + if (rdata) { + mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + mutex_unlock(&rdata->rp_mutex); + } else + FC_RPORT_ID_DBG(lport, sid, + "Received LOGO from non-logged-in port\n"); + mutex_unlock(&lport->disc.disc_mutex); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); fc_frame_free(fp); } From feab4ae73031699fcf92a88f4b1e4ec1b14157a5 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:36 -0700 Subject: [PATCH 181/209] [SCSI] libfc: re-login to remote ports that send us LOGO After a quick link flap, a target was seen to send us a LOGO. Apparently, it saw an RSCN reporting that we had dropped out of the fabric after we had logged back into it. This is likely in larger fabrics (more than 2 FC switches) after a quick link flap at the initiator. Each link transition causes an port-specific RSCN to the target. After the link comes back up, the initiator successfully discovers and does a PLOGI to the target before the target sees the first RSCN reporting the initiator is gone, and it sends a LOGO. The target may see a subsequent RSCN saying the port is back, but probably wouldn't send a PLOGI and leaves it up to the initiator to re-login. An RSCN can be delayed by the switches due to software layers but a PLOGI is forwarded in hardware causing the PLOGI to beat the RSCN. If a remote port is in the discovered set and sends a LOGO, re-login to it. 
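In short, the LOGO handler below ends up doing roughly the following (a condensed sketch of the hunk, with the debug and non-logged-in paths trimmed):

	/* ACK the LOGO first, then decide whether to re-login or delete. */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
			fc_rport_enter_plogi(rdata);	/* still discovered: log back in */
		else
			fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
		mutex_unlock(&rdata->rp_mutex);
	}
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);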
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 04e9846ad1b5..dc97c603744f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1340,6 +1340,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_rport_priv *rdata; u32 sid; + lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + fh = fc_frame_header_get(fp); sid = ntoh24(fh->fh_s_id); @@ -1349,13 +1351,20 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + + /* + * If the remote port was created due to discovery, + * log back in. It may have seen a stale RSCN about us. + */ + if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) + fc_rport_enter_plogi(rdata); + else + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); mutex_unlock(&rdata->rp_mutex); } else FC_RPORT_ID_DBG(lport, sid, "Received LOGO from non-logged-in port\n"); mutex_unlock(&lport->disc.disc_mutex); - lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); fc_frame_free(fp); } From 68a1750b46ad5177f7703081b5fe85624f1aa62b Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:42 -0700 Subject: [PATCH 182/209] [SCSI] libfc: LOGO response code had extraneous enter_rtv fc_rport_logo_resp() had a call to fc_rport_enter_rtv() if the LOGO was accepted. This must've been a copy/paste mistake, but it didn't matter since we don't stay in the LOGO state long enough to hit this code. Change fc_rport_logo_resp() to just enter the delete state no matter what. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index dc97c603744f..b5bc8724e1a0 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -725,12 +725,10 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, } op = fc_frame_payload_op(fp); - if (op == ELS_LS_ACC) { - fc_rport_enter_rtv(rdata); - } else { - FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n"); - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - } + if (op != ELS_LS_ACC) + FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n", + op); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); out: fc_frame_free(fp); From 370c3bd05cf02afabea9cd3f2de66202d6b516dc Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:47 -0700 Subject: [PATCH 183/209] [SCSI] libfc: use ADISC to verify rport login state When rport_login is called on an rport that is already thought to be logged in, use ADISC. If that fails, redo PLOGI. This is less disruptive after fabric changes that don't affect the state of the target. Implement the sending of ADISC via fc_els_fill. Add ADISC state to the rport state machine. This is entered from READY and returns to READY after successful completion. If it fails, the rport is either logged off and deleted or re-does PLOGI.
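The resulting fc_rport_login() entry point is roughly (a condensed sketch of the dispatch in the hunk below):

	mutex_lock(&rdata->rp_mutex);
	switch (rdata->rp_state) {
	case RPORT_ST_READY:
		fc_rport_enter_adisc(rdata);	/* already logged in: verify with ADISC */
		break;
	default:
		fc_rport_enter_plogi(rdata);	/* otherwise do a normal PLOGI login */
		break;
	}
	mutex_unlock(&rdata->rp_mutex);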
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 122 ++++++++++++++++++++++++++++++++-- include/scsi/fc_encode.h | 21 ++++++ include/scsi/libfc.h | 1 + 3 files changed, 139 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index b5bc8724e1a0..c33e25851082 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -62,6 +62,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *); static void fc_rport_enter_rtv(struct fc_rport_priv *); static void fc_rport_enter_ready(struct fc_rport_priv *); static void fc_rport_enter_logo(struct fc_rport_priv *); +static void fc_rport_enter_adisc(struct fc_rport_priv *); static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_seq *, struct fc_frame *); @@ -83,6 +84,7 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_RTV] = "RTV", [RPORT_ST_READY] = "Ready", [RPORT_ST_LOGO] = "LOGO", + [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", }; @@ -326,15 +328,25 @@ static void fc_rport_work(struct work_struct *work) * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. + * + * This indicates the intent to be logged into the remote port. + * If it appears we are already logged in, ADISC is used to verify + * the setup. */ int fc_rport_login(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "Login to port\n"); - - fc_rport_enter_plogi(rdata); - + switch (rdata->rp_state) { + case RPORT_ST_READY: + FC_RPORT_DBG(rdata, "ADISC port\n"); + fc_rport_enter_adisc(rdata); + break; + default: + FC_RPORT_DBG(rdata, "Login to port\n"); + fc_rport_enter_plogi(rdata); + break; + } mutex_unlock(&rdata->rp_mutex); return 0; @@ -448,6 +460,9 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_LOGO: fc_rport_enter_logo(rdata); break; + case RPORT_ST_ADISC: + fc_rport_enter_adisc(rdata); + break; case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: @@ -473,13 +488,16 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) switch (rdata->rp_state) { case RPORT_ST_PLOGI: - case RPORT_ST_PRLI: case RPORT_ST_LOGO: fc_rport_enter_delete(rdata, RPORT_EV_FAILED); break; case RPORT_ST_RTV: fc_rport_enter_ready(rdata); break; + case RPORT_ST_PRLI: + case RPORT_ST_ADISC: + fc_rport_enter_logo(rdata); + break; case RPORT_ST_DELETE: case RPORT_ST_READY: case RPORT_ST_INIT: @@ -906,6 +924,93 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) kref_get(&rdata->kref); } +/** + * fc_rport_els_adisc_resp() - Address Discovery response handler + * @sp: current sequence in the ADISC exchange + * @fp: response frame + * @rdata_arg: remote port private. + * + * Locking Note: This function will be called without the rport lock + * held, but it will lock, call an _enter_* function or fc_rport_error + * and then unlock the rport. 
+ */ +static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_els_adisc *adisc; + u8 op; + + mutex_lock(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received a ADISC response\n"); + + if (rdata->rp_state != RPORT_ST_ADISC) { + FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n", + fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error(rdata, fp); + goto err; + } + + /* + * If address verification failed. Consider us logged out of the rport. + * Since the rport is still in discovery, we want to be + * logged in, so go to PLOGI state. Otherwise, go back to READY. + */ + op = fc_frame_payload_op(fp); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + if (op != ELS_LS_ACC || !adisc || + ntoh24(adisc->adisc_port_id) != rdata->ids.port_id || + get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name || + get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) { + FC_RPORT_DBG(rdata, "ADISC error or mismatch\n"); + fc_rport_enter_plogi(rdata); + } else { + FC_RPORT_DBG(rdata, "ADISC OK\n"); + fc_rport_enter_ready(rdata); + } +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +} + +/** + * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer + * @rdata: remote port private data + * + * Locking Note: The rport lock is expected to be held before calling + * this routine. + */ +static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + FC_RPORT_DBG(rdata, "sending ADISC from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_ADISC); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc)); + if (!fp) { + fc_rport_error_retry(rdata, fp); + return; + } + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, + fc_rport_adisc_resp, rdata, lport->e_d_tov)) + fc_rport_error_retry(rdata, fp); + else + kref_get(&rdata->kref); +} + /** * fc_rport_recv_els_req() - handle a validated ELS request. * @lport: Fibre Channel local port @@ -943,6 +1048,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, case RPORT_ST_PRLI: case RPORT_ST_RTV: case RPORT_ST_READY: + case RPORT_ST_ADISC: break; default: mutex_unlock(&rdata->rp_mutex); @@ -1095,6 +1201,10 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, break; case RPORT_ST_PRLI: case RPORT_ST_READY: + case RPORT_ST_ADISC: + FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " + "- ignored for now\n", rdata->rp_state); + /* XXX TBD - should reset */ break; case RPORT_ST_DELETE: default: @@ -1178,6 +1288,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, case RPORT_ST_PRLI: case RPORT_ST_RTV: case RPORT_ST_READY: + case RPORT_ST_ADISC: reason = ELS_RJT_NONE; break; default: @@ -1283,6 +1394,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, fc_rport_enter_ready(rdata); break; case RPORT_ST_READY: + case RPORT_ST_ADISC: break; default: break; diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 24bf764f9884..c5ee6bb79e05 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -56,6 +56,23 @@ static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl, fh->fh_parm_offset = htonl(parm_offset); } +/** + * fc_adisc_fill() - Fill in adisc request frame + * @lport: local port. 
+ * @fp: fc frame where payload will be placed. + */ +static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + memset(adisc, 0, sizeof(*adisc)); + adisc->adisc_cmd = ELS_ADISC; + put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn); + put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn); + hton24(adisc->adisc_port_id, fc_host_port_id(lport->host)); +} + /** * fc_ct_hdr_fill- fills ct header and reset ct payload * returns pointer to ct request. @@ -255,6 +272,10 @@ static inline int fc_els_fill(struct fc_lport *lport, enum fc_rctl *r_ctl, enum fc_fh_type *fh_type) { switch (op) { + case ELS_ADISC: + fc_adisc_fill(lport, fp); + break; + case ELS_PLOGI: fc_plogi_fill(lport, fp, ELS_PLOGI); break; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index e18e5ce5af51..65dc9aacbf70 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -143,6 +143,7 @@ enum fc_rport_state { RPORT_ST_RTV, /* waiting for RTV completion */ RPORT_ST_READY, /* ready for use */ RPORT_ST_LOGO, /* port logout sent */ + RPORT_ST_ADISC, /* Discover Address sent */ RPORT_ST_DELETE, /* port being deleted */ }; From 8abbe3a42324264c9d5cc4e7c3d265b5be6d82d6 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:52 -0700 Subject: [PATCH 184/209] [SCSI] libfc: fix handling of incoming Discover Address (ADISC) requests The local port facility has been replying to ADISC requests without looking to see if the remote port is logged in. This is incorrect. An ADISC request requires PLOGI first. It should be rejected if the sending remote port is not logged in. This is like other incoming requests that require login, all of which should be handled in the remote port module. Move the ADISC request handling from fc_lport.c to fc_rport.c. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 53 ----------------------------------- drivers/scsi/libfc/fc_rport.c | 48 +++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 53 deletions(-) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 3f2f72390145..bd2f77197447 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -451,56 +451,6 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, fc_frame_free(in_fp); } -/** - * fc_lport_recv_adisc_req() - Handle received Address Discovery Request - * @lport: Fibre Channel local port recieving the ADISC - * @sp: current sequence in the ADISC exchange - * @fp: ADISC request frame - * - * Locking Note: The lport lock is expected to be held before calling - * this function. 
- */ -static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp, - struct fc_lport *lport) -{ - struct fc_frame *fp; - struct fc_exch *ep = fc_seq_exch(sp); - struct fc_els_adisc *req, *rp; - struct fc_seq_els_data rjt_data; - size_t len; - u32 f_ctl; - - FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n", - fc_lport_state(lport)); - - req = fc_frame_payload_get(in_fp, sizeof(*req)); - if (!req) { - rjt_data.fp = NULL; - rjt_data.reason = ELS_RJT_LOGIC; - rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); - } else { - len = sizeof(*rp); - fp = fc_frame_alloc(lport, len); - if (fp) { - rp = fc_frame_payload_get(fp, len); - memset(rp, 0, len); - rp->adisc_cmd = ELS_LS_ACC; - rp->adisc_wwpn = htonll(lport->wwpn); - rp->adisc_wwnn = htonll(lport->wwnn); - hton24(rp->adisc_port_id, - fc_host_port_id(lport->host)); - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; - f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); - } - } - fc_frame_free(in_fp); -} - /** * fc_lport_recv_logo_req() - Handle received fabric LOGO request * @lport: Fibre Channel local port recieving the LOGO @@ -849,9 +799,6 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, case ELS_RNID: recv = fc_lport_recv_rnid_req; break; - case ELS_ADISC: - recv = fc_lport_recv_adisc_req; - break; } recv(sp, fp, lport); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c33e25851082..03ea6748e7ee 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1011,6 +1011,50 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) kref_get(&rdata->kref); } +/** + * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request + * @rdata: remote port private + * @sp: current sequence in the ADISC exchange + * @in_fp: ADISC request frame + * + * Locking Note: Called with the lport and rport locks held. + */ +static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, + struct fc_seq *sp, struct fc_frame *in_fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_exch *ep = fc_seq_exch(sp); + struct fc_els_adisc *adisc; + struct fc_seq_els_data rjt_data; + u32 f_ctl; + + FC_RPORT_DBG(rdata, "Received ADISC request\n"); + + adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); + if (!adisc) { + rjt_data.fp = NULL; + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + goto drop; + } + + fp = fc_frame_alloc(lport, sizeof(*adisc)); + if (!fp) + goto drop; + fc_adisc_fill(lport, fp); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + adisc->adisc_cmd = ELS_LS_ACC; + sp = lport->tt.seq_start_next(sp); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); +drop: + fc_frame_free(in_fp); +} + /** * fc_rport_recv_els_req() - handle a validated ELS request. 
* @lport: Fibre Channel local port @@ -1062,6 +1106,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, case ELS_PRLO: fc_rport_recv_prlo_req(rdata, sp, fp); break; + case ELS_ADISC: + fc_rport_recv_adisc_req(rdata, sp, fp); + break; case ELS_RRQ: els_data.fp = fp; lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); @@ -1111,6 +1158,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, break; case ELS_PRLI: case ELS_PRLO: + case ELS_ADISC: case ELS_RRQ: case ELS_REC: fc_rport_recv_els_req(lport, sp, fp); From 2ab7e1ecb81ce35ed8e8df512e3fc6338a4c55bb Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:03:58 -0700 Subject: [PATCH 185/209] [SCSI] libfc: send GPN_ID in reaction to single-port RSCNs. When an RSCN indicates changes to individual remote ports, don't blindly log them out and then back in. Instead, determine whether they're still in the directory, by doing GPN_ID. If that is successful, call login, which will send ADISC and reverify, otherwise, call logoff. Perhaps we should just delete the rport, not send LOGO, but it seems safer. Also, fix a possible issue where if a mix of records in the RSCN cause us to queue disc_ports for disc_single and then we decide to do full rediscovery, we leak memory for those disc_ports queued. So, go through the list of disc_ports even if doing full discovery. Free the disc_ports in any case. If any of the disc_single() calls return error, do a full discovery. The ability to fill in GPN_ID requests was added to fc_ct_fill(). For this, it needs the FC_ID to be passed in as an arg. The did parameter for fc_elsct_send() is used for that, since the actual D_DID will always be 0xfffffc for all CT requests so far. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 141 +++++++++++++++++++++++++++------- drivers/scsi/libfc/fc_elsct.c | 2 +- include/scsi/fc_encode.h | 16 +++- 3 files changed, 130 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 4242894cce7c..c48799e9dd8e 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -47,7 +47,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static void fc_disc_done(struct fc_disc *, enum fc_disc_event); static void fc_disc_timeout(struct work_struct *); -static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); +static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); /** @@ -83,7 +83,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_disc *disc) { struct fc_lport *lport; - struct fc_rport_priv *rdata; struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; struct fc_seq_els_data rjt_data; @@ -150,6 +149,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, } } lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + + /* + * If not doing a complete rediscovery, do GPN_ID on + * the individual ports mentioned in the list. + * If any of these get an error, do a full rediscovery. + * In any case, go through the list and free the entries. 
+ */ + list_for_each_entry_safe(dp, next, &disc_ports, peers) { + list_del(&dp->peers); + if (!redisc) + redisc = fc_disc_single(lport, dp); + kfree(dp); + } if (redisc) { FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); fc_disc_restart(disc); @@ -157,14 +169,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, FC_DISC_DBG(disc, "RSCN received: not rediscovering. " "redisc %d state %d in_prog %d\n", redisc, lport->state, disc->pending); - list_for_each_entry_safe(dp, next, &disc_ports, peers) { - list_del(&dp->peers); - rdata = lport->tt.rport_lookup(lport, dp->port_id); - if (rdata) { - lport->tt.rport_logoff(rdata); - } - fc_disc_single(disc, dp); - } } fc_frame_free(fp); return; @@ -561,33 +565,118 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_unlock(&disc->disc_mutex); } +/** + * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) + * @sp: exchange sequence + * @fp: response frame + * @rdata_arg: remote port private data + * + * Locking Note: This function is called without disc mutex held. + */ +static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rdata_arg) +{ + struct fc_rport_priv *rdata = rdata_arg; + struct fc_rport_priv *new_rdata; + struct fc_lport *lport; + struct fc_disc *disc; + struct fc_ct_hdr *cp; + struct fc_ns_gid_pn *pn; + u64 port_name; + + lport = rdata->local_port; + disc = &lport->disc; + + mutex_lock(&disc->disc_mutex); + if (PTR_ERR(fp) == -FC_EX_CLOSED) + goto out; + if (IS_ERR(fp)) + goto redisc; + + cp = fc_frame_payload_get(fp, sizeof(*cp)); + if (!cp) + goto redisc; + if (ntohs(cp->ct_cmd) == FC_FS_ACC) { + if (fr_len(fp) < sizeof(struct fc_frame_header) + + sizeof(*cp) + sizeof(*pn)) + goto redisc; + pn = (struct fc_ns_gid_pn *)(cp + 1); + port_name = get_unaligned_be64(&pn->fn_wwpn); + if (rdata->ids.port_name == -1) + rdata->ids.port_name = port_name; + else if (rdata->ids.port_name != port_name) { + FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. " + "Port-id %x wwpn %llx\n", + rdata->ids.port_id, port_name); + lport->tt.rport_logoff(rdata); + + new_rdata = lport->tt.rport_create(lport, + rdata->ids.port_id); + if (new_rdata) { + new_rdata->disc_id = disc->disc_id; + lport->tt.rport_login(new_rdata); + } + goto out; + } + rdata->disc_id = disc->disc_id; + lport->tt.rport_login(rdata); + } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { + FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n", + cp->ct_reason, cp->ct_explan); + lport->tt.rport_logoff(rdata); + } else { + FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n", + ntohs(cp->ct_cmd)); +redisc: + fc_disc_restart(disc); + } +out: + mutex_unlock(&disc->disc_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); +} + +/** + * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request + * @lport: local port + * @rdata: remote port private data + * + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. + * On failure, an error code is returned. 
+ */ +static int fc_disc_gpn_id_req(struct fc_lport *lport, + struct fc_rport_priv *rdata) +{ + struct fc_frame *fp; + + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_fid)); + if (!fp) + return -ENOMEM; + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, + fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) + return -ENOMEM; + kref_get(&rdata->kref); + return 0; +} + /** * fc_disc_single() - Discover the directory information for a single target - * @lport: FC local port + * @lport: local port * @dp: The port to rediscover * * Locking Note: This function expects that the disc_mutex is locked * before it is called. */ -static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) +static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) { - struct fc_lport *lport; struct fc_rport_priv *rdata; - lport = disc->lport; - - if (dp->port_id == fc_host_port_id(lport->host)) - goto out; - rdata = lport->tt.rport_create(lport, dp->port_id); - if (rdata) { - rdata->disc_id = disc->disc_id; - kfree(dp); - lport->tt.rport_login(rdata); - } - return; -out: - kfree(dp); + if (!rdata) + return -ENOMEM; + rdata->disc_id = 0; + return fc_disc_gpn_id_req(lport, rdata); } /** diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index d655924d46b6..5cfa68732e9d 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -49,7 +49,7 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type); else { /* CT requests */ - rc = fc_ct_fill(lport, fp, op, &r_ctl, &fh_type); + rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type); did = FC_FID_DIR_SERV; } diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index c5ee6bb79e05..27dad703824f 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -32,6 +32,7 @@ struct fc_ct_req { struct fc_ns_gid_ft gid; struct fc_ns_rn_id rn; struct fc_ns_rft rft; + struct fc_ns_fid fid; } payload; }; @@ -94,10 +95,16 @@ static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, } /** - * fc_ct_fill - Fill in a name service request frame + * fc_ct_fill() - Fill in a name service request frame + * @lport: local port. + * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries. + * @fp: frame to contain payload. + * @op: CT opcode. + * @r_ctl: pointer to FC header R_CTL. + * @fh_type: pointer to FC-4 type. */ static inline int fc_ct_fill(struct fc_lport *lport, - struct fc_frame *fp, + u32 fc_id, struct fc_frame *fp, unsigned int op, enum fc_rctl *r_ctl, enum fc_fh_type *fh_type) { @@ -109,6 +116,11 @@ static inline int fc_ct_fill(struct fc_lport *lport, ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; break; + case FC_NS_GPN_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid)); + hton24(ct->payload.fid.fp_fid, fc_id); + break; + case FC_NS_RFT_ID: ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft)); hton24(ct->payload.rft.fid.fp_fid, From 1d490ce33ee8b93638d09e471a3bc66ae33b6606 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:04:03 -0700 Subject: [PATCH 186/209] [SCSI] libfc: don't swap OX_ID and RX_ID when sending BA_RJT I saw an lport debug message from the exchange manager saying: "lport 70500: Received response for out of range oxid:ffff" A trace showed this was a BA_RJT sent due to an incoming ABTS which arrived on an unknown exchange. 
So, the sender of the BA_RJT was in error, but in this case, both the initiator and responder were the same machine. The OX_ID and RX_ID should not have been reversed in this case. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index b51db15a3876..c1c15748220c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1017,8 +1017,8 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, */ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3); memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3); - fh->fh_ox_id = rx_fh->fh_rx_id; - fh->fh_rx_id = rx_fh->fh_ox_id; + fh->fh_ox_id = rx_fh->fh_ox_id; + fh->fh_rx_id = rx_fh->fh_rx_id; fh->fh_seq_cnt = rx_fh->fh_seq_cnt; fh->fh_r_ctl = FC_RCTL_BA_RJT; fh->fh_type = FC_TYPE_BLS; From e7a51997dad4e17395be1209970e18d2e9305b24 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:04:08 -0700 Subject: [PATCH 187/209] [SCSI] fcoe: flush per-cpu thread work when destroying interface This fixes one cause of an occasional problem when unloading libfc where the exchange manager pool doesn't have all items freed. The existing WARN_ON(mp->total_exches <= 0) isn't hit. However, note that total_exches is decremented when the exchange is completed, and it can be held with a refcnt for a while after that. I'm not sure what the offending exchange is, but I suspect it is an incoming request, because outgoing state machines should be all stopped at this point. Note that although receive is stopped before the exchange manager is freed, there could still be active threads handling received frames. This patch flushes the queues by allocating a new skb and sending it through, and having the thread handle this new skb specially. This is similar to the way the work queues are flushed now by putting work items in them and waiting until they make it through the queue. An skb->destructor function is used to inform us of the completion of the flush, and the fr_dev() is left NULL to indicate to fcoe_percpu_receive_thread() that the skb should be just freed. There's already a check for the lp being NULL which prints a message. We skip printing the message if the destructor is for flushing. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 42 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ac481ad112ad..704b8e034946 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -57,6 +57,9 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ DEFINE_MUTEX(fcoe_config_mutex); +/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */ +static DECLARE_COMPLETION(fcoe_flush_completion); + /* fcoe host list */ /* must only by accessed under the RTNL mutex */ LIST_HEAD(fcoe_hostlist); @@ -827,7 +830,7 @@ static void fcoe_percpu_thread_create(unsigned int cpu) thread = kthread_create(fcoe_percpu_receive_thread, (void *)p, "fcoethread/%d", cpu); - if (likely(!IS_ERR(p->thread))) { + if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); wake_up_process(thread); @@ -1299,6 +1302,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) return 0; } +/** + * fcoe_percpu_flush_done() - Indicate percpu queue flush completion. + * @skb: the skb being completed.
+ */ +static void fcoe_percpu_flush_done(struct sk_buff *skb) +{ + complete(&fcoe_flush_completion); +} + /** * fcoe_percpu_receive_thread() - recv thread per cpu * @arg: ptr to the fcoe per cpu struct @@ -1338,7 +1350,8 @@ int fcoe_percpu_receive_thread(void *arg) fr = fcoe_dev_from_skb(skb); lp = fr->fr_dev; if (unlikely(lp == NULL)) { - FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); + if (skb->destructor != fcoe_percpu_flush_done) + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); kfree_skb(skb); continue; } @@ -1799,6 +1812,13 @@ int fcoe_link_ok(struct fc_lport *lp) /** * fcoe_percpu_clean() - Clear the pending skbs for an lport * @lp: the fc_lport + * + * Must be called with fcoe_create_mutex held to single-thread completion. + * + * This flushes the pending skbs by adding a new skb to each queue and + * waiting until they are all freed. This assures us that not only are + * there no packets that will be handled by the lport, but also that any + * threads already handling packet have returned. */ void fcoe_percpu_clean(struct fc_lport *lp) { @@ -1823,7 +1843,25 @@ void fcoe_percpu_clean(struct fc_lport *lp) kfree_skb(skb); } } + + if (!pp->thread || !cpu_online(cpu)) { + spin_unlock_bh(&pp->fcoe_rx_list.lock); + continue; + } + + skb = dev_alloc_skb(0); + if (!skb) { + spin_unlock_bh(&pp->fcoe_rx_list.lock); + continue; + } + skb->destructor = fcoe_percpu_flush_done; + + __skb_queue_tail(&pp->fcoe_rx_list, skb); + if (pp->fcoe_rx_list.qlen == 1) + wake_up_process(pp->thread); spin_unlock_bh(&pp->fcoe_rx_list.lock); + + wait_for_completion(&fcoe_flush_completion); } } From 8474f3a02a18e18459663ad88951822c62a45068 Mon Sep 17 00:00:00 2001 From: Santosh Vernekar Date: Tue, 25 Aug 2009 11:36:16 -0700 Subject: [PATCH 188/209] [SCSI] qla2xxx: Correctly set FCF_TAPE_PRESENT flag based on scsi-device. In fabric-login based on iop BIT_8 firmware notifies presence of a FCP2 device and not necessarily a TAPE device. So instead of setting FCF_TAPE_PRESENT flag there we set it using scsi_device->type after mid-layer scan recognises "type" of the device. It also adds a new flag FCF_FCP2_DEVICE for any future use. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_isr.c | 4 ++-- drivers/scsi/qla2xxx/qla_os.c | 3 +++ 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index efdfb1eb26ba..215061861794 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1590,6 +1590,7 @@ typedef struct fc_port { #define FCF_FABRIC_DEVICE BIT_0 #define FCF_LOGIN_NEEDED BIT_1 #define FCF_TAPE_PRESENT BIT_2 +#define FCF_FCP2_DEVICE BIT_3 /* No loop ID flag. 
*/ #define FC_NO_LOOP_ID 0x1000 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 37c99a27874c..51f81b272776 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3341,7 +3341,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, } else { fcport->port_type = FCT_TARGET; if (mb[1] & BIT_1) { - fcport->flags |= FCF_TAPE_PRESENT; + fcport->flags |= FCF_FCP2_DEVICE; } } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c0fec6914b96..27db6246ca3e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1013,7 +1013,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, data[0] = MBS_COMMAND_COMPLETE; if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1) - fcport->flags |= FCF_TAPE_PRESENT; + fcport->flags |= FCF_FCP2_DEVICE; goto done_post_logio_done_work; } @@ -1112,7 +1112,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; if (iop[0] & BIT_8) - fcport->flags |= FCF_TAPE_PRESENT; + fcport->flags |= FCF_FCP2_DEVICE; } if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5fd7adbff30b..649fd75d37b6 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1216,6 +1216,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_qla_host_t *vha = shost_priv(sdev->host); struct qla_hw_data *ha = vha->hw; struct fc_rport *rport = starget_to_rport(sdev->sdev_target); + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; struct req_que *req = vha->req; if (sdev->tagged_supported) @@ -1224,6 +1225,8 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_deactivate_tcq(sdev, req->max_q_depth); rport->dev_loss_tmo = ha->port_down_retry_count; + if (sdev->type == TYPE_TAPE) + fcport->flags |= FCF_TAPE_PRESENT; return 0; } From ab67114935d611caffe18063d1777f8c4f8b4272 Mon Sep 17 00:00:00 2001 From: Andrew Vasquez Date: Tue, 25 Aug 2009 11:36:17 -0700 Subject: [PATCH 189/209] [SCSI] qla2xxx: Further limit device-table (qla_devtbl) lookup to non-24xx. Signed-off-by: Andrew Vasquez Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 51f81b272776..825700ae5041 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1794,7 +1794,8 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, char *st, *en; uint16_t index; struct qla_hw_data *ha = vha->hw; - int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha); + int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && + !IS_QLA81XX(ha); if (memcmp(model, BINZERO, len) != 0) { strncpy(ha->model_number, model, len); From d970432c48ab8dd28216e80942723aeb505b623e Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Tue, 25 Aug 2009 11:36:18 -0700 Subject: [PATCH 190/209] [SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly. Original code would break-out of loop after only one iteration. 
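As the hunk below shows, the wait loop changes from

	while (CMD_SP(cmd)) {
		msleep(ABORT_POLLING_PERIOD);
		if (--wait_iter)
			break;		/* breaks out on the first pass whenever iterations remain */
	}

to

	while (CMD_SP(cmd) && wait_iter--)
		msleep(ABORT_POLLING_PERIOD);

so the poll now continues until the command completes or ABORT_WAIT_ITER iterations have elapsed.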
Signed-off-by: Lalit Chandivade
Signed-off-by: Andrew Vasquez
Signed-off-by: Giridhar Malavali
Signed-off-by: James Bottomley
---
 drivers/scsi/qla2xxx/qla_os.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 649fd75d37b6..ea9f91756c1e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -568,11 +568,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 	unsigned long wait_iter = ABORT_WAIT_ITER;
 	int ret = QLA_SUCCESS;
 
-	while (CMD_SP(cmd)) {
+	while (CMD_SP(cmd) && wait_iter--) {
 		msleep(ABORT_POLLING_PERIOD);
-
-		if (--wait_iter)
-			break;
 	}
 	if (CMD_SP(cmd))
 		ret = QLA_FUNCTION_FAILED;

From 0d6e61bc6a4f3f54444b088ae6d447f1703a21dd Mon Sep 17 00:00:00 2001
From: Andrew Vasquez
Date: Tue, 25 Aug 2009 11:36:19 -0700
Subject: [PATCH 191/209] [SCSI] qla2xxx: Correct various NPIV issues.

* Consolidate vport-count processing.
* Correct vp_idx restrictions during RSCN processing.
* Push topology verification check to qla2x00_do_dpc_all_vps().
* Don't skip vport full-login-lip/lip-reset mailbox handling.

Signed-off-by: Andrew Vasquez
Signed-off-by: Giridhar Malavali
Signed-off-by: James Bottomley
---
 drivers/scsi/qla2xxx/qla_attr.c | 5 +++++
 drivers/scsi/qla2xxx/qla_init.c | 2 --
 drivers/scsi/qla2xxx/qla_isr.c | 3 ++-
 drivers/scsi/qla2xxx/qla_mid.c | 30 ++++++++++++++----------------
 drivers/scsi/qla2xxx/qla_os.c | 21 +++++++++++----------
 5 files changed, 32 insertions(+), 29 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5b0a222241bb..fbcb82a2f7f4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1736,6 +1736,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	qla24xx_deallocate_vp_id(vha);
 
+	mutex_lock(&ha->vport_lock);
+	ha->cur_vport_count--;
+	clear_bit(vha->vp_idx, ha->vp_idx_map);
+	mutex_unlock(&ha->vport_lock);
+
 	if (vha->timer_active) {
 		qla2x00_vp_stop_timer(vha);
 		DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 825700ae5041..f74d07a9e945 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3540,8 +3540,6 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			qla2x00_mark_all_devices_lost(vha, 0);
-			list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
-				qla2x00_mark_all_devices_lost(vp, 0);
 		} else {
 			if (!atomic_read(&vha->loop_down_timer))
 				atomic_set(&vha->loop_down_timer,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 27db6246ca3e..b20a7169aac2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -685,8 +685,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
 			break;
 		/* Only handle SCNs for our Vport index. */
-		if (vha->vp_idx != (mb[3] & 0xff))
+		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
 			break;
+
 		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
 		    vha->host_no));
 		DEBUG(printk(KERN_INFO
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a748a95efb10..42b799abba57 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -42,7 +42,6 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 
 	set_bit(vp_id, ha->vp_idx_map);
 	ha->num_vhosts++;
-	ha->cur_vport_count++;
 	vha->vp_idx = vp_id;
 	list_add_tail(&vha->list, &ha->vp_list);
 	mutex_unlock(&ha->vport_lock);
@@ -58,7 +57,6 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	mutex_lock(&ha->vport_lock);
 	vp_id = vha->vp_idx;
 	ha->num_vhosts--;
-	ha->cur_vport_count--;
 	clear_bit(vp_id, ha->vp_idx_map);
 	list_del(&vha->list);
 	mutex_unlock(&ha->vport_lock);
@@ -235,7 +233,11 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 			atomic_set(&vha->loop_down_timer,
 			    LOOP_DOWN_TIME);
 	}
 
-	/* To exclusively reset vport, we need to log it out first.*/
+	/*
+	 * To exclusively reset vport, we need to log it out first. Note: this
+	 * control_vp can fail if ISP reset is already issued, this is
+	 * expected, as the vp would be already logged out due to ISP reset.
+	 */
 	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
 		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -247,23 +249,11 @@ static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-	struct qla_hw_data *ha = vha->hw;
-	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-	if (!(ha->current_topology & ISP_CFG_F))
-		return 0;
-
 	qla2x00_do_work(vha);
 
 	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
 		/* VP acquired. complete port configuration */
-		if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
-			qla24xx_configure_vp(vha);
-		} else {
-			set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
-			set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
-		}
-
+		qla24xx_configure_vp(vha);
 		return 0;
 	}
@@ -314,6 +304,9 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 
 	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
+	if (!(ha->current_topology & ISP_CFG_F))
+		return;
+
 	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
 		if (vp->vp_idx)
 			ret = qla2x00_do_dpc_vp(vp);
@@ -418,6 +411,11 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 
 	vha->flags.init_done = 1;
 
+	mutex_lock(&ha->vport_lock);
+	set_bit(vha->vp_idx, ha->vp_idx_map);
+	ha->cur_vport_count++;
+	mutex_unlock(&ha->vport_lock);
+
 	return vha;
 
 create_vhost_failed:
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea9f91756c1e..210229019588 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1119,8 +1119,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 	struct fc_port *fcport;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
-	    !IS_QLA81XX(ha)) {
+	if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -1133,7 +1132,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 		qla2x00_wait_for_loop_ready(vha);
 	}
 
-	if (ha->flags.enable_lip_reset && !vha->vp_idx) {
+	if (ha->flags.enable_lip_reset) {
 		ret = qla2x00_lip_reset(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -2264,8 +2263,9 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
 	fc_port_t *fcport;
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
-		if (vha->vp_idx != fcport->vp_idx)
+		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
 			continue;
+
 		/*
 		 * No point in marking the device as lost, if the device is
 		 * already DEAD.
 		 */
@@ -2273,10 +2273,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
-			atomic_set(&fcport->state, FCS_DEVICE_LOST);
-			qla2x00_schedule_rport_del(vha, fcport, defer);
-		} else
-			atomic_set(&fcport->state, FCS_DEVICE_LOST);
+			if (defer)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+			else if (vha->vp_idx == fcport->vp_idx)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+		}
+		atomic_set(&fcport->state, FCS_DEVICE_LOST);
 	}
 }
@@ -3069,8 +3071,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 
 		/* if the loop has been down for 4 minutes, reinit adapter */
 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
-			if (!(vha->device_flags & DFLG_NO_CABLE) &&
-			    !vha->vp_idx) {
+			if (!(vha->device_flags & DFLG_NO_CABLE)) {
 				DEBUG(printk("scsi(%ld): Loop down - "
 				    "aborting ISP.\n",
 				    vha->host_no));

From 67becc0041615651b75e4496204a0835a8c345a8 Mon Sep 17 00:00:00 2001
From: Andrew Vasquez
Date: Tue, 25 Aug 2009 11:36:20 -0700
Subject: [PATCH 192/209] [SCSI] qla2xxx: Properly delete rports attached to a vport.

Original code would inadvertently skip the deferred fc_remote_port_delete()
call for rports hanging off any vport.

Signed-off-by: Andrew Vasquez
Signed-off-by: Giridhar Malavali
Signed-off-by: James Bottomley
---
 drivers/scsi/qla2xxx/qla_init.c | 12 +++++++-----
 drivers/scsi/qla2xxx/qla_os.c | 6 ++++--
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f74d07a9e945..9e3eaac25596 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3495,15 +3495,17 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
 }
 
 void
-qla2x00_update_fcports(scsi_qla_host_t *vha)
+qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
 	fc_port_t *fcport;
+	struct scsi_qla_host *tvp, *vha;
 
 	/* Go with deferred removal of rport references. */
-	list_for_each_entry(fcport, &vha->vp_fcports, list)
-		if (fcport && fcport->drport &&
-		    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
-			qla2x00_rport_del(fcport);
+	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
+		list_for_each_entry(fcport, &vha->vp_fcports, list)
+			if (fcport && fcport->drport &&
+			    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+				qla2x00_rport_del(fcport);
 }
 
 /*
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 210229019588..b79fca7d461b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2181,17 +2181,19 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
     int defer)
 {
 	struct fc_rport *rport;
+	scsi_qla_host_t *base_vha;
 
 	if (!fcport->rport)
 		return;
 
 	rport = fcport->rport;
 	if (defer) {
+		base_vha = pci_get_drvdata(vha->hw->pdev);
 		spin_lock_irq(vha->host->host_lock);
 		fcport->drport = rport;
 		spin_unlock_irq(vha->host->host_lock);
-		set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
-		qla2xxx_wake_dpc(vha);
+		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
 	} else
 		fc_remote_port_delete(rport);
 }

From 073ed91e245d56d71a85e2a49bf0b3962fe74dc4 Mon Sep 17 00:00:00 2001
From: Giridhar Malavali
Date: Tue, 25 Aug 2009 11:36:21 -0700
Subject: [PATCH 193/209] [SCSI] qla2xxx: Update version number to 8.03.01-k6.
Signed-off-by: Giridhar Malavali
Signed-off-by: James Bottomley
---
 drivers/scsi/qla2xxx/qla_version.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 87a884e341a1..ac107a2c34a4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION "8.03.01-k5"
+#define QLA2XXX_VERSION "8.03.01-k6"
 
 #define QLA_DRIVER_MAJOR_VER 8
 #define QLA_DRIVER_MINOR_VER 3

From 89a3681041507773dfee1b88c1c90c8a811a79d3 Mon Sep 17 00:00:00 2001
From: Anil Ravindranath
Date: Tue, 25 Aug 2009 17:35:18 -0700
Subject: [PATCH 194/209] [SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller

Signed-off-by: Anil Ravindranath
Signed-off-by: James Bottomley
---
 MAINTAINERS | 8 +
 drivers/scsi/Kconfig | 6 +
 drivers/scsi/Makefile | 1 +
 drivers/scsi/pmcraid.c | 5604 ++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/pmcraid.h | 1029 ++++++++
 5 files changed, 6648 insertions(+)
 create mode 100644 drivers/scsi/pmcraid.c
 create mode 100644 drivers/scsi/pmcraid.h

diff --git a/MAINTAINERS b/MAINTAINERS
index b841e3c20e9b..e8fabf86c86a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3970,6 +3970,14 @@ S: Maintained
 F: drivers/block/pktcdvd.c
 F: include/linux/pktcdvd.h
 
+PMC SIERRA MaxRAID DRIVER
+P: Anil Ravindranath
+M: anil_ravindranath@pmc-sierra.com
+L: linux-scsi@vger.kernel.org
+W: http://www.pmc-sierra.com/
+S: Supported
+F: drivers/scsi/pmcraid.*
+
 POSIX CLOCKS and TIMERS
 M: Thomas Gleixner
 S: Supported
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122f755f..82bb3b2d207a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1811,6 +1811,12 @@ config ZFCP
 	  called zfcp. If you want to compile it as a module, say M here
 	  and read .
 
+config SCSI_PMCRAID
+	tristate "PMC SIERRA Linux MaxRAID adapter support"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports the PMC SIERRA MaxRAID adapters.
+
 config SCSI_SRP
 	tristate "SCSI RDMA Protocol helper library"
 	depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea63d0a..61a94af3cee7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
 obj-$(CONFIG_PS3_ROM) += ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
 
 obj-$(CONFIG_ARM) += arm/
 
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 000000000000..4302f06e4ec9
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,5604 @@
+/*
+ * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
+ *
+ * Written By: PMC Sierra Corporation
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, + * USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pmcraid.h" + +/* + * Module configuration parameters + */ +static unsigned int pmcraid_debug_log; +static unsigned int pmcraid_disable_aen; +static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; + +/* + * Data structures to support multiple adapters by the LLD. + * pmcraid_adapter_count - count of configured adapters + */ +static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0); + +/* + * Supporting user-level control interface through IOCTL commands. + * pmcraid_major - major number to use + * pmcraid_minor - minor number(s) to use + */ +static unsigned int pmcraid_major; +static struct class *pmcraid_class; +DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS); + +/* + * Module parameters + */ +MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com"); +MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PMCRAID_DRIVER_VERSION); + +module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(log_level, + "Enables firmware error code logging, default :1 high-severity" + " errors, 2: all errors including high-severity errors," + " 0: disables logging"); + +module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(debug, + "Enable driver verbose message logging. Set 1 to enable." + "(default: 0)"); + +module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR)); +MODULE_PARM_DESC(disable_aen, + "Disable driver aen notifications to apps. Set 1 to disable." + "(default: 0)"); + +/* chip specific constants for PMC MaxRAID controllers (same for + * 0x5220 and 0x8010 + */ +static struct pmcraid_chip_details pmcraid_chip_cfg[] = { + { + .ioastatus = 0x0, + .ioarrin = 0x00040, + .mailbox = 0x7FC30, + .global_intr_mask = 0x00034, + .ioa_host_intr = 0x0009C, + .ioa_host_intr_clr = 0x000A0, + .ioa_host_mask = 0x7FC28, + .ioa_host_mask_clr = 0x7FC28, + .host_ioa_intr = 0x00020, + .host_ioa_intr_clr = 0x00020, + .transop_timeout = 300 + } +}; + +/* + * PCI device ids supported by pmcraid driver + */ +static struct pci_device_id pmcraid_pci_table[] __devinitdata = { + { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID), + 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0] + }, + {} +}; + +MODULE_DEVICE_TABLE(pci, pmcraid_pci_table); + + + +/** + * pmcraid_slave_alloc - Prepare for commands to a device + * @scsi_dev: scsi device struct + * + * This function is called by mid-layer prior to sending any command to the new + * device. Stores resource entry details of the device in scsi_device struct. + * Queuecommand uses the resource handle and other details to fill up IOARCB + * while sending commands to the device. 
+ * + * Return value: + * 0 on success / -ENXIO if device does not exist + */ +static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *temp, *res = NULL; + struct pmcraid_instance *pinstance; + u8 target, bus, lun; + unsigned long lock_flags; + int rc = -ENXIO; + pinstance = shost_priv(scsi_dev->host); + + /* Driver exposes VSET and GSCSI resources only; all other device types + * are not exposed. Resource list is synchronized using resource lock + * so any traversal or modifications to the list should be done inside + * this lock + */ + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry(temp, &pinstance->used_res_q, queue) { + + /* do not expose VSETs with order-ids >= 240 */ + if (RES_IS_VSET(temp->cfg_entry)) { + target = temp->cfg_entry.unique_flags1; + if (target >= PMCRAID_MAX_VSET_TARGETS) + continue; + bus = PMCRAID_VSET_BUS_ID; + lun = 0; + } else if (RES_IS_GSCSI(temp->cfg_entry)) { + target = RES_TARGET(temp->cfg_entry.resource_address); + bus = PMCRAID_PHYS_BUS_ID; + lun = RES_LUN(temp->cfg_entry.resource_address); + } else { + continue; + } + + if (bus == scsi_dev->channel && + target == scsi_dev->id && + lun == scsi_dev->lun) { + res = temp; + break; + } + } + + if (res) { + res->scsi_dev = scsi_dev; + scsi_dev->hostdata = res; + res->change_detected = 0; + atomic_set(&res->read_failures, 0); + atomic_set(&res->write_failures, 0); + rc = 0; + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + return rc; +} + +/** + * pmcraid_slave_configure - Configures a SCSI device + * @scsi_dev: scsi device struct + * + * This fucntion is executed by SCSI mid layer just after a device is first + * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the + * timeout value (default 30s) will be over-written to a higher value (60s) + * and max_sectors value will be over-written to 512. It also sets queue depth + * to host->cmd_per_lun value + * + * Return value: + * 0 on success + */ +static int pmcraid_slave_configure(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *res = scsi_dev->hostdata; + + if (!res) + return 0; + + /* LLD exposes VSETs and Enclosure devices only */ + if (RES_IS_GSCSI(res->cfg_entry) && + scsi_dev->type != TYPE_ENCLOSURE) + return -ENXIO; + + pmcraid_info("configuring %x:%x:%x:%x\n", + scsi_dev->host->unique_id, + scsi_dev->channel, + scsi_dev->id, + scsi_dev->lun); + + if (RES_IS_GSCSI(res->cfg_entry)) { + scsi_dev->allow_restart = 1; + } else if (RES_IS_VSET(res->cfg_entry)) { + scsi_dev->allow_restart = 1; + blk_queue_rq_timeout(scsi_dev->request_queue, + PMCRAID_VSET_IO_TIMEOUT); + blk_queue_max_sectors(scsi_dev->request_queue, + PMCRAID_VSET_MAX_SECTORS); + } + + if (scsi_dev->tagged_supported && + (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) { + scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); + scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG, + scsi_dev->host->cmd_per_lun); + } else { + scsi_adjust_queue_depth(scsi_dev, 0, + scsi_dev->host->cmd_per_lun); + } + + return 0; +} + +/** + * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it + * + * @scsi_dev: scsi device struct + * + * This is called by mid-layer before removing a device. Pointer assignments + * done in pmcraid_slave_alloc will be reset to NULL here. 
+ * + * Return value + * none + */ +static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) +{ + struct pmcraid_resource_entry *res; + + res = (struct pmcraid_resource_entry *)scsi_dev->hostdata; + + if (res) + res->scsi_dev = NULL; + + scsi_dev->hostdata = NULL; +} + +/** + * pmcraid_change_queue_depth - Change the device's queue depth + * @scsi_dev: scsi device struct + * @depth: depth to set + * + * Return value + * actual depth set + */ +static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) +{ + if (depth > PMCRAID_MAX_CMD_PER_LUN) + depth = PMCRAID_MAX_CMD_PER_LUN; + + scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth); + + return scsi_dev->queue_depth; +} + +/** + * pmcraid_change_queue_type - Change the device's queue type + * @scsi_dev: scsi device struct + * @tag: type of tags to use + * + * Return value: + * actual queue type set + */ +static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag) +{ + struct pmcraid_resource_entry *res; + + res = (struct pmcraid_resource_entry *)scsi_dev->hostdata; + + if ((res) && scsi_dev->tagged_supported && + (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) { + scsi_set_tag_type(scsi_dev, tag); + + if (tag) + scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); + else + scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth); + } else + tag = 0; + + return tag; +} + + +/** + * pmcraid_init_cmdblk - initializes a command block + * + * @cmd: pointer to struct pmcraid_cmd to be initialized + * @index: if >=0 first time initialization; otherwise reinitialization + * + * Return Value + * None + */ +void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) +{ + struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); + dma_addr_t dma_addr = cmd->ioa_cb_bus_addr; + + if (index >= 0) { + /* first time initialization (called from probe) */ + u32 ioasa_offset = + offsetof(struct pmcraid_control_block, ioasa); + + cmd->index = index; + ioarcb->response_handle = cpu_to_le32(index << 2); + ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr); + ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset); + ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa)); + } else { + /* re-initialization of various lengths, called once command is + * processed by IOA + */ + memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->request_flags0 = 0; + ioarcb->request_flags1 = 0; + ioarcb->cmd_timeout = 0; + ioarcb->ioarcb_bus_addr &= (~0x1FULL); + ioarcb->ioadl_bus_addr = 0; + ioarcb->ioadl_length = 0; + ioarcb->data_transfer_length = 0; + ioarcb->add_cmd_param_length = 0; + ioarcb->add_cmd_param_offset = 0; + cmd->ioa_cb->ioasa.ioasc = 0; + cmd->ioa_cb->ioasa.residual_data_length = 0; + cmd->u.time_left = 0; + } + + cmd->cmd_done = NULL; + cmd->scsi_cmd = NULL; + cmd->release = 0; + cmd->completion_req = 0; + cmd->dma_handle = 0; + init_timer(&cmd->timer); +} + +/** + * pmcraid_reinit_cmdblk - reinitialize a command block + * + * @cmd: pointer to struct pmcraid_cmd to be reinitialized + * + * Return Value + * None + */ +static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd) +{ + pmcraid_init_cmdblk(cmd, -1); +} + +/** + * pmcraid_get_free_cmd - get a free cmd block from command block pool + * @pinstance: adapter instance structure + * + * Return Value: + * returns pointer to cmd block or NULL if no blocks are available + */ +static struct pmcraid_cmd *pmcraid_get_free_cmd( + struct pmcraid_instance *pinstance +) +{ + struct pmcraid_cmd *cmd = NULL; + unsigned long 
lock_flags; + + /* free cmd block list is protected by free_pool_lock */ + spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); + + if (!list_empty(&pinstance->free_cmd_pool)) { + cmd = list_entry(pinstance->free_cmd_pool.next, + struct pmcraid_cmd, free_list); + list_del(&cmd->free_list); + } + spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); + + /* Initialize the command block before giving it the caller */ + if (cmd != NULL) + pmcraid_reinit_cmdblk(cmd); + return cmd; +} + +/** + * pmcraid_return_cmd - return a completed command block back into free pool + * @cmd: pointer to the command block + * + * Return Value: + * nothing + */ +void pmcraid_return_cmd(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); + list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool); + spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); +} + +/** + * pmcraid_read_interrupts - reads IOA interrupts + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * interrupts read from IOA + */ +static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance) +{ + return ioread32(pinstance->int_regs.ioa_host_interrupt_reg); +} + +/** + * pmcraid_disable_interrupts - Masks and clears all specified interrupts + * + * @pinstance: pointer to per adapter instance structure + * @intrs: interrupts to disable + * + * Return Value + * None + */ +static void pmcraid_disable_interrupts( + struct pmcraid_instance *pinstance, + u32 intrs +) +{ + u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); + u32 nmask = gmask | GLOBAL_INTERRUPT_MASK; + + iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); + iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); + iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); +} + +/** + * pmcraid_enable_interrupts - Enables specified interrupts + * + * @pinstance: pointer to per adapter instance structure + * @intr: interrupts to enable + * + * Return Value + * None + */ +static void pmcraid_enable_interrupts( + struct pmcraid_instance *pinstance, + u32 intrs +) +{ + u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); + u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK); + + iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); + iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + + pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n", + ioread32(pinstance->int_regs.global_interrupt_mask_reg), + ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg)); +} + +/** + * pmcraid_reset_type - Determine the required reset type + * @pinstance: pointer to adapter instance structure + * + * IOA requires hard reset if any of the following conditions is true. + * 1. If HRRQ valid interrupt is not masked + * 2. IOA reset alert doorbell is set + * 3. 
If there are any error interrupts + */ +static void pmcraid_reset_type(struct pmcraid_instance *pinstance) +{ + u32 mask; + u32 intrs; + u32 alerts; + + mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + + if ((mask & INTRS_HRRQ_VALID) == 0 || + (alerts & DOORBELL_IOA_RESET_ALERT) || + (intrs & PMCRAID_ERROR_INTERRUPTS)) { + pmcraid_info("IOA requires hard reset\n"); + pinstance->ioa_hard_reset = 1; + } + + /* If unit check is active, trigger the dump */ + if (intrs & INTRS_IOA_UNIT_CHECK) + pinstance->ioa_unit_check = 1; +} + +/** + * pmcraid_bist_done - completion function for PCI BIST + * @cmd: pointer to reset command + * Return Value + * none + */ + +static void pmcraid_ioa_reset(struct pmcraid_cmd *); + +static void pmcraid_bist_done(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + int rc; + u16 pci_reg; + + rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg); + + /* If PCI config space can't be accessed wait for another two secs */ + if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) && + cmd->u.time_left > 0) { + pmcraid_info("BIST not complete, waiting another 2 secs\n"); + cmd->timer.expires = jiffies + cmd->u.time_left; + cmd->u.time_left = 0; + cmd->timer.data = (unsigned long)cmd; + cmd->timer.function = + (void (*)(unsigned long))pmcraid_bist_done; + add_timer(&cmd->timer); + } else { + cmd->u.time_left = 0; + pmcraid_info("BIST is complete, proceeding with reset\n"); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } +} + +/** + * pmcraid_start_bist - starts BIST + * @cmd: pointer to reset cmd + * Return Value + * none + */ +static void pmcraid_start_bist(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 doorbells, intrs; + + /* proceed with bist and wait for 2 seconds */ + iowrite32(DOORBELL_IOA_START_BIST, + pinstance->int_regs.host_ioa_interrupt_reg); + doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + pmcraid_info("doorbells after start bist: %x intrs: %x \n", + doorbells, intrs); + + cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); + cmd->timer.data = (unsigned long)cmd; + cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); + cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done; + add_timer(&cmd->timer); +} + +/** + * pmcraid_reset_alert_done - completion routine for reset_alert + * @cmd: pointer to command block used in reset sequence + * Return value + * None + */ +static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 status = ioread32(pinstance->ioa_status); + unsigned long lock_flags; + + /* if the critical operation in progress bit is set or the wait times + * out, invoke reset engine to proceed with hard reset. 
If there is + * some more time to wait, restart the timer + */ + if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) || + cmd->u.time_left <= 0) { + pmcraid_info("critical op is reset proceeding with reset\n"); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + pmcraid_info("critical op is not yet reset waiting again\n"); + /* restart timer if some more time is available to wait */ + cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.data = (unsigned long)cmd; + cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.function = + (void (*)(unsigned long))pmcraid_reset_alert_done; + add_timer(&cmd->timer); + } +} + +/** + * pmcraid_reset_alert - alerts IOA for a possible reset + * @cmd : command block to be used for reset sequence. + * + * Return Value + * returns 0 if pci config-space is accessible and RESET_DOORBELL is + * successfully written to IOA. Returns non-zero in case pci_config_space + * is not accessible + */ +static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 doorbells; + int rc; + u16 pci_reg; + + /* If we are able to access IOA PCI config space, alert IOA that we are + * going to reset it soon. This enables IOA to preserv persistent error + * data if any. In case memory space is not accessible, proceed with + * BIST or slot_reset + */ + rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg); + if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) { + + /* wait for IOA permission i.e until CRITICAL_OPERATION bit is + * reset IOA doesn't generate any interrupts when CRITICAL + * OPERATION bit is reset. A timer is started to wait for this + * bit to be reset. + */ + cmd->u.time_left = PMCRAID_RESET_TIMEOUT; + cmd->timer.data = (unsigned long)cmd; + cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; + cmd->timer.function = + (void (*)(unsigned long))pmcraid_reset_alert_done; + add_timer(&cmd->timer); + + iowrite32(DOORBELL_IOA_RESET_ALERT, + pinstance->int_regs.host_ioa_interrupt_reg); + doorbells = + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); + pmcraid_info("doorbells after reset alert: %x\n", doorbells); + } else { + pmcraid_info("PCI config is not accessible starting BIST\n"); + pinstance->ioa_state = IOA_STATE_IN_HARD_RESET; + pmcraid_start_bist(cmd); + } +} + +/** + * pmcraid_timeout_handler - Timeout handler for internally generated ops + * + * @cmd : pointer to command structure, that got timedout + * + * This function blocks host requests and initiates an adapter reset. + * + * Return value: + * None + */ +static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + dev_err(&pinstance->pdev->dev, + "Adapter being reset due to command timeout.\n"); + + /* Command timeouts result in hard reset sequence. The command that got + * timed out may be the one used as part of reset sequence. In this + * case restart reset sequence using the same command block even if + * reset is in progress. Otherwise fail this command and get a free + * command block to restart the reset sequence. + */ + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + if (!pinstance->ioa_reset_in_progress) { + pinstance->ioa_reset_attempts = 0; + cmd = pmcraid_get_free_cmd(pinstance); + + /* If we are out of command blocks, just return here itself. 
+ * Some other command's timeout handler can do the reset job + */ + if (cmd == NULL) { + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + pmcraid_err("no free cmnd block for timeout handler\n"); + return; + } + + pinstance->reset_cmd = cmd; + pinstance->ioa_reset_in_progress = 1; + } else { + pmcraid_info("reset is already in progress\n"); + + if (pinstance->reset_cmd != cmd) { + /* This command should have been given to IOA, this + * command will be completed by fail_outstanding_cmds + * anyway + */ + pmcraid_err("cmd is pending but reset in progress\n"); + } + + /* If this command was being used as part of the reset + * sequence, set cmd_done pointer to pmcraid_ioa_reset. This + * causes fail_outstanding_commands not to return the command + * block back to free pool + */ + if (cmd == pinstance->reset_cmd) + cmd->cmd_done = pmcraid_ioa_reset; + + } + + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + scsi_block_requests(pinstance->host); + pmcraid_reset_alert(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); +} + +/** + * pmcraid_internal_done - completion routine for internally generated cmds + * + * @cmd: command that got response from IOA + * + * Return Value: + * none + */ +static void pmcraid_internal_done(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + /* Some of the internal commands are sent with callers blocking for the + * response. Same will be indicated as part of cmd->completion_req + * field. Response path needs to wake up any waiters waiting for cmd + * completion if this flag is set. + */ + if (cmd->completion_req) { + cmd->completion_req = 0; + complete(&cmd->wait_for_completion); + } + + /* most of the internal commands are completed by caller itself, so + * no need to return the command block back to free pool until we are + * required to do so (e.g once done with initialization). + */ + if (cmd->release) { + cmd->release = 0; + pmcraid_return_cmd(cmd); + } +} + +/** + * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization + * + * @cmd: command that got response from IOA + * + * This routine is called after driver re-reads configuration table due to a + * lost CCN. It returns the command block back to free pool and schedules + * worker thread to add/delete devices into the system. + * + * Return Value: + * none + */ +static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + if (cmd->release) { + cmd->release = 0; + pmcraid_return_cmd(cmd); + } + pmcraid_info("scheduling worker for config table reinitialization\n"); + schedule_work(&cmd->drv_inst->worker_q); +} + +/** + * pmcraid_erp_done - Process completion of SCSI error response from device + * @cmd: pmcraid_command + * + * This function copies the sense buffer into the scsi_cmd struct and completes + * scsi_cmd by calling scsi_done function. 
+ * + * Return value: + * none + */ +static void pmcraid_erp_done(struct pmcraid_cmd *cmd) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + + if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) { + scsi_cmd->result |= (DID_ERROR << 16); + pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n", + cmd->ioa_cb->ioarcb.cdb[0], ioasc); + } + + /* if we had allocated sense buffers for request sense, copy the sense + * release the buffers + */ + if (cmd->sense_buffer != NULL) { + memcpy(scsi_cmd->sense_buffer, + cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + pci_free_consistent(pinstance->pdev, + SCSI_SENSE_BUFFERSIZE, + cmd->sense_buffer, cmd->sense_buffer_dma); + cmd->sense_buffer = NULL; + cmd->sense_buffer_dma = 0; + } + + scsi_dma_unmap(scsi_cmd); + pmcraid_return_cmd(cmd); + scsi_cmd->scsi_done(scsi_cmd); +} + +/** + * pmcraid_fire_command - sends an IOA command to adapter + * + * This function adds the given block into pending command list + * and returns without waiting + * + * @cmd : command to be sent to the device + * + * Return Value + * None + */ +static void _pmcraid_fire_command(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long lock_flags; + + /* Add this command block to pending cmd pool. We do this prior to + * writting IOARCB to ioarrin because IOA might complete the command + * by the time we are about to add it to the list. Response handler + * (isr/tasklet) looks for cmb block in the pending pending list. + */ + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); + atomic_inc(&pinstance->outstanding_cmds); + + /* driver writes lower 32-bit value of IOARCB address only */ + mb(); + iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), + pinstance->ioarrin); +} + +/** + * pmcraid_send_cmd - fires a command to IOA + * + * This function also sets up timeout function, and command completion + * function + * + * @cmd: pointer to the command block to be fired to IOA + * @cmd_done: command completion function, called once IOA responds + * @timeout: timeout to wait for this command completion + * @timeout_func: timeout handler + * + * Return value + * none + */ +static void pmcraid_send_cmd( + struct pmcraid_cmd *cmd, + void (*cmd_done) (struct pmcraid_cmd *), + unsigned long timeout, + void (*timeout_func) (struct pmcraid_cmd *) +) +{ + /* initialize done function */ + cmd->cmd_done = cmd_done; + + if (timeout_func) { + /* setup timeout handler */ + cmd->timer.data = (unsigned long)cmd; + cmd->timer.expires = jiffies + timeout; + cmd->timer.function = (void (*)(unsigned long))timeout_func; + add_timer(&cmd->timer); + } + + /* fire the command to IOA */ + _pmcraid_fire_command(cmd); +} + +/** + * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa + * + * @cmd: pointer to the command block used as part of reset sequence + * + * Return Value + * None + */ +static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + /* Note that commands sent during reset require next command to be sent + * to IOA. 
Hence reinit the done function as well as timeout function + */ + pmcraid_reinit_cmdblk(cmd); + cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD; + cmd->ioa_cb->ioarcb.resource_handle = + cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN; + cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL; + + /* fire shutdown command to hardware. */ + pmcraid_info("firing normal shutdown command (%d) to IOA\n", + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle)); + + pmcraid_send_cmd(cmd, pmcraid_ioa_reset, + PMCRAID_SHUTDOWN_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_identify_hrrq - registers host rrq buffers with IOA + * @cmd: pointer to command block to be used for identify hrrq + * + * Return Value + * 0 in case of success, otherwise non-zero failure code + */ + +static void pmcraid_querycfg(struct pmcraid_cmd *); + +static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + int index = 0; + __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]); + u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); + + pmcraid_reinit_cmdblk(cmd); + + /* Initialize ioarcb */ + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + + /* initialize the hrrq number where IOA will respond to this command */ + ioarcb->hrrq_id = index; + ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ; + ioarcb->cdb[1] = index; + + /* IOA expects 64-bit pci address to be written in B.E format + * (i.e cdb[2]=MSByte..cdb[9]=LSB. + */ + pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n", + hrrq_addr, ioarcb->ioarcb_bus_addr); + + memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr)); + memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size)); + + /* Subsequent commands require HRRQ identification to be successful. 
+ * Note that this gets called even during reset from SCSI mid-layer + * or tasklet + */ + pmcraid_send_cmd(cmd, pmcraid_querycfg, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); +} + +static void pmcraid_process_ccn(struct pmcraid_cmd *cmd); +static void pmcraid_process_ldn(struct pmcraid_cmd *cmd); + +/** + * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA + * + * @cmd: initialized command block pointer + * + * Return Value + * none + */ +static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd) +{ + if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE) + atomic_set(&(cmd->drv_inst->ccn.ignore), 0); + else + atomic_set(&(cmd->drv_inst->ldn.ignore), 0); + + pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL); +} + +/** + * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA + * + * @pinstance: pointer to adapter instance structure + * @type: HCAM type + * + * Return Value + * pointer to initialized pmcraid_cmd structure or NULL + */ +static struct pmcraid_cmd *pmcraid_init_hcam +( + struct pmcraid_instance *pinstance, + u8 type +) +{ + struct pmcraid_cmd *cmd; + struct pmcraid_ioarcb *ioarcb; + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_hostrcb *hcam; + void (*cmd_done) (struct pmcraid_cmd *); + dma_addr_t dma; + int rcb_size; + + cmd = pmcraid_get_free_cmd(pinstance); + + if (!cmd) { + pmcraid_err("no free command blocks for hcam\n"); + return cmd; + } + + if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) { + rcb_size = sizeof(struct pmcraid_hcam_ccn); + cmd_done = pmcraid_process_ccn; + dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE; + hcam = &pinstance->ccn; + } else { + rcb_size = sizeof(struct pmcraid_hcam_ldn); + cmd_done = pmcraid_process_ldn; + dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE; + hcam = &pinstance->ldn; + } + + /* initialize command pointer used for HCAM registration */ + hcam->cmd = cmd; + + ioarcb = &cmd->ioa_cb->ioarcb; + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioadl = ioarcb->add_data.u.ioadl; + + /* Initialize ioarcb */ + ioarcb->request_type = REQ_TYPE_HCAM; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC; + ioarcb->cdb[1] = type; + ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF; + ioarcb->cdb[8] = (rcb_size) & 0xFF; + + ioarcb->data_transfer_length = cpu_to_le32(rcb_size); + + ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST); + ioadl[0].data_len = cpu_to_le32(rcb_size); + ioadl[0].address = cpu_to_le32(dma); + + cmd->cmd_done = cmd_done; + return cmd; +} + +/** + * pmcraid_send_hcam - Send an HCAM to IOA + * @pinstance: ioa config struct + * @type: HCAM type + * + * This function will send a Host Controlled Async command to IOA. 
+ * + * Return value: + * none + */ +static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type) +{ + struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type); + pmcraid_send_hcam_cmd(cmd); +} + + +/** + * pmcraid_prepare_cancel_cmd - prepares a command block to abort another + * + * @cmd: pointer to cmd that is used as cancelling command + * @cmd_to_cancel: pointer to the command that needs to be cancelled + */ +static void pmcraid_prepare_cancel_cmd( + struct pmcraid_cmd *cmd, + struct pmcraid_cmd *cmd_to_cancel +) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr; + + /* Get the resource handle to where the command to be aborted has been + * sent. + */ + ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle; + ioarcb->request_type = REQ_TYPE_IOACMD; + memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->cdb[0] = PMCRAID_ABORT_CMD; + + /* IOARCB address of the command to be cancelled is given in + * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in + * IOARCB address are not masked. + */ + ioarcb_addr = cpu_to_be64(ioarcb_addr); + memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr)); +} + +/** + * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM + * + * @cmd: command to be used as cancelling command + * @type: HCAM type + * @cmd_done: op done function for the cancelling command + */ +static void pmcraid_cancel_hcam( + struct pmcraid_cmd *cmd, + u8 type, + void (*cmd_done) (struct pmcraid_cmd *) +) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_hostrcb *hcam; + + pinstance = cmd->drv_inst; + hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ? + &pinstance->ldn : &pinstance->ccn; + + /* prepare for cancelling previous hcam command. 
If the HCAM is + * currently not pending with IOA, we would have hcam->cmd as non-null + */ + if (hcam->cmd == NULL) + return; + + pmcraid_prepare_cancel_cmd(cmd, hcam->cmd); + + /* writing to IOARRIN must be protected by host_lock, as mid-layer + * schedule queuecommand while we are doing this + */ + pmcraid_send_cmd(cmd, cmd_done, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA + * + * @cmd: command block to be used for cancelling the HCAM + */ +static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd) +{ + pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + + pmcraid_reinit_cmdblk(cmd); + + pmcraid_cancel_hcam(cmd, + PMCRAID_HCAM_CODE_CONFIG_CHANGE, + pmcraid_ioa_shutdown); +} + +/** + * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA + * + * @cmd: command block to be used for cancelling the HCAM + */ +static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd) +{ + pmcraid_cancel_hcam(cmd, + PMCRAID_HCAM_CODE_LOG_DATA, + pmcraid_cancel_ccn); +} + +/** + * pmcraid_expose_resource - check if the resource can be exposed to OS + * + * @cfgte: pointer to configuration table entry of the resource + * + * Return value: + * true if resource can be added to midlayer, false(0) otherwise + */ +static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte) +{ + int retval = 0; + + if (cfgte->resource_type == RES_TYPE_VSET) + retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE); + else if (cfgte->resource_type == RES_TYPE_GSCSI) + retval = (RES_BUS(cfgte->resource_address) != + PMCRAID_VIRTUAL_ENCL_BUS_ID); + return retval; +} + +/* attributes supported by pmcraid_event_family */ +enum { + PMCRAID_AEN_ATTR_UNSPEC, + PMCRAID_AEN_ATTR_EVENT, + __PMCRAID_AEN_ATTR_MAX, +}; +#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1) + +/* commands supported by pmcraid_event_family */ +enum { + PMCRAID_AEN_CMD_UNSPEC, + PMCRAID_AEN_CMD_EVENT, + __PMCRAID_AEN_CMD_MAX, +}; +#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) + +static struct genl_family pmcraid_event_family = { + .id = GENL_ID_GENERATE, + .name = "pmcraid", + .version = 1, + .maxattr = PMCRAID_AEN_ATTR_MAX +}; + +/** + * pmcraid_netlink_init - registers pmcraid_event_family + * + * Return value: + * 0 if the pmcraid_event_family is successfully registered + * with netlink generic, non-zero otherwise + */ +static int pmcraid_netlink_init(void) +{ + int result; + + result = genl_register_family(&pmcraid_event_family); + + if (result) + return result; + + pmcraid_info("registered NETLINK GENERIC group: %d\n", + pmcraid_event_family.id); + + return result; +} + +/** + * pmcraid_netlink_release - unregisters pmcraid_event_family + * + * Return value: + * none + */ +static void pmcraid_netlink_release(void) +{ + genl_unregister_family(&pmcraid_event_family); +} + +/** + * pmcraid_notify_aen - sends event msg to user space application + * @pinstance: pointer to adapter instance structure + * @type: HCAM type + * + * Return value: + * 0 if success, error value in case of any failure. 
+ */ +static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type) +{ + struct sk_buff *skb; + struct pmcraid_aen_msg *aen_msg; + void *msg_header; + int data_size, total_size; + int result; + + + if (type == PMCRAID_HCAM_CODE_LOG_DATA) { + aen_msg = pinstance->ldn.msg; + data_size = pinstance->ldn.hcam->data_len; + } else { + aen_msg = pinstance->ccn.msg; + data_size = pinstance->ccn.hcam->data_len; + } + + data_size += sizeof(struct pmcraid_hcam_hdr); + aen_msg->hostno = (pinstance->host->unique_id << 16 | + MINOR(pinstance->cdev.dev)); + aen_msg->length = data_size; + data_size += sizeof(*aen_msg); + + total_size = nla_total_size(data_size); + skb = genlmsg_new(total_size, GFP_ATOMIC); + + + if (!skb) { + pmcraid_err("Failed to allocate aen data SKB of size: %x\n", + total_size); + return -ENOMEM; + } + + /* add the genetlink message header */ + msg_header = genlmsg_put(skb, 0, 0, + &pmcraid_event_family, 0, + PMCRAID_AEN_CMD_EVENT); + if (!msg_header) { + pmcraid_err("failed to copy command details\n"); + nlmsg_free(skb); + return -ENOMEM; + } + + result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg); + + if (result) { + pmcraid_err("failed to copy AEN attribute data \n"); + nlmsg_free(skb); + return -EINVAL; + } + + /* send genetlink multicast message to notify appplications */ + result = genlmsg_end(skb, msg_header); + + if (result < 0) { + pmcraid_err("genlmsg_end failed\n"); + nlmsg_free(skb); + return result; + } + + result = + genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + + /* If there are no listeners, genlmsg_multicast may return non-zero + * value. + */ + if (result) + pmcraid_info("failed to send %s event message %x!\n", + type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN", + result); + return result; +} + +/** + * pmcraid_handle_config_change - Handle a config change from the adapter + * @pinstance: pointer to per adapter instance structure + * + * Return value: + * none + */ +static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) +{ + struct pmcraid_config_table_entry *cfg_entry; + struct pmcraid_hcam_ccn *ccn_hcam; + struct pmcraid_cmd *cmd; + struct pmcraid_cmd *cfgcmd; + struct pmcraid_resource_entry *res = NULL; + u32 new_entry = 1; + unsigned long lock_flags; + unsigned long host_lock_flags; + int rc; + + ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; + cfg_entry = &ccn_hcam->cfg_entry; + + pmcraid_info + ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n", + pinstance->ccn.hcam->ilid, + pinstance->ccn.hcam->op_code, + pinstance->ccn.hcam->notification_type, + pinstance->ccn.hcam->notification_lost, + pinstance->ccn.hcam->flags, + pinstance->host->unique_id, + RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID : + (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID : + RES_BUS(cfg_entry->resource_address)), + RES_IS_VSET(*cfg_entry) ? 
cfg_entry->unique_flags1 : + RES_TARGET(cfg_entry->resource_address), + RES_LUN(cfg_entry->resource_address)); + + + /* If this HCAM indicates a lost notification, read the config table */ + if (pinstance->ccn.hcam->notification_lost) { + cfgcmd = pmcraid_get_free_cmd(pinstance); + if (cfgcmd) { + pmcraid_info("lost CCN, reading config table\b"); + pinstance->reinit_cfg_table = 1; + pmcraid_querycfg(cfgcmd); + } else { + pmcraid_err("lost CCN, no free cmd for querycfg\n"); + } + goto out_notify_apps; + } + + /* If this resource is not going to be added to mid-layer, just notify + * applications and return + */ + if (!pmcraid_expose_resource(cfg_entry)) + goto out_notify_apps; + + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry(res, &pinstance->used_res_q, queue) { + rc = memcmp(&res->cfg_entry.resource_address, + &cfg_entry->resource_address, + sizeof(cfg_entry->resource_address)); + if (!rc) { + new_entry = 0; + break; + } + } + + if (new_entry) { + + /* If there are more number of resources than what driver can + * manage, do not notify the applications about the CCN. Just + * ignore this notifications and re-register the same HCAM + */ + if (list_empty(&pinstance->free_res_q)) { + spin_unlock_irqrestore(&pinstance->resource_lock, + lock_flags); + pmcraid_err("too many resources attached\n"); + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + pmcraid_send_hcam(pinstance, + PMCRAID_HCAM_CODE_CONFIG_CHANGE); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + return; + } + + res = list_entry(pinstance->free_res_q.next, + struct pmcraid_resource_entry, queue); + + list_del(&res->queue); + res->scsi_dev = NULL; + res->reset_progress = 0; + list_add_tail(&res->queue, &pinstance->used_res_q); + } + + memcpy(&res->cfg_entry, cfg_entry, + sizeof(struct pmcraid_config_table_entry)); + + if (pinstance->ccn.hcam->notification_type == + NOTIFICATION_TYPE_ENTRY_DELETED) { + if (res->scsi_dev) { + res->change_detected = RES_CHANGE_DEL; + res->cfg_entry.resource_handle = + PMCRAID_INVALID_RES_HANDLE; + schedule_work(&pinstance->worker_q); + } else { + /* This may be one of the non-exposed resources */ + list_move_tail(&res->queue, &pinstance->free_res_q); + } + } else if (!res->scsi_dev) { + res->change_detected = RES_CHANGE_ADD; + schedule_work(&pinstance->worker_q); + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + +out_notify_apps: + + /* Notify configuration changes to registered applications.*/ + if (!pmcraid_disable_aen) + pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + + cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + if (cmd) + pmcraid_send_hcam_cmd(cmd); +} + +/** + * pmcraid_get_error_info - return error string for an ioasc + * @ioasc: ioasc code + * Return Value + * none + */ +static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc) +{ + int i; + for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) { + if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc) + return &pmcraid_ioasc_error_table[i]; + } + return NULL; +} + +/** + * pmcraid_ioasc_logger - log IOASC information based user-settings + * @ioasc: ioasc code + * @cmd: pointer to command that resulted in 'ioasc' + */ +void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc); + + if (error_info == NULL || + cmd->drv_inst->current_log_level < error_info->log_level) + return; + + /* log the error string */ + 
pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n", + cmd->ioa_cb->ioarcb.cdb[0], + cmd->ioa_cb->ioarcb.resource_handle, + le32_to_cpu(ioasc), error_info->error_string); +} + +/** + * pmcraid_handle_error_log - Handle a config change (error log) from the IOA + * + * @pinstance: pointer to per adapter instance structure + * + * Return value: + * none + */ +static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance) +{ + struct pmcraid_hcam_ldn *hcam_ldn; + u32 ioasc; + + hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; + + pmcraid_info + ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n", + pinstance->ldn.hcam->ilid, + pinstance->ldn.hcam->op_code, + pinstance->ldn.hcam->notification_type, + pinstance->ldn.hcam->notification_lost, + pinstance->ldn.hcam->flags, + pinstance->ldn.hcam->overlay_id); + + /* log only the errors, no need to log informational log entries */ + if (pinstance->ldn.hcam->notification_type != + NOTIFICATION_TYPE_ERROR_LOG) + return; + + if (pinstance->ldn.hcam->notification_lost == + HOSTRCB_NOTIFICATIONS_LOST) + dev_err(&pinstance->pdev->dev, "Error notifications lost\n"); + + ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc); + + if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || + ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) { + dev_err(&pinstance->pdev->dev, + "UnitAttention due to IOA Bus Reset\n"); + scsi_report_bus_reset( + pinstance->host, + RES_BUS(hcam_ldn->error_log.fd_ra)); + } + + return; +} + +/** + * pmcraid_process_ccn - Op done function for a CCN. + * @cmd: pointer to command struct + * + * This function is the op done function for a configuration + * change notification + * + * Return value: + * none + */ +static void pmcraid_process_ccn(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + unsigned long lock_flags; + + pinstance->ccn.cmd = NULL; + pmcraid_return_cmd(cmd); + + /* If driver initiated IOA reset happened while this hcam was pending + * with IOA, or IOA bringdown sequence is in progress, no need to + * re-register the hcam + */ + if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || + atomic_read(&pinstance->ccn.ignore) == 1) { + return; + } else if (ioasc) { + dev_err(&pinstance->pdev->dev, + "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc); + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + } else { + pmcraid_handle_config_change(pinstance); + } +} + +/** + * pmcraid_process_ldn - op done function for an LDN + * @cmd: pointer to command block + * + * Return value + * none + */ +static void pmcraid_initiate_reset(struct pmcraid_instance *); + +static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_hcam_ldn *ldn_hcam = + (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc); + unsigned long lock_flags; + + /* return the command block back to freepool */ + pinstance->ldn.cmd = NULL; + pmcraid_return_cmd(cmd); + + /* If driver initiated IOA reset happened while this hcam was pending + * with IOA, no need to re-register the hcam as reset engine will do it + * once reset sequence is complete + */ + if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || + atomic_read(&pinstance->ccn.ignore) == 1) { + return; + } else if 
(!ioasc) { + pmcraid_handle_error_log(pinstance); + if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) { + spin_lock_irqsave(pinstance->host->host_lock, + lock_flags); + pmcraid_initiate_reset(pinstance); + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + return; + } + } else { + dev_err(&pinstance->pdev->dev, + "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); + } + /* send netlink message for HCAM notification if enabled */ + if (!pmcraid_disable_aen) + pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); + + cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); + if (cmd) + pmcraid_send_hcam_cmd(cmd); +} + +/** + * pmcraid_register_hcams - register HCAMs for CCN and LDN + * + * @pinstance: pointer per adapter instance structure + * + * Return Value + * none + */ +static void pmcraid_register_hcams(struct pmcraid_instance *pinstance) +{ + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); + pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); +} + +/** + * pmcraid_unregister_hcams - cancel HCAMs registered already + * @cmd: pointer to command used as part of reset sequence + */ +static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + + /* During IOA bringdown, HCAM gets fired and tasklet proceeds with + * handling hcam response though it is not necessary. In order to + * prevent this, set 'ignore', so that bring-down sequence doesn't + * re-send any more hcams + */ + atomic_set(&pinstance->ccn.ignore, 1); + atomic_set(&pinstance->ldn.ignore, 1); + + /* If adapter reset was forced as part of runtime reset sequence, + * start the reset sequence. + */ + if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) { + pinstance->force_ioa_reset = 0; + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + return; + } + + /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM + * one after the other. So CCN cancellation will be triggered by + * pmcraid_cancel_ldn itself. + */ + pmcraid_cancel_ldn(cmd); +} + +/** + * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset + * @pinstance: pointer to adapter instance structure + * Return Value + * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0 + */ +static void pmcraid_reinit_buffers(struct pmcraid_instance *); + +static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance) +{ + u32 intrs; + + pmcraid_reinit_buffers(pinstance); + intrs = pmcraid_read_interrupts(pinstance); + + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_mask_reg); + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + return 1; + } else { + return 0; + } +} + +/** + * pmcraid_soft_reset - performs a soft reset and makes IOA become ready + * @cmd : pointer to reset command block + * + * Return Value + * none + */ +static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u32 int_reg; + u32 doorbell; + + /* There will be an interrupt when Transition to Operational bit is + * set so tasklet would execute next reset task. 
The timeout handler + * would re-initiate a reset + */ + cmd->cmd_done = pmcraid_ioa_reset; + cmd->timer.data = (unsigned long)cmd; + cmd->timer.expires = jiffies + + msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); + cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler; + + if (!timer_pending(&cmd->timer)) + add_timer(&cmd->timer); + + /* Enable destructive diagnostics on IOA if it is not yet in + * operational state + */ + doorbell = DOORBELL_RUNTIME_RESET | + DOORBELL_ENABLE_DESTRUCTIVE_DIAGS; + + iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); + int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + pmcraid_info("Waiting for IOA to become operational %x:%x\n", + ioread32(pinstance->int_regs.host_ioa_interrupt_reg), + int_reg); +} + +/** + * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt + * + * @pinstance: pointer to adapter instance structure + * + * Return Value + * none + */ +static void pmcraid_get_dump(struct pmcraid_instance *pinstance) +{ + pmcraid_info("%s is not yet implemented\n", __func__); +} + +/** + * pmcraid_fail_outstanding_cmds - Fails all outstanding ops. + * @pinstance: pointer to adapter instance structure + * + * This function fails all outstanding ops. If they are submitted to IOA + * already, it sends cancel all messages if IOA is still accepting IOARCBs, + * otherwise just completes the commands and returns the cmd blocks to free + * pool. + * + * Return value: + * none + */ +static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance) +{ + struct pmcraid_cmd *cmd, *temp; + unsigned long lock_flags; + + /* pending command list is protected by pending_pool_lock. Its + * traversal must be done as within this lock + */ + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool, + free_list) { + list_del(&cmd->free_list); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + lock_flags); + cmd->ioa_cb->ioasa.ioasc = + cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET); + cmd->ioa_cb->ioasa.ilid = + cpu_to_be32(PMCRAID_DRIVER_ILID); + + /* In case the command timer is still running */ + del_timer(&cmd->timer); + + /* If this is an IO command, complete it by invoking scsi_done + * function. If this is one of the internal commands other + * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to + * complete it + */ + if (cmd->scsi_cmd) { + + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + __le32 resp = cmd->ioa_cb->ioarcb.response_handle; + + scsi_cmd->result |= DID_ERROR << 16; + + scsi_dma_unmap(scsi_cmd); + pmcraid_return_cmd(cmd); + + + pmcraid_info("failing(%d) CDB[0] = %x result: %x\n", + le32_to_cpu(resp) >> 2, + cmd->ioa_cb->ioarcb.cdb[0], + scsi_cmd->result); + scsi_cmd->scsi_done(scsi_cmd); + } else if (cmd->cmd_done == pmcraid_internal_done || + cmd->cmd_done == pmcraid_erp_done) { + cmd->cmd_done(cmd); + } else if (cmd->cmd_done != pmcraid_ioa_reset) { + pmcraid_return_cmd(cmd); + } + + atomic_dec(&pinstance->outstanding_cmds); + spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); + } + + spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); +} + +/** + * pmcraid_ioa_reset - Implementation of IOA reset logic + * + * @cmd: pointer to the cmd block to be used for entire reset process + * + * This function executes most of the steps required for IOA reset. This gets + * called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's + * 'eh_' thread. 
Access to variables used for controling the reset sequence is + * synchronized using host lock. Various functions called during reset process + * would make use of a single command block, pointer to which is also stored in + * adapter instance structure. + * + * Return Value + * None + */ +static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + u8 reset_complete = 0; + + pinstance->ioa_reset_in_progress = 1; + + if (pinstance->reset_cmd != cmd) { + pmcraid_err("reset is called with different command block\n"); + pinstance->reset_cmd = cmd; + } + + pmcraid_info("reset_engine: state = %d, command = %p\n", + pinstance->ioa_state, cmd); + + switch (pinstance->ioa_state) { + + case IOA_STATE_DEAD: + /* If IOA is offline, whatever may be the reset reason, just + * return. callers might be waiting on the reset wait_q, wake + * up them + */ + pmcraid_err("IOA is offline no reset is possible\n"); + reset_complete = 1; + break; + + case IOA_STATE_IN_BRINGDOWN: + /* we enter here, once ioa shutdown command is processed by IOA + * Alert IOA for a possible reset. If reset alert fails, IOA + * goes through hard-reset + */ + pmcraid_disable_interrupts(pinstance, ~0); + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + break; + + case IOA_STATE_UNKNOWN: + /* We may be called during probe or resume. Some pre-processing + * is required for prior to reset + */ + scsi_block_requests(pinstance->host); + + /* If asked to reset while IOA was processing responses or + * there are any error responses then IOA may require + * hard-reset. + */ + if (pinstance->ioa_hard_reset == 0) { + if (ioread32(pinstance->ioa_status) & + INTRS_TRANSITION_TO_OPERATIONAL) { + pmcraid_info("sticky bit set, bring-up\n"); + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + pmcraid_reinit_cmdblk(cmd); + pmcraid_identify_hrrq(cmd); + } else { + pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; + pmcraid_soft_reset(cmd); + } + } else { + /* Alert IOA of a possible reset and wait for critical + * operation in progress bit to reset + */ + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + } + break; + + case IOA_STATE_IN_RESET_ALERT: + /* If critical operation in progress bit is reset or wait gets + * timed out, reset proceeds with starting BIST on the IOA. + * pmcraid_ioa_hard_reset keeps a count of reset attempts. If + * they are 3 or more, reset engine marks IOA dead and returns + */ + pinstance->ioa_state = IOA_STATE_IN_HARD_RESET; + pmcraid_start_bist(cmd); + break; + + case IOA_STATE_IN_HARD_RESET: + pinstance->ioa_reset_attempts++; + + /* retry reset if we haven't reached maximum allowed limit */ + if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) { + pinstance->ioa_reset_attempts = 0; + pmcraid_err("IOA didn't respond marking it as dead\n"); + pinstance->ioa_state = IOA_STATE_DEAD; + reset_complete = 1; + break; + } + + /* Once either bist or pci reset is done, restore PCI config + * space. 
If this fails, proceed with hard reset again + */ + + if (pci_restore_state(pinstance->pdev)) { + pmcraid_info("config-space error resetting again\n"); + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + break; + } + + /* fail all pending commands */ + pmcraid_fail_outstanding_cmds(pinstance); + + /* check if unit check is active, if so extract dump */ + if (pinstance->ioa_unit_check) { + pmcraid_info("unit check is active\n"); + pinstance->ioa_unit_check = 0; + pmcraid_get_dump(pinstance); + pinstance->ioa_reset_attempts--; + pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; + pmcraid_reset_alert(cmd); + break; + } + + /* if the reset reason is to bring-down the ioa, we might be + * done with the reset restore pci_config_space and complete + * the reset + */ + if (pinstance->ioa_bringdown) { + pmcraid_info("bringing down the adapter\n"); + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->ioa_bringdown = 0; + pinstance->ioa_state = IOA_STATE_UNKNOWN; + reset_complete = 1; + } else { + /* bring-up IOA, so proceed with soft reset + * Reinitialize hrrq_buffers and their indices also + * enable interrupts after a pci_restore_state + */ + if (pmcraid_reset_enable_ioa(pinstance)) { + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + pmcraid_info("bringing up the adapter\n"); + pmcraid_reinit_cmdblk(cmd); + pmcraid_identify_hrrq(cmd); + } else { + pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; + pmcraid_soft_reset(cmd); + } + } + break; + + case IOA_STATE_IN_SOFT_RESET: + /* TRANSITION TO OPERATIONAL is on so start initialization + * sequence + */ + pmcraid_info("In softreset proceeding with bring-up\n"); + pinstance->ioa_state = IOA_STATE_IN_BRINGUP; + + /* Initialization commands start with HRRQ identification. From + * now on tasklet completes most of the commands as IOA is up + * and intrs are enabled + */ + pmcraid_identify_hrrq(cmd); + break; + + case IOA_STATE_IN_BRINGUP: + /* we are done with bringing up of IOA, change the ioa_state to + * operational and wake up any waiters + */ + pinstance->ioa_state = IOA_STATE_OPERATIONAL; + reset_complete = 1; + break; + + case IOA_STATE_OPERATIONAL: + default: + /* When IOA is operational and a reset is requested, check for + * the reset reason. If reset is to bring down IOA, unregister + * HCAMs and initiate shutdown; if adapter reset is forced then + * restart reset sequence again + */ + if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE && + pinstance->force_ioa_reset == 0) { + reset_complete = 1; + } else { + if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE) + pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN; + pmcraid_reinit_cmdblk(cmd); + pmcraid_unregister_hcams(cmd); + } + break; + } + + /* reset will be completed if ioa_state is either DEAD or UNKNOWN or + * OPERATIONAL. Reset all control variables used during reset, wake up + * any waiting threads and let the SCSI mid-layer send commands. Note + * that host_lock must be held before invoking scsi_report_bus_reset. + */ + if (reset_complete) { + pinstance->ioa_reset_in_progress = 0; + pinstance->ioa_reset_attempts = 0; + pinstance->reset_cmd = NULL; + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->ioa_bringdown = 0; + pmcraid_return_cmd(cmd); + + /* If target state is to bring up the adapter, proceed with + * hcam registration and resource exposure to mid-layer. 
+ */ + if (pinstance->ioa_state == IOA_STATE_OPERATIONAL) + pmcraid_register_hcams(pinstance); + + wake_up_all(&pinstance->reset_wait_q); + } + + return; +} + +/** + * pmcraid_initiate_reset - initiates reset sequence. This is called from + * ISR/tasklet during error interrupts including IOA unit check. If reset + * is already in progress, it just returns, otherwise initiates IOA reset + * to bring IOA up to operational state. + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance) +{ + struct pmcraid_cmd *cmd; + + /* If the reset is already in progress, just return, otherwise start + * reset sequence and return + */ + if (!pinstance->ioa_reset_in_progress) { + scsi_block_requests(pinstance->host); + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + pmcraid_err("no cmnd blocks for initiate_reset\n"); + return; + } + + pinstance->ioa_shutdown_type = SHUTDOWN_NONE; + pinstance->reset_cmd = cmd; + pinstance->force_ioa_reset = 1; + pmcraid_ioa_reset(cmd); + } +} + +/** + * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup + * or bringdown IOA + * @pinstance: pointer adapter instance structure + * @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV + * @target_state: expected target state after reset + * + * Note: This command initiates reset and waits for its completion. Hence this + * should not be called from isr/timer/tasklet functions (timeout handlers, + * error response handlers and interrupt handlers). + * + * Return Value + * 1 in case ioa_state is not target_state, 0 otherwise. + */ +static int pmcraid_reset_reload( + struct pmcraid_instance *pinstance, + u8 shutdown_type, + u8 target_state +) +{ + struct pmcraid_cmd *reset_cmd = NULL; + unsigned long lock_flags; + int reset = 1; + + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + + if (pinstance->ioa_reset_in_progress) { + pmcraid_info("reset_reload: reset is already in progress\n"); + + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + + wait_event(pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress); + + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + + if (pinstance->ioa_state == IOA_STATE_DEAD) { + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + pmcraid_info("reset_reload: IOA is dead\n"); + return reset; + } else if (pinstance->ioa_state == target_state) { + reset = 0; + } + } + + if (reset) { + pmcraid_info("reset_reload: proceeding with reset\n"); + scsi_block_requests(pinstance->host); + reset_cmd = pmcraid_get_free_cmd(pinstance); + + if (reset_cmd == NULL) { + pmcraid_err("no free cmnd for reset_reload\n"); + spin_unlock_irqrestore(pinstance->host->host_lock, + lock_flags); + return reset; + } + + if (shutdown_type == SHUTDOWN_NORMAL) + pinstance->ioa_bringdown = 1; + + pinstance->ioa_shutdown_type = shutdown_type; + pinstance->reset_cmd = reset_cmd; + pinstance->force_ioa_reset = reset; + pmcraid_info("reset_reload: initiating reset\n"); + pmcraid_ioa_reset(reset_cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + pmcraid_info("reset_reload: waiting for reset to complete\n"); + wait_event(pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress); + + pmcraid_info("reset_reload: reset is complete !! 
\n");
+ scsi_unblock_requests(pinstance->host);
+ if (pinstance->ioa_state == target_state)
+ reset = 0;
+ }
+
+ return reset;
+}
+
+/**
+ * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bring down the IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ * whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
+{
+ return pmcraid_reset_reload(pinstance,
+ SHUTDOWN_NORMAL,
+ IOA_STATE_UNKNOWN);
+}
+
+/**
+ * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up the IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ * whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
+{
+ return pmcraid_reset_reload(pinstance,
+ SHUTDOWN_NONE,
+ IOA_STATE_OPERATIONAL);
+}
+
+/**
+ * pmcraid_request_sense - Send request sense to a device
+ * @cmd: pmcraid command struct
+ *
+ * This function sends a request sense to a device as a result of a check
+ * condition. This method re-uses the same command block that failed earlier.
+ */
+static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+ /* allocate DMAable memory for sense buffers */
+ cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma);
+
+ if (cmd->sense_buffer == NULL) {
+ pmcraid_err
+ ("couldn't allocate sense buffer for request sense\n");
+ pmcraid_erp_done(cmd);
+ return;
+ }
+
+ /* re-use the command block */
+ memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
+ memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->request_flags0 = (SYNC_COMPLETE |
+ NO_LINK_DESCS |
+ INHIBIT_UL_CHECK);
+ ioarcb->request_type = REQ_TYPE_SCSI;
+ ioarcb->cdb[0] = REQUEST_SENSE;
+ ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+
+ ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+
+ ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
+ ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+ ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+
+ /* request sense might be called as part of error response processing
+ * which runs in tasklet context. It is possible that the mid-layer
+ * might schedule a queuecommand during this time, hence writing to
+ * IOARRIN must be protected by host_lock
+ */
+ pmcraid_send_cmd(cmd, pmcraid_erp_done,
+ PMCRAID_REQUEST_SENSE_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
+ * @cmd: command that failed
+ * @sense: non-zero if sense data has already been copied to the mid-layer
+ * (request sense is then not issued after the cancel all)
+ *
+ * This function sends a cancel all to a device to clear the queue.
+ */
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
+ void (*cmd_done) (struct pmcraid_cmd *) = sense ?
pmcraid_erp_done + : pmcraid_request_sense; + + memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); + ioarcb->request_flags0 = SYNC_OVERRIDE; + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS; + + if (RES_IS_GSCSI(res->cfg_entry)) + ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL; + + ioarcb->ioadl_bus_addr = 0; + ioarcb->ioadl_length = 0; + ioarcb->data_transfer_length = 0; + ioarcb->ioarcb_bus_addr &= (~0x1FULL); + + /* writing to IOARRIN must be protected by host_lock, as mid-layer + * schedule queuecommand while we are doing this + */ + pmcraid_send_cmd(cmd, cmd_done, + PMCRAID_REQUEST_SENSE_TIMEOUT, + pmcraid_timeout_handler); +} + +/** + * pmcraid_frame_auto_sense: frame fixed format sense information + * + * @cmd: pointer to failing command block + * + * Return value + * none + */ +static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd) +{ + u8 *sense_buf = cmd->scsi_cmd->sense_buffer; + struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata; + struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; + u32 ioasc = le32_to_cpu(ioasa->ioasc); + u32 failing_lba = 0; + + memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); + cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; + + if (RES_IS_VSET(res->cfg_entry) && + ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC && + ioasa->u.vset.failing_lba_hi != 0) { + + sense_buf[0] = 0x72; + sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc); + sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc); + sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc); + + sense_buf[7] = 12; + sense_buf[8] = 0; + sense_buf[9] = 0x0A; + sense_buf[10] = 0x80; + + failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi); + + sense_buf[12] = (failing_lba & 0xff000000) >> 24; + sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[15] = failing_lba & 0x000000ff; + + failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo); + + sense_buf[16] = (failing_lba & 0xff000000) >> 24; + sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[19] = failing_lba & 0x000000ff; + } else { + sense_buf[0] = 0x70; + sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc); + sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc); + sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc); + + if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) { + if (RES_IS_VSET(res->cfg_entry)) + failing_lba = + le32_to_cpu(ioasa->u. + vset.failing_lba_lo); + sense_buf[0] |= 0x80; + sense_buf[3] = (failing_lba >> 24) & 0xff; + sense_buf[4] = (failing_lba >> 16) & 0xff; + sense_buf[5] = (failing_lba >> 8) & 0xff; + sense_buf[6] = failing_lba & 0xff; + } + + sense_buf[7] = 6; /* additional length */ + } +} + +/** + * pmcraid_error_handler - Error response handlers for a SCSI op + * @cmd: pointer to pmcraid_cmd that has failed + * + * This function determines whether or not to initiate ERP on the affected + * device. This is called from a tasklet, which doesn't hold any locks. 
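 + *
 + * For illustration (a simplified sketch of the check-condition handling
 + * below): when the IOASA carries auto-sense data it is copied straight into
 + * the mid-layer sense buffer; otherwise the same command block is re-used
 + * to issue REQUEST SENSE, and generic SCSI resources additionally get a
 + * CANCEL ALL REQUESTS first:
 + *
 + *   if (ioasa->auto_sense_length != 0)
 + *           memcpy(scsi_cmd->sense_buffer, ioasa->sense_data, data_size);
 + *   else
 + *           pmcraid_request_sense(cmd);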
+ * + * Return value: + * 0 it caller can complete the request, otherwise 1 where in error + * handler itself completes the request and returns the command block + * back to free-pool + */ +static int pmcraid_error_handler(struct pmcraid_cmd *cmd) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; + u32 ioasc = le32_to_cpu(ioasa->ioasc); + u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK; + u32 sense_copied = 0; + + if (!res) { + pmcraid_info("resource pointer is NULL\n"); + return 0; + } + + /* If this was a SCSI read/write command keep count of errors */ + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) + atomic_inc(&res->read_failures); + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) + atomic_inc(&res->write_failures); + + if (!RES_IS_GSCSI(res->cfg_entry) && + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { + pmcraid_frame_auto_sense(cmd); + } + + /* Log IOASC/IOASA information based on user settings */ + pmcraid_ioasc_logger(ioasc, cmd); + + switch (masked_ioasc) { + + case PMCRAID_IOASC_AC_TERMINATED_BY_HOST: + scsi_cmd->result |= (DID_ABORT << 16); + break; + + case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE: + case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + break; + + case PMCRAID_IOASC_NR_SYNC_REQUIRED: + res->sync_reqd = 1; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + + case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC: + scsi_cmd->result |= (DID_PASSTHROUGH << 16); + break; + + case PMCRAID_IOASC_UA_BUS_WAS_RESET: + case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER: + if (!res->reset_progress) + scsi_report_bus_reset(pinstance->host, + scsi_cmd->device->channel); + scsi_cmd->result |= (DID_ERROR << 16); + break; + + case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR: + scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc); + res->sync_reqd = 1; + + /* if check_condition is not active return with error otherwise + * get/frame the sense buffer + */ + if (PMCRAID_IOASC_SENSE_STATUS(ioasc) != + SAM_STAT_CHECK_CONDITION && + PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE) + return 0; + + /* If we have auto sense data as part of IOASA pass it to + * mid-layer + */ + if (ioasa->auto_sense_length != 0) { + short sense_len = ioasa->auto_sense_length; + int data_size = min_t(u16, le16_to_cpu(sense_len), + SCSI_SENSE_BUFFERSIZE); + + memcpy(scsi_cmd->sense_buffer, + ioasa->sense_data, + data_size); + sense_copied = 1; + } + + if (RES_IS_GSCSI(res->cfg_entry)) { + pmcraid_cancel_all(cmd, sense_copied); + } else if (sense_copied) { + pmcraid_erp_done(cmd); + return 0; + } else { + pmcraid_request_sense(cmd); + } + + return 1; + + case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED: + break; + + default: + if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + scsi_cmd->result |= (DID_ERROR << 16); + break; + } + return 0; +} + +/** + * pmcraid_reset_device - device reset handler functions + * + * @scsi_cmd: scsi command struct + * @modifier: reset modifier indicating the reset sequence to be performed + * + * This function issues a device reset to the affected device. + * A LUN reset will be sent to the device first. If that does + * not work, a target reset will be sent. 
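 + *
 + * For example (a sketch based on the eh_*_reset_handler wrappers further
 + * below), a LUN reset from the mid-layer reduces to a PMCRAID_RESET_DEVICE
 + * IOA command with the reset modifier byte set as:
 + *
 + *   ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
 + *   ioarcb->cdb[1] = ENABLE_RESET_MODIFIER | RESET_DEVICE_LUN;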
+ * + * Return value: + * SUCCESS / FAILED + */ +static int pmcraid_reset_device( + struct scsi_cmnd *scsi_cmd, + unsigned long timeout, + u8 modifier +) +{ + struct pmcraid_cmd *cmd; + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_ioarcb *ioarcb; + unsigned long lock_flags; + u32 ioasc; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res) { + pmcraid_err("reset_device: NULL resource pointer\n"); + return FAILED; + } + + /* If adapter is currently going through reset/reload, return failed. + * This will force the mid-layer to call _eh_bus/host reset, which + * will then go to sleep and wait for the reset to complete + */ + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + if (pinstance->ioa_reset_in_progress || + pinstance->ioa_state == IOA_STATE_DEAD) { + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + return FAILED; + } + + res->reset_progress = 1; + pmcraid_info("Resetting %s resource with addr %x\n", + ((modifier & RESET_DEVICE_LUN) ? "LUN" : + ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")), + le32_to_cpu(res->cfg_entry.resource_address)); + + /* get a free cmd block */ + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + pmcraid_err("%s: no cmd blocks are available\n", __func__); + return FAILED; + } + + ioarcb = &cmd->ioa_cb->ioarcb; + ioarcb->resource_handle = res->cfg_entry.resource_handle; + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_RESET_DEVICE; + + /* Initialize reset modifier bits */ + if (modifier) + modifier = ENABLE_RESET_MODIFIER | modifier; + + ioarcb->cdb[1] = modifier; + + init_completion(&cmd->wait_for_completion); + cmd->completion_req = 1; + + pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n", + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); + + pmcraid_send_cmd(cmd, + pmcraid_internal_done, + timeout, + pmcraid_timeout_handler); + + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + + /* RESET_DEVICE command completes after all pending IOARCBs are + * completed. Once this command is completed, pmcraind_internal_done + * will wake up the 'completion' queue. + */ + wait_for_completion(&cmd->wait_for_completion); + + /* complete the command here itself and return the command block + * to free list + */ + pmcraid_return_cmd(cmd); + res->reset_progress = 0; + ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + + /* set the return value based on the returned ioasc */ + return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; +} + +/** + * _pmcraid_io_done - helper for pmcraid_io_done function + * + * @cmd: pointer to pmcraid command struct + * @reslen: residual data length to be set in the ioasa + * @ioasc: ioasc either returned by IOA or set by driver itself. + * + * This function is invoked by pmcraid_io_done to complete mid-layer + * scsi ops. + * + * Return value: + * 0 if caller is required to return it to free_pool. Returns 1 if + * caller need not worry about freeing command block as error handler + * will take care of that. 
+ */ + +static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc) +{ + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + int rc = 0; + + scsi_set_resid(scsi_cmd, reslen); + + pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n", + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, + cmd->ioa_cb->ioarcb.cdb[0], + ioasc, scsi_cmd->result); + + if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0) + rc = pmcraid_error_handler(cmd); + + if (rc == 0) { + scsi_dma_unmap(scsi_cmd); + scsi_cmd->scsi_done(scsi_cmd); + } + + return rc; +} + +/** + * pmcraid_io_done - SCSI completion function + * + * @cmd: pointer to pmcraid command struct + * + * This function is invoked by tasklet/mid-layer error handler to completing + * the SCSI ops sent from mid-layer. + * + * Return value + * none + */ + +static void pmcraid_io_done(struct pmcraid_cmd *cmd) +{ + u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); + u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length); + + if (_pmcraid_io_done(cmd, reslen, ioasc) == 0) + pmcraid_return_cmd(cmd); +} + +/** + * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA + * + * @cmd: command block of the command to be aborted + * + * Return Value: + * returns pointer to command structure used as cancelling cmd + */ +static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd) +{ + struct pmcraid_cmd *cancel_cmd; + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + + pinstance = (struct pmcraid_instance *)cmd->drv_inst; + res = cmd->scsi_cmd->device->hostdata; + + cancel_cmd = pmcraid_get_free_cmd(pinstance); + + if (cancel_cmd == NULL) { + pmcraid_err("%s: no cmd blocks are available\n", __func__); + return NULL; + } + + pmcraid_prepare_cancel_cmd(cancel_cmd, cmd); + + pmcraid_info("aborting command CDB[0]= %x with index = %d\n", + cmd->ioa_cb->ioarcb.cdb[0], + cmd->ioa_cb->ioarcb.response_handle >> 2); + + init_completion(&cancel_cmd->wait_for_completion); + cancel_cmd->completion_req = 1; + + pmcraid_info("command (%d) CDB[0] = %x for %x\n", + le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2, + cmd->ioa_cb->ioarcb.cdb[0], + le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle)); + + pmcraid_send_cmd(cancel_cmd, + pmcraid_internal_done, + PMCRAID_INTERNAL_TIMEOUT, + pmcraid_timeout_handler); + return cancel_cmd; +} + +/** + * pmcraid_abort_complete - Waits for ABORT TASK completion + * + * @cancel_cmd: command block use as cancelling command + * + * Return Value: + * returns SUCCESS if ABORT TASK has good completion + * otherwise FAILED + */ +static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd) +{ + struct pmcraid_resource_entry *res; + u32 ioasc; + + wait_for_completion(&cancel_cmd->wait_for_completion); + res = cancel_cmd->u.res; + cancel_cmd->u.res = NULL; + ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); + + /* If the abort task is not timed out we will get a Good completion + * as sense_key, otherwise we may get one the following responses + * due to subsquent bus reset or device reset. In case IOASC is + * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource + */ + if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || + ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) { + if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) + res->sync_reqd = 1; + ioasc = 0; + } + + /* complete the command here itself */ + pmcraid_return_cmd(cancel_cmd); + return PMCRAID_IOASC_SENSE_KEY(ioasc) ? 
FAILED : SUCCESS; +} + +/** + * pmcraid_eh_abort_handler - entry point for aborting a single task on errors + * + * @scsi_cmd: scsi command struct given by mid-layer. When this is called + * mid-layer ensures that no other commands are queued. This + * never gets called under interrupt, but a separate eh thread. + * + * Return value: + * SUCCESS / FAILED + */ +static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_cmd *cmd; + struct pmcraid_resource_entry *res; + unsigned long host_lock_flags; + unsigned long pending_lock_flags; + struct pmcraid_cmd *cancel_cmd = NULL; + int cmd_found = 0; + int rc = FAILED; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + + dev_err(&pinstance->pdev->dev, + "I/O command timed out, aborting it.\n"); + + res = scsi_cmd->device->hostdata; + + if (res == NULL) + return rc; + + /* If we are currently going through reset/reload, return failed. + * This will force the mid-layer to eventually call + * pmcraid_eh_host_reset which will then go to sleep and wait for the + * reset to complete + */ + spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); + + if (pinstance->ioa_reset_in_progress || + pinstance->ioa_state == IOA_STATE_DEAD) { + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + return rc; + } + + /* loop over pending cmd list to find cmd corresponding to this + * scsi_cmd. Note that this command might not have been completed + * already. locking: all pending commands are protected with + * pending_pool_lock. + */ + spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags); + list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) { + + if (cmd->scsi_cmd == scsi_cmd) { + cmd_found = 1; + break; + } + } + + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + pending_lock_flags); + + /* If the command to be aborted was given to IOA and still pending with + * it, send ABORT_TASK to abort this and wait for its completion + */ + if (cmd_found) + cancel_cmd = pmcraid_abort_cmd(cmd); + + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + + if (cancel_cmd) { + cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata; + rc = pmcraid_abort_complete(cancel_cmd); + } + + return cmd_found ? rc : SUCCESS; +} + +/** + * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks + * + * @scmd: pointer to scsi_cmd that was sent to the resource to be reset. + * + * All these routines invokve pmcraid_reset_device with appropriate parameters. + * Since these are called from mid-layer EH thread, no other IO will be queued + * to the resource being reset. However, control path (IOCTL) may be active so + * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device + * takes care by locking/unlocking host_lock. 
+ * + * Return value + * SUCCESS or FAILED + */ +static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd) +{ + pmcraid_err("Doing device reset due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_INTERNAL_TIMEOUT, + RESET_DEVICE_LUN); +} + +static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd) +{ + pmcraid_err("Doing bus reset due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_RESET_BUS_TIMEOUT, + RESET_DEVICE_BUS); +} + +static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd) +{ + pmcraid_err("Doing target reset due to an I/O command timeout.\n"); + return pmcraid_reset_device(scmd, + PMCRAID_INTERNAL_TIMEOUT, + RESET_DEVICE_TARGET); +} + +/** + * pmcraid_eh_host_reset_handler - adapter reset handler callback + * + * @scmd: pointer to scsi_cmd that was sent to a resource of adapter + * + * Initiates adapter reset to bring it up to operational state + * + * Return value + * SUCCESS or FAILED + */ +static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd) +{ + unsigned long interval = 10000; /* 10 seconds interval */ + int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval; + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)(scmd->device->host->hostdata); + + + /* wait for an additional 150 seconds just in case firmware could come + * up and if it could complete all the pending commands excluding the + * two HCAM (CCN and LDN). + */ + while (waits--) { + if (atomic_read(&pinstance->outstanding_cmds) <= + PMCRAID_MAX_HCAM_CMD) + return SUCCESS; + msleep(interval); + } + + dev_err(&pinstance->pdev->dev, + "Adapter being reset due to an I/O command timeout.\n"); + return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED; +} + +/** + * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes + * @scsi_cmd: scsi command struct + * + * Return value + * number of tags or 0 if the task is not tagged + */ +static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd) +{ + char tag[2]; + u8 rc = 0; + + if (scsi_populate_tag_msg(scsi_cmd, tag)) { + switch (tag[0]) { + case MSG_SIMPLE_TAG: + rc = TASK_TAG_SIMPLE; + break; + case MSG_HEAD_TAG: + rc = TASK_TAG_QUEUE_HEAD; + break; + case MSG_ORDERED_TAG: + rc = TASK_TAG_ORDERED; + break; + }; + } + + return rc; +} + + +/** + * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB + * @cmd: pmcraid command struct + * @sgcount: count of scatter-gather elements + * + * Return value + * returns pointer pmcraid_ioadl_desc, initialized to point to internal + * or external IOADLs + */ +struct pmcraid_ioadl_desc * +pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) +{ + struct pmcraid_ioadl_desc *ioadl; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + int ioadl_count = 0; + + if (ioarcb->add_cmd_param_length) + ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16); + ioarcb->ioadl_length = + sizeof(struct pmcraid_ioadl_desc) * sgcount; + + if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) { + /* external ioadls start at offset 0x80 from control_block + * structure, re-using 24 out of 27 ioadls part of IOARCB. + * It is necessary to indicate to firmware that driver is + * using ioadls to be treated as external to IOARCB. 
+ */ + ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + ioarcb->ioadl_bus_addr = + cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[3])); + ioadl = &ioarcb->add_data.u.ioadl[3]; + } else { + ioarcb->ioadl_bus_addr = + cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[ioadl_count])); + + ioadl = &ioarcb->add_data.u.ioadl[ioadl_count]; + ioarcb->ioarcb_bus_addr |= + DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8); + } + + return ioadl; +} + +/** + * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer + * @pinstance: pointer to adapter instance structure + * @cmd: pmcraid command struct + * + * This function is invoked by queuecommand entry point while sending a command + * to firmware. This builds ioadl descriptors and sets up ioarcb fields. + * + * Return value: + * 0 on success or -1 on failure + */ +static int pmcraid_build_ioadl( + struct pmcraid_instance *pinstance, + struct pmcraid_cmd *cmd +) +{ + int i, nseg; + struct scatterlist *sglist; + + struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; + struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); + struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; + + u32 length = scsi_bufflen(scsi_cmd); + + if (!length) + return 0; + + nseg = scsi_dma_map(scsi_cmd); + + if (nseg < 0) { + dev_err(&pinstance->pdev->dev, "scsi_map_dma failed!\n"); + return -1; + } else if (nseg > PMCRAID_MAX_IOADLS) { + scsi_dma_unmap(scsi_cmd); + dev_err(&pinstance->pdev->dev, + "sg count is (%d) more than allowed!\n", nseg); + return -1; + } + + /* Initialize IOARCB data transfer length fields */ + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) + ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->data_transfer_length = cpu_to_le32(length); + ioadl = pmcraid_init_ioadls(cmd, nseg); + + /* Initialize IOADL descriptor addresses */ + scsi_for_each_sg(scsi_cmd, sglist, nseg, i) { + ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist)); + ioadl[i].address = cpu_to_le64(sg_dma_address(sglist)); + ioadl[i].flags = 0; + } + /* setup last descriptor */ + ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); + + return 0; +} + +/** + * pmcraid_free_sglist - Frees an allocated SG buffer list + * @sglist: scatter/gather list pointer + * + * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist + * + * Return value: + * none + */ +static void pmcraid_free_sglist(struct pmcraid_sglist *sglist) +{ + int i; + + for (i = 0; i < sglist->num_sg; i++) + __free_pages(sg_page(&(sglist->scatterlist[i])), + sglist->order); + + kfree(sglist); +} + +/** + * pmcraid_alloc_sglist - Allocates memory for a SG list + * @buflen: buffer length + * + * Allocates a DMA'able buffer in chunks and assembles a scatter/gather + * list. + * + * Return value + * pointer to sglist / NULL on failure + */ +static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) +{ + struct pmcraid_sglist *sglist; + struct scatterlist *scatterlist; + struct page *page; + int num_elem, i, j; + int sg_size; + int order; + int bsize_elem; + + sg_size = buflen / (PMCRAID_MAX_IOADLS - 1); + order = (sg_size > 0) ? 
get_order(sg_size) : 0; + bsize_elem = PAGE_SIZE * (1 << order); + + /* Determine the actual number of sg entries needed */ + if (buflen % bsize_elem) + num_elem = (buflen / bsize_elem) + 1; + else + num_elem = buflen / bsize_elem; + + /* Allocate a scatter/gather list for the DMA */ + sglist = kzalloc(sizeof(struct pmcraid_sglist) + + (sizeof(struct scatterlist) * (num_elem - 1)), + GFP_KERNEL); + + if (sglist == NULL) + return NULL; + + scatterlist = sglist->scatterlist; + sg_init_table(scatterlist, num_elem); + sglist->order = order; + sglist->num_sg = num_elem; + sg_size = buflen; + + for (i = 0; i < num_elem; i++) { + page = alloc_pages(GFP_KERNEL|GFP_DMA, order); + if (!page) { + for (j = i - 1; j >= 0; j--) + __free_pages(sg_page(&scatterlist[j]), order); + kfree(sglist); + return NULL; + } + + sg_set_page(&scatterlist[i], page, + sg_size < bsize_elem ? sg_size : bsize_elem, 0); + sg_size -= bsize_elem; + } + + return sglist; +} + +/** + * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list + * @sglist: scatter/gather list pointer + * @buffer: buffer pointer + * @len: buffer length + * @direction: data transfer direction + * + * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist + * + * Return value: + * 0 on success / other on failure + */ +static int pmcraid_copy_sglist( + struct pmcraid_sglist *sglist, + unsigned long buffer, + u32 len, + int direction +) +{ + struct scatterlist *scatterlist; + void *kaddr; + int bsize_elem; + int i; + int rc = 0; + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << sglist->order); + + scatterlist = sglist->scatterlist; + + for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { + struct page *page = sg_page(&scatterlist[i]); + + kaddr = kmap(page); + if (direction == DMA_TO_DEVICE) + rc = __copy_from_user(kaddr, + (void *)buffer, + bsize_elem); + else + rc = __copy_to_user((void *)buffer, kaddr, bsize_elem); + + kunmap(page); + + if (rc) { + pmcraid_err("failed to copy user data into sg list\n"); + return -EFAULT; + } + + scatterlist[i].length = bsize_elem; + } + + if (len % bsize_elem) { + struct page *page = sg_page(&scatterlist[i]); + + kaddr = kmap(page); + + if (direction == DMA_TO_DEVICE) + rc = __copy_from_user(kaddr, + (void *)buffer, + len % bsize_elem); + else + rc = __copy_to_user((void *)buffer, + kaddr, + len % bsize_elem); + + kunmap(page); + + scatterlist[i].length = len % bsize_elem; + } + + if (rc) { + pmcraid_err("failed to copy user data into sg list\n"); + rc = -EFAULT; + } + + return rc; +} + +/** + * pmcraid_queuecommand - Queue a mid-layer request + * @scsi_cmd: scsi command struct + * @done: done function + * + * This function queues a request generated by the mid-layer. Midlayer calls + * this routine within host->lock. 
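 + *
 + * In outline (a simplified sketch of the code below), the fast path is:
 + *
 + *   cmd = pmcraid_get_free_cmd(pinstance);
 + *   memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 + *   pmcraid_build_ioadl(pinstance, cmd);
 + *   _pmcraid_fire_command(cmd);
 + *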
Some of the functions called by queuecommand + * would use cmd block queue locks (free_pool_lock and pending_pool_lock) + * + * Return value: + * 0 on success + * SCSI_MLQUEUE_DEVICE_BUSY if device is busy + * SCSI_MLQUEUE_HOST_BUSY if host is busy + */ +static int pmcraid_queuecommand( + struct scsi_cmnd *scsi_cmd, + void (*done) (struct scsi_cmnd *) +) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_ioarcb *ioarcb; + struct pmcraid_cmd *cmd; + int rc = 0; + + pinstance = + (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; + + scsi_cmd->scsi_done = done; + res = scsi_cmd->device->hostdata; + scsi_cmd->result = (DID_OK << 16); + + /* if adapter is marked as dead, set result to DID_NO_CONNECT complete + * the command + */ + if (pinstance->ioa_state == IOA_STATE_DEAD) { + pmcraid_info("IOA is dead, but queuecommand is scheduled\n"); + scsi_cmd->result = (DID_NO_CONNECT << 16); + scsi_cmd->scsi_done(scsi_cmd); + return 0; + } + + /* If IOA reset is in progress, can't queue the commands */ + if (pinstance->ioa_reset_in_progress) + return SCSI_MLQUEUE_HOST_BUSY; + + /* initialize the command and IOARCB to be sent to IOA */ + cmd = pmcraid_get_free_cmd(pinstance); + + if (cmd == NULL) { + pmcraid_err("free command block is not available\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + cmd->scsi_cmd = scsi_cmd; + ioarcb = &(cmd->ioa_cb->ioarcb); + memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); + ioarcb->resource_handle = res->cfg_entry.resource_handle; + ioarcb->request_type = REQ_TYPE_SCSI; + + cmd->cmd_done = pmcraid_io_done; + + if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) { + if (scsi_cmd->underflow == 0) + ioarcb->request_flags0 |= INHIBIT_UL_CHECK; + + if (res->sync_reqd) { + ioarcb->request_flags0 |= SYNC_COMPLETE; + res->sync_reqd = 0; + } + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd); + + if (RES_IS_GSCSI(res->cfg_entry)) + ioarcb->request_flags1 |= DELAY_AFTER_RESET; + } + + rc = pmcraid_build_ioadl(pinstance, cmd); + + pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n", + le32_to_cpu(ioarcb->response_handle) >> 2, + scsi_cmd->cmnd[0], pinstance->host->unique_id, + RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID : + PMCRAID_PHYS_BUS_ID, + RES_IS_VSET(res->cfg_entry) ? 
+ res->cfg_entry.unique_flags1 : + RES_TARGET(res->cfg_entry.resource_address), + RES_LUN(res->cfg_entry.resource_address)); + + if (likely(rc == 0)) { + _pmcraid_fire_command(cmd); + } else { + pmcraid_err("queuecommand could not build ioadl\n"); + pmcraid_return_cmd(cmd); + rc = SCSI_MLQUEUE_HOST_BUSY; + } + + return rc; +} + +/** + * pmcraid_open -char node "open" entry, allowed only users with admin access + */ +static int pmcraid_chr_open(struct inode *inode, struct file *filep) +{ + struct pmcraid_instance *pinstance; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* Populate adapter instance * pointer for use by ioctl */ + pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev); + filep->private_data = pinstance; + + return 0; +} + +/** + * pmcraid_release - char node "release" entry point + */ +static int pmcraid_chr_release(struct inode *inode, struct file *filep) +{ + struct pmcraid_instance *pinstance = + ((struct pmcraid_instance *)filep->private_data); + + filep->private_data = NULL; + fasync_helper(-1, filep, 0, &pinstance->aen_queue); + + return 0; +} + +/** + * pmcraid_fasync - Async notifier registration from applications + * + * This function adds the calling process to a driver global queue. When an + * event occurs, SIGIO will be sent to all processes in this queue. + */ +static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) +{ + struct pmcraid_instance *pinstance; + int rc; + + pinstance = (struct pmcraid_instance *)filep->private_data; + mutex_lock(&pinstance->aen_queue_lock); + rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue); + mutex_unlock(&pinstance->aen_queue_lock); + + return rc; +} + + +/** + * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough + * commands sent over IOCTL interface + * + * @cmd : pointer to struct pmcraid_cmd + * @buflen : length of the request buffer + * @direction : data transfer direction + * + * Return value + * 0 on sucess, non-zero error code on failure + */ +static int pmcraid_build_passthrough_ioadls( + struct pmcraid_cmd *cmd, + int buflen, + int direction +) +{ + struct pmcraid_sglist *sglist = NULL; + struct scatterlist *sg = NULL; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_ioadl_desc *ioadl; + int i; + + sglist = pmcraid_alloc_sglist(buflen); + + if (!sglist) { + pmcraid_err("can't allocate memory for passthrough SGls\n"); + return -ENOMEM; + } + + sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev, + sglist->scatterlist, + sglist->num_sg, direction); + + if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) { + dev_err(&cmd->drv_inst->pdev->dev, + "Failed to map passthrough buffer!\n"); + pmcraid_free_sglist(sglist); + return -EIO; + } + + cmd->sglist = sglist; + ioarcb->request_flags0 |= NO_LINK_DESCS; + + ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg); + + /* Initialize IOADL descriptor addresses */ + for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) { + ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg)); + ioadl[i].address = cpu_to_le64(sg_dma_address(sg)); + ioadl[i].flags = 0; + } + + /* setup the last descriptor */ + ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); + + return 0; +} + + +/** + * pmcraid_release_passthrough_ioadls - release passthrough ioadls + * + * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated + * @buflen: size of the request buffer + * @direction: data transfer direction + * + * Return value + * 0 on sucess, non-zero error code on failure + */ +static void 
pmcraid_release_passthrough_ioadls( + struct pmcraid_cmd *cmd, + int buflen, + int direction +) +{ + struct pmcraid_sglist *sglist = cmd->sglist; + + if (buflen > 0) { + pci_unmap_sg(cmd->drv_inst->pdev, + sglist->scatterlist, + sglist->num_sg, + direction); + pmcraid_free_sglist(sglist); + cmd->sglist = NULL; + } +} + +/** + * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands + * + * @pinstance: pointer to adapter instance structure + * @cmd: ioctl code + * @arg: pointer to pmcraid_passthrough_buffer user buffer + * + * Return value + * 0 on sucess, non-zero error code on failure + */ +static long pmcraid_ioctl_passthrough( + struct pmcraid_instance *pinstance, + unsigned int ioctl_cmd, + unsigned int buflen, + unsigned long arg +) +{ + struct pmcraid_passthrough_ioctl_buffer *buffer; + struct pmcraid_ioarcb *ioarcb; + struct pmcraid_cmd *cmd; + struct pmcraid_cmd *cancel_cmd; + unsigned long request_buffer; + unsigned long request_offset; + unsigned long lock_flags; + int request_size; + int buffer_size; + u8 access, direction; + int rc = 0; + + /* If IOA reset is in progress, wait 10 secs for reset to complete */ + if (pinstance->ioa_reset_in_progress) { + rc = wait_event_interruptible_timeout( + pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress, + msecs_to_jiffies(10000)); + + if (!rc) + return -ETIMEDOUT; + else if (rc < 0) + return -ERESTARTSYS; + } + + /* If adapter is not in operational state, return error */ + if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) { + pmcraid_err("IOA is not operational\n"); + return -ENOTTY; + } + + buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer); + buffer = kmalloc(buffer_size, GFP_KERNEL); + + if (!buffer) { + pmcraid_err("no memory for passthrough buffer\n"); + return -ENOMEM; + } + + request_offset = + offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer); + + request_buffer = arg + request_offset; + + rc = __copy_from_user(buffer, + (struct pmcraid_passthrough_ioctl_buffer *) arg, + sizeof(struct pmcraid_passthrough_ioctl_buffer)); + if (rc) { + pmcraid_err("ioctl: can't copy passthrough buffer\n"); + rc = -EFAULT; + goto out_free_buffer; + } + + request_size = buffer->ioarcb.data_transfer_length; + + if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { + access = VERIFY_READ; + direction = DMA_TO_DEVICE; + } else { + access = VERIFY_WRITE; + direction = DMA_FROM_DEVICE; + } + + if (request_size > 0) { + rc = access_ok(access, arg, request_offset + request_size); + + if (!rc) { + rc = -EFAULT; + goto out_free_buffer; + } + } + + /* check if we have any additional command parameters */ + if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) { + rc = -EINVAL; + goto out_free_buffer; + } + + cmd = pmcraid_get_free_cmd(pinstance); + + if (!cmd) { + pmcraid_err("free command block is not available\n"); + rc = -ENOMEM; + goto out_free_buffer; + } + + cmd->scsi_cmd = NULL; + ioarcb = &(cmd->ioa_cb->ioarcb); + + /* Copy the user-provided IOARCB stuff field by field */ + ioarcb->resource_handle = buffer->ioarcb.resource_handle; + ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length; + ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout; + ioarcb->request_type = buffer->ioarcb.request_type; + ioarcb->request_flags0 = buffer->ioarcb.request_flags0; + ioarcb->request_flags1 = buffer->ioarcb.request_flags1; + memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN); + + if (buffer->ioarcb.add_cmd_param_length) { + ioarcb->add_cmd_param_length = + 
buffer->ioarcb.add_cmd_param_length;
+ ioarcb->add_cmd_param_offset =
+ buffer->ioarcb.add_cmd_param_offset;
+ memcpy(ioarcb->add_data.u.add_cmd_params,
+ buffer->ioarcb.add_data.u.add_cmd_params,
+ buffer->ioarcb.add_cmd_param_length);
+ }
+
+ if (request_size) {
+ rc = pmcraid_build_passthrough_ioadls(cmd,
+ request_size,
+ direction);
+ if (rc) {
+ pmcraid_err("couldn't build passthrough ioadls\n");
+ goto out_free_buffer;
+ }
+ }
+
+ /* If data is being written into the device, copy the data from user
+ * buffers
+ */
+ if (direction == DMA_TO_DEVICE && request_size > 0) {
+ rc = pmcraid_copy_sglist(cmd->sglist,
+ request_buffer,
+ request_size,
+ direction);
+ if (rc) {
+ pmcraid_err("failed to copy user buffer\n");
+ goto out_free_sglist;
+ }
+ }
+
+ /* The passthrough ioctl is a blocking command, so put the caller to
+ * sleep until the command completes or times out. Note that a timeout
+ * value of 0 means no timeout, i.e. wait indefinitely.
+ */
+ cmd->cmd_done = pmcraid_internal_done;
+ init_completion(&cmd->wait_for_completion);
+ cmd->completion_req = 1;
+
+ pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ _pmcraid_fire_command(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ /* If a command timeout is specified, make the caller wait only that
+ * long; otherwise block until completion. If the command gets timed
+ * out, it will be aborted.
+ */
+ if (buffer->ioarcb.cmd_timeout == 0) {
+ wait_for_completion(&cmd->wait_for_completion);
+ } else if (!wait_for_completion_timeout(
+ &cmd->wait_for_completion,
+ msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
+
+ pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+ cmd->ioa_cb->ioarcb.cdb[0]);
+
+ rc = -ETIMEDOUT;
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ cancel_cmd = pmcraid_abort_cmd(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ if (cancel_cmd) {
+ wait_for_completion(&cancel_cmd->wait_for_completion);
+ pmcraid_return_cmd(cancel_cmd);
+ }
+
+ goto out_free_sglist;
+ }
+
+ /* If the command failed for any reason, copy entire IOASA buffer and
+ * return IOCTL success.
If copying IOASA to user-buffer fails, return + * EFAULT + */ + if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) { + + void *ioasa = + (void *)(arg + + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); + + pmcraid_info("command failed with %x\n", + le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); + if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, + sizeof(struct pmcraid_ioasa))) { + pmcraid_err("failed to copy ioasa buffer to user\n"); + rc = -EFAULT; + } + } + /* If the data transfer was from device, copy the data onto user + * buffers + */ + else if (direction == DMA_FROM_DEVICE && request_size > 0) { + rc = pmcraid_copy_sglist(cmd->sglist, + request_buffer, + request_size, + direction); + if (rc) { + pmcraid_err("failed to copy user buffer\n"); + rc = -EFAULT; + } + } + +out_free_sglist: + pmcraid_release_passthrough_ioadls(cmd, request_size, direction); + pmcraid_return_cmd(cmd); + +out_free_buffer: + kfree(buffer); + + return rc; +} + + + + +/** + * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself + * + * @pinstance: pointer to adapter instance structure + * @cmd: ioctl command passed in + * @buflen: length of user_buffer + * @user_buffer: user buffer pointer + * + * Return Value + * 0 in case of success, otherwise appropriate error code + */ +static long pmcraid_ioctl_driver( + struct pmcraid_instance *pinstance, + unsigned int cmd, + unsigned int buflen, + void __user *user_buffer +) +{ + int rc = -ENOSYS; + + if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) { + pmcraid_err("ioctl_driver: access fault in request buffer \n"); + return -EFAULT; + } + + switch (cmd) { + case PMCRAID_IOCTL_RESET_ADAPTER: + pmcraid_reset_bringup(pinstance); + rc = 0; + break; + + default: + break; + } + + return rc; +} + +/** + * pmcraid_check_ioctl_buffer - check for proper access to user buffer + * + * @cmd: ioctl command + * @arg: user buffer + * @hdr: pointer to kernel memory for pmcraid_ioctl_header + * + * Return Value + * negetive error code if there are access issues, otherwise zero. + * Upon success, returns ioctl header copied out of user buffer. 
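 + *
 + * For example (a sketch of a well-formed request, assuming the user argument
 + * points to a pmcraid_ioctl_header immediately followed by the data buffer):
 + *
 + *   memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
 + *   hdr.buffer_length = <number of data bytes that follow the header>;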
+ */
+
+static int pmcraid_check_ioctl_buffer(
+ int cmd,
+ void __user *arg,
+ struct pmcraid_ioctl_header *hdr
+)
+{
+ int rc = 0;
+ int access = VERIFY_READ;
+
+ if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
+ pmcraid_err("couldn't copy ioctl header from user buffer\n");
+ return -EFAULT;
+ }
+
+ /* check for valid driver signature */
+ rc = memcmp(hdr->signature,
+ PMCRAID_IOCTL_SIGNATURE,
+ sizeof(hdr->signature));
+ if (rc) {
+ pmcraid_err("signature verification failed\n");
+ return -EINVAL;
+ }
+
+ /* buffer length can't be negative */
+ if (hdr->buffer_length < 0) {
+ pmcraid_err("ioctl: invalid buffer length specified\n");
+ return -EINVAL;
+ }
+
+ /* check for appropriate buffer access */
+ if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
+ access = VERIFY_WRITE;
+
+ rc = access_ok(access,
+ (arg + sizeof(struct pmcraid_ioctl_header)),
+ hdr->buffer_length);
+ if (!rc) {
+ pmcraid_err("access failed for user buffer of size %d\n",
+ hdr->buffer_length);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pmcraid_chr_ioctl - char node ioctl entry point
+ */
+static long pmcraid_chr_ioctl(
+ struct file *filep,
+ unsigned int cmd,
+ unsigned long arg
+)
+{
+ struct pmcraid_instance *pinstance = NULL;
+ struct pmcraid_ioctl_header *hdr = NULL;
+ int retval = -ENOTTY;
+
+ hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
+
+ if (!hdr) {
+ pmcraid_err("failed to allocate memory for ioctl header\n");
+ return -ENOMEM;
+ }
+
+ retval = pmcraid_check_ioctl_buffer(cmd, (void __user *)arg, hdr);
+
+ if (retval) {
+ pmcraid_info("chr_ioctl: header check failed\n");
+ kfree(hdr);
+ return retval;
+ }
+
+ pinstance = (struct pmcraid_instance *)filep->private_data;
+
+ if (!pinstance) {
+ pmcraid_info("adapter instance is not found\n");
+ kfree(hdr);
+ return -ENOTTY;
+ }
+
+ switch (_IOC_TYPE(cmd)) {
+
+ case PMCRAID_PASSTHROUGH_IOCTL:
+ /* If ioctl code is to download microcode, we need to block
+ * mid-layer requests.
+ */ + if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) + scsi_block_requests(pinstance->host); + + retval = pmcraid_ioctl_passthrough(pinstance, + cmd, + hdr->buffer_length, + arg); + + if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) + scsi_unblock_requests(pinstance->host); + break; + + case PMCRAID_DRIVER_IOCTL: + arg += sizeof(struct pmcraid_ioctl_header); + retval = pmcraid_ioctl_driver(pinstance, + cmd, + hdr->buffer_length, + (void __user *)arg); + break; + + default: + retval = -ENOTTY; + break; + } + + kfree(hdr); + + return retval; +} + +/** + * File operations structure for management interface + */ +static const struct file_operations pmcraid_fops = { + .owner = THIS_MODULE, + .open = pmcraid_chr_open, + .release = pmcraid_chr_release, + .fasync = pmcraid_chr_fasync, + .unlocked_ioctl = pmcraid_chr_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = pmcraid_chr_ioctl, +#endif +}; + + + + +/** + * pmcraid_show_log_level - Display adapter's error logging level + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_show_log_level( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)shost->hostdata; + return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level); +} + +/** + * pmcraid_store_log_level - Change the adapter's error logging level + * @dev: class device struct + * @buf: buffer + * @count: not used + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_store_log_level( + struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count +) +{ + struct Scsi_Host *shost; + struct pmcraid_instance *pinstance; + unsigned long val; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + /* log-level should be from 0 to 2 */ + if (val > 2) + return -EINVAL; + + shost = class_to_shost(dev); + pinstance = (struct pmcraid_instance *)shost->hostdata; + pinstance->current_log_level = val; + + return strlen(buf); +} + +static struct device_attribute pmcraid_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = pmcraid_show_log_level, + .store = pmcraid_store_log_level, +}; + +/** + * pmcraid_show_drv_version - Display driver version + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_show_drv_version( + struct device *dev, + struct device_attribute *attr, + char *buf +) +{ + return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n", + PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); +} + +static struct device_attribute pmcraid_driver_version_attr = { + .attr = { + .name = "drv_version", + .mode = S_IRUGO, + }, + .show = pmcraid_show_drv_version, +}; + +/** + * pmcraid_show_io_adapter_id - Display driver assigned adapter id + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + */ +static ssize_t pmcraid_show_adapter_id( + struct device *dev, + struct device_attribute *attr, + char *buf +) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)shost->hostdata; + u32 adapter_id = (pinstance->pdev->bus->number << 8) | + pinstance->pdev->devfn; + u32 aen_group = pmcraid_event_family.id; + + return snprintf(buf, PAGE_SIZE, + "adapter id: %d\nminor: %d\naen group: %d\n", + 
adapter_id, MINOR(pinstance->cdev.dev), aen_group); +} + +static struct device_attribute pmcraid_adapter_id_attr = { + .attr = { + .name = "adapter_id", + .mode = S_IRUGO | S_IWUSR, + }, + .show = pmcraid_show_adapter_id, +}; + +static struct device_attribute *pmcraid_host_attrs[] = { + &pmcraid_log_level_attr, + &pmcraid_driver_version_attr, + &pmcraid_adapter_id_attr, + NULL, +}; + + +/* host template structure for pmcraid driver */ +static struct scsi_host_template pmcraid_host_template = { + .module = THIS_MODULE, + .name = PMCRAID_DRIVER_NAME, + .queuecommand = pmcraid_queuecommand, + .eh_abort_handler = pmcraid_eh_abort_handler, + .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler, + .eh_target_reset_handler = pmcraid_eh_target_reset_handler, + .eh_device_reset_handler = pmcraid_eh_device_reset_handler, + .eh_host_reset_handler = pmcraid_eh_host_reset_handler, + + .slave_alloc = pmcraid_slave_alloc, + .slave_configure = pmcraid_slave_configure, + .slave_destroy = pmcraid_slave_destroy, + .change_queue_depth = pmcraid_change_queue_depth, + .change_queue_type = pmcraid_change_queue_type, + .can_queue = PMCRAID_MAX_IO_CMD, + .this_id = -1, + .sg_tablesize = PMCRAID_MAX_IOADLS, + .max_sectors = PMCRAID_IOA_MAX_SECTORS, + .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = pmcraid_host_attrs, + .proc_name = PMCRAID_DRIVER_NAME +}; + +/** + * pmcraid_isr_common - Common interrupt handler routine + * + * @pinstance: pointer to adapter instance + * @intrs: active interrupts (contents of ioa_host_interrupt register) + * @hrrq_id: Host RRQ index + * + * Return Value + * none + */ +static void pmcraid_isr_common( + struct pmcraid_instance *pinstance, + u32 intrs, + int hrrq_id +) +{ + u32 intrs_clear = + (intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs + : INTRS_HRRQ_VALID; + iowrite32(intrs_clear, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + + /* hrrq valid bit was set, schedule tasklet to handle the response */ + if (intrs_clear == INTRS_HRRQ_VALID) + tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id])); +} + +/** + * pmcraid_isr - implements interrupt handling routine + * + * @irq: interrupt vector number + * @dev_id: pointer hrrq_vector + * + * Return Value + * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored + */ +static irqreturn_t pmcraid_isr(int irq, void *dev_id) +{ + struct pmcraid_isr_param *hrrq_vector; + struct pmcraid_instance *pinstance; + unsigned long lock_flags; + u32 intrs; + + /* In case of legacy interrupt mode where interrupts are shared across + * isrs, it may be possible that the current interrupt is not from IOA + */ + if (!dev_id) { + printk(KERN_INFO "%s(): NULL host pointer\n", __func__); + return IRQ_NONE; + } + + hrrq_vector = (struct pmcraid_isr_param *)dev_id; + pinstance = hrrq_vector->drv_inst; + + /* Acquire the lock (currently host_lock) while processing interrupts. + * This interval is small as most of the response processing is done by + * tasklet without the lock. + */ + spin_lock_irqsave(pinstance->host->host_lock, lock_flags); + intrs = pmcraid_read_interrupts(pinstance); + + if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) { + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + return IRQ_NONE; + } + + /* Any error interrupts including unit_check, initiate IOA reset. 
+ * In case of unit check indicate to reset_sequence that IOA unit + * checked and prepare for a dump during reset sequence + */ + if (intrs & PMCRAID_ERROR_INTERRUPTS) { + + if (intrs & INTRS_IOA_UNIT_CHECK) + pinstance->ioa_unit_check = 1; + + iowrite32(intrs, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + pmcraid_err("ISR: error interrupts: %x initiating reset\n", + intrs); + intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); + pmcraid_initiate_reset(pinstance); + } else { + pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id); + } + + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + + return IRQ_HANDLED; +} + + +/** + * pmcraid_worker_function - worker thread function + * + * @workp: pointer to struct work queue + * + * Return Value + * None + */ + +static void pmcraid_worker_function(struct work_struct *workp) +{ + struct pmcraid_instance *pinstance; + struct pmcraid_resource_entry *res; + struct pmcraid_resource_entry *temp; + struct scsi_device *sdev; + unsigned long lock_flags; + unsigned long host_lock_flags; + u8 bus, target, lun; + + pinstance = container_of(workp, struct pmcraid_instance, worker_q); + /* add resources only after host is added into system */ + if (!atomic_read(&pinstance->expose_resources)) + return; + + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) { + + if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) { + sdev = res->scsi_dev; + + /* host_lock must be held before calling + * scsi_device_get + */ + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + if (!scsi_device_get(sdev)) { + spin_unlock_irqrestore( + pinstance->host->host_lock, + host_lock_flags); + pmcraid_info("deleting %x from midlayer\n", + res->cfg_entry.resource_address); + list_move_tail(&res->queue, + &pinstance->free_res_q); + spin_unlock_irqrestore( + &pinstance->resource_lock, + lock_flags); + scsi_remove_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(&pinstance->resource_lock, + lock_flags); + res->change_detected = 0; + } else { + spin_unlock_irqrestore( + pinstance->host->host_lock, + host_lock_flags); + } + } + } + + list_for_each_entry(res, &pinstance->used_res_q, queue) { + + if (res->change_detected == RES_CHANGE_ADD) { + + if (!pmcraid_expose_resource(&res->cfg_entry)) + continue; + + if (RES_IS_VSET(res->cfg_entry)) { + bus = PMCRAID_VSET_BUS_ID; + target = res->cfg_entry.unique_flags1; + lun = PMCRAID_VSET_LUN_ID; + } else { + bus = PMCRAID_PHYS_BUS_ID; + target = + RES_TARGET( + res->cfg_entry.resource_address); + lun = RES_LUN(res->cfg_entry.resource_address); + } + + res->change_detected = 0; + spin_unlock_irqrestore(&pinstance->resource_lock, + lock_flags); + scsi_add_device(pinstance->host, bus, target, lun); + spin_lock_irqsave(&pinstance->resource_lock, + lock_flags); + } + } + + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); +} + +/** + * pmcraid_tasklet_function - Tasklet function + * + * @instance: pointer to msix param structure + * + * Return Value + * None + */ +void pmcraid_tasklet_function(unsigned long instance) +{ + struct pmcraid_isr_param *hrrq_vector; + struct pmcraid_instance *pinstance; + unsigned long hrrq_lock_flags; + unsigned long pending_lock_flags; + unsigned long host_lock_flags; + spinlock_t *lockp; /* hrrq buffer lock */ + int id; + u32 intrs; + __le32 resp; + + hrrq_vector = (struct pmcraid_isr_param *)instance; + pinstance = hrrq_vector->drv_inst; + id = hrrq_vector->hrrq_id; + 
lockp = &(pinstance->hrrq_lock[id]); + intrs = pmcraid_read_interrupts(pinstance); + + /* If interrupts was as part of the ioa initialization, clear and mask + * it. Delete the timer and wakeup the reset engine to proceed with + * reset sequence + */ + if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_mask_reg); + iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, + pinstance->int_regs.ioa_host_interrupt_clr_reg); + + if (pinstance->reset_cmd != NULL) { + del_timer(&pinstance->reset_cmd->timer); + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + } + return; + } + + /* loop through each of the commands responded by IOA. Each HRRQ buf is + * protected by its own lock. Traversals must be done within this lock + * as there may be multiple tasklets running on multiple CPUs. Note + * that the lock is held just for picking up the response handle and + * manipulating hrrq_curr/toggle_bit values. + */ + spin_lock_irqsave(lockp, hrrq_lock_flags); + + resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); + + while ((resp & HRRQ_TOGGLE_BIT) == + pinstance->host_toggle_bit[id]) { + + int cmd_index = resp >> 2; + struct pmcraid_cmd *cmd = NULL; + + if (cmd_index < PMCRAID_MAX_CMD) { + cmd = pinstance->cmd_list[cmd_index]; + } else { + /* In case of invalid response handle, initiate IOA + * reset sequence. + */ + spin_unlock_irqrestore(lockp, hrrq_lock_flags); + + pmcraid_err("Invalid response %d initiating reset\n", + cmd_index); + + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + pmcraid_initiate_reset(pinstance); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + + spin_lock_irqsave(lockp, hrrq_lock_flags); + break; + } + + if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) { + pinstance->hrrq_curr[id]++; + } else { + pinstance->hrrq_curr[id] = pinstance->hrrq_start[id]; + pinstance->host_toggle_bit[id] ^= 1u; + } + + spin_unlock_irqrestore(lockp, hrrq_lock_flags); + + spin_lock_irqsave(&pinstance->pending_pool_lock, + pending_lock_flags); + list_del(&cmd->free_list); + spin_unlock_irqrestore(&pinstance->pending_pool_lock, + pending_lock_flags); + del_timer(&cmd->timer); + atomic_dec(&pinstance->outstanding_cmds); + + if (cmd->cmd_done == pmcraid_ioa_reset) { + spin_lock_irqsave(pinstance->host->host_lock, + host_lock_flags); + cmd->cmd_done(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, + host_lock_flags); + } else if (cmd->cmd_done != NULL) { + cmd->cmd_done(cmd); + } + /* loop over until we are done with all responses */ + spin_lock_irqsave(lockp, hrrq_lock_flags); + resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); + } + + spin_unlock_irqrestore(lockp, hrrq_lock_flags); +} + +/** + * pmcraid_unregister_interrupt_handler - de-register interrupts handlers + * @pinstance: pointer to adapter instance structure + * + * This routine un-registers registered interrupt handler and + * also frees irqs/vectors. + * + * Retun Value + * None + */ +static +void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) +{ + free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0])); +} + +/** + * pmcraid_register_interrupt_handler - registers interrupt handler + * @pinstance: pointer to per-adapter instance structure + * + * Return Value + * 0 on success, non-zero error code otherwise. 
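+ *
+ * Note: this path requests a single shared legacy (INTx) interrupt via
+ * request_irq() and configures exactly one HRRQ vector (num_hrrq = 1);
+ * the MSI-X related fields of struct pmcraid_isr_param are left unused
+ * here.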
+ */
+static int
+pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
+{
+ struct pci_dev *pdev = pinstance->pdev;
+
+ pinstance->hrrq_vector[0].hrrq_id = 0;
+ pinstance->hrrq_vector[0].drv_inst = pinstance;
+ pinstance->hrrq_vector[0].vector = 0;
+ pinstance->num_hrrq = 1;
+ return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
+ PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
+}
+
+/**
+ * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
+ * @pinstance: per adapter instance structure pointer
+ * @max_index: number of buffer blocks to release
+ *
+ * Return Value
+ * None
+ */
+static void
+pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
+{
+ int i;
+ for (i = 0; i < max_index; i++) {
+ kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
+ pinstance->cmd_list[i] = NULL;
+ }
+ kmem_cache_destroy(pinstance->cmd_cachep);
+ pinstance->cmd_cachep = NULL;
+}
+
+/**
+ * pmcraid_release_control_blocks - releases buffers allocated for control blocks
+ * @pinstance: pointer to per adapter instance structure
+ * @max_index: number of buffers (from 0 onwards) to release
+ *
+ * This function assumes that the command blocks for which control blocks are
+ * linked are not released.
+ *
+ * Return Value
+ * None
+ */
+static void
+pmcraid_release_control_blocks(
+ struct pmcraid_instance *pinstance,
+ int max_index
+)
+{
+ int i;
+
+ if (pinstance->control_pool == NULL)
+ return;
+
+ for (i = 0; i < max_index; i++) {
+ pci_pool_free(pinstance->control_pool,
+ pinstance->cmd_list[i]->ioa_cb,
+ pinstance->cmd_list[i]->ioa_cb_bus_addr);
+ pinstance->cmd_list[i]->ioa_cb = NULL;
+ pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
+ }
+ pci_pool_destroy(pinstance->control_pool);
+ pinstance->control_pool = NULL;
+}
+
+/**
+ * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Allocates memory for command blocks using kernel slab allocator.
+ *
+ * Return Value
+ * 0 in case of success; -ENOMEM in case of failure
+ */
+static int __devinit
+pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
+ pinstance->host->unique_id);
+
+
+ pinstance->cmd_cachep = kmem_cache_create(
+ pinstance->cmd_pool_name,
+ sizeof(struct pmcraid_cmd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pinstance->cmd_cachep)
+ return -ENOMEM;
+
+ for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+ pinstance->cmd_list[i] =
+ kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
+ if (!pinstance->cmd_list[i]) {
+ pmcraid_release_cmd_blocks(pinstance, i);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * pmcraid_allocate_control_blocks - allocates memory for control blocks
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
+ * and IOASAs. This is called after command blocks are already allocated.
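+ * The control blocks are carved from a pci_pool created with
+ * PMCRAID_IOARCB_ALIGNMENT (32-byte) alignment, the alignment required
+ * for IOARCB bus addresses.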
+ * + * Return Value + * 0 in case it can allocate all control blocks, otherwise -ENOMEM + */ +static int __devinit +pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance) +{ + int i; + + sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d", + pinstance->host->unique_id); + + pinstance->control_pool = + pci_pool_create(pinstance->ctl_pool_name, + pinstance->pdev, + sizeof(struct pmcraid_control_block), + PMCRAID_IOARCB_ALIGNMENT, 0); + + if (!pinstance->control_pool) + return -ENOMEM; + + for (i = 0; i < PMCRAID_MAX_CMD; i++) { + pinstance->cmd_list[i]->ioa_cb = + pci_pool_alloc( + pinstance->control_pool, + GFP_KERNEL, + &(pinstance->cmd_list[i]->ioa_cb_bus_addr)); + + if (!pinstance->cmd_list[i]->ioa_cb) { + pmcraid_release_control_blocks(pinstance, i); + return -ENOMEM; + } + memset(pinstance->cmd_list[i]->ioa_cb, 0, + sizeof(struct pmcraid_control_block)); + } + return 0; +} + +/** + * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s) + * @pinstance: pointer to per adapter instance structure + * @maxindex: size of hrrq buffer pointer array + * + * Return Value + * None + */ +static void +pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex) +{ + int i; + for (i = 0; i < maxindex; i++) { + + pci_free_consistent(pinstance->pdev, + HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD, + pinstance->hrrq_start[i], + pinstance->hrrq_start_bus_addr[i]); + + /* reset pointers and toggle bit to zeros */ + pinstance->hrrq_start[i] = NULL; + pinstance->hrrq_start_bus_addr[i] = 0; + pinstance->host_toggle_bit[i] = 0; + } +} + +/** + * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers + * @pinstance: pointer to per adapter instance structure + * + * Return value + * 0 hrrq buffers are allocated, -ENOMEM otherwise. 
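+ *
+ * Note: each of the num_hrrq queues is a coherent DMA buffer holding
+ * (PMCRAID_MAX_CMD / num_hrrq) entries of HRRQ_ENTRY_SIZE bytes. Each
+ * entry is a __le32 response handle in which bit 0 is the toggle bit
+ * and the upper bits carry the index of the completed command in
+ * cmd_list[] (hence the "resp >> 2" in pmcraid_tasklet_function).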
+ */ +static int __devinit +pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance) +{ + int i; + int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq; + + for (i = 0; i < pinstance->num_hrrq; i++) { + int buffer_size = HRRQ_ENTRY_SIZE * buf_count; + + pinstance->hrrq_start[i] = + pci_alloc_consistent( + pinstance->pdev, + buffer_size, + &(pinstance->hrrq_start_bus_addr[i])); + + if (pinstance->hrrq_start[i] == 0) { + pmcraid_err("could not allocate host rrq: %d\n", i); + pmcraid_release_host_rrqs(pinstance, i); + return -ENOMEM; + } + + memset(pinstance->hrrq_start[i], 0, buffer_size); + pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; + pinstance->hrrq_end[i] = + pinstance->hrrq_start[i] + buf_count - 1; + pinstance->host_toggle_bit[i] = 1; + spin_lock_init(&pinstance->hrrq_lock[i]); + } + return 0; +} + +/** + * pmcraid_release_hcams - release HCAM buffers + * + * @pinstance: pointer to per adapter instance structure + * + * Return value + * none + */ +static void pmcraid_release_hcams(struct pmcraid_instance *pinstance) +{ + if (pinstance->ccn.msg != NULL) { + pci_free_consistent(pinstance->pdev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ccn), + pinstance->ccn.msg, + pinstance->ccn.baddr); + + pinstance->ccn.msg = NULL; + pinstance->ccn.hcam = NULL; + pinstance->ccn.baddr = 0; + } + + if (pinstance->ldn.msg != NULL) { + pci_free_consistent(pinstance->pdev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ldn), + pinstance->ldn.msg, + pinstance->ldn.baddr); + + pinstance->ldn.msg = NULL; + pinstance->ldn.hcam = NULL; + pinstance->ldn.baddr = 0; + } +} + +/** + * pmcraid_allocate_hcams - allocates HCAM buffers + * @pinstance : pointer to per adapter instance structure + * + * Return Value: + * 0 in case of successful allocation, non-zero otherwise + */ +static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance) +{ + pinstance->ccn.msg = pci_alloc_consistent( + pinstance->pdev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ccn), + &(pinstance->ccn.baddr)); + + pinstance->ldn.msg = pci_alloc_consistent( + pinstance->pdev, + PMCRAID_AEN_HDR_SIZE + + sizeof(struct pmcraid_hcam_ldn), + &(pinstance->ldn.baddr)); + + if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) { + pmcraid_release_hcams(pinstance); + } else { + pinstance->ccn.hcam = + (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE; + pinstance->ldn.hcam = + (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE; + + atomic_set(&pinstance->ccn.ignore, 0); + atomic_set(&pinstance->ldn.ignore, 0); + } + + return (pinstance->ldn.msg == NULL) ? 
-ENOMEM : 0; +} + +/** + * pmcraid_release_config_buffers - release config.table buffers + * @pinstance: pointer to per adapter instance structure + * + * Return Value + * none + */ +static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance) +{ + if (pinstance->cfg_table != NULL && + pinstance->cfg_table_bus_addr != 0) { + pci_free_consistent(pinstance->pdev, + sizeof(struct pmcraid_config_table), + pinstance->cfg_table, + pinstance->cfg_table_bus_addr); + pinstance->cfg_table = NULL; + pinstance->cfg_table_bus_addr = 0; + } + + if (pinstance->res_entries != NULL) { + int i; + + for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) + list_del(&pinstance->res_entries[i].queue); + kfree(pinstance->res_entries); + pinstance->res_entries = NULL; + } + + pmcraid_release_hcams(pinstance); +} + +/** + * pmcraid_allocate_config_buffers - allocates DMAable memory for config table + * @pinstance : pointer to per adapter instance structure + * + * Return Value + * 0 for successful allocation, -ENOMEM for any failure + */ +static int __devinit +pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance) +{ + int i; + + pinstance->res_entries = + kzalloc(sizeof(struct pmcraid_resource_entry) * + PMCRAID_MAX_RESOURCES, GFP_KERNEL); + + if (NULL == pinstance->res_entries) { + pmcraid_err("failed to allocate memory for resource table\n"); + return -ENOMEM; + } + + for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) + list_add_tail(&pinstance->res_entries[i].queue, + &pinstance->free_res_q); + + pinstance->cfg_table = + pci_alloc_consistent(pinstance->pdev, + sizeof(struct pmcraid_config_table), + &pinstance->cfg_table_bus_addr); + + if (NULL == pinstance->cfg_table) { + pmcraid_err("couldn't alloc DMA memory for config table\n"); + pmcraid_release_config_buffers(pinstance); + return -ENOMEM; + } + + if (pmcraid_allocate_hcams(pinstance)) { + pmcraid_err("could not alloc DMA memory for HCAMS\n"); + pmcraid_release_config_buffers(pinstance); + return -ENOMEM; + } + + return 0; +} + +/** + * pmcraid_init_tasklets - registers tasklets for response handling + * + * @pinstance: pointer adapter instance structure + * + * Return value + * none + */ +static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance) +{ + int i; + for (i = 0; i < pinstance->num_hrrq; i++) + tasklet_init(&pinstance->isr_tasklet[i], + pmcraid_tasklet_function, + (unsigned long)&pinstance->hrrq_vector[i]); +} + +/** + * pmcraid_kill_tasklets - destroys tasklets registered for response handling + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance) +{ + int i; + for (i = 0; i < pinstance->num_hrrq; i++) + tasklet_kill(&pinstance->isr_tasklet[i]); +} + +/** + * pmcraid_init_buffers - allocates memory and initializes various structures + * @pinstance: pointer to per adapter instance structure + * + * This routine pre-allocates memory based on the type of block as below: + * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator, + * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator + * config-table entries : DMAable memory using pci_alloc_consistent + * HostRRQs : DMAable memory, using pci_alloc_consistent + * + * Return Value + * 0 in case all of the blocks are allocated, -ENOMEM otherwise. 
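+ *
+ * Note: on failure the allocations made by the earlier steps are released
+ * before returning, so the caller does not need to perform any cleanup.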
+ */ +static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance) +{ + int i; + + if (pmcraid_allocate_host_rrqs(pinstance)) { + pmcraid_err("couldn't allocate memory for %d host rrqs\n", + pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_config_buffers(pinstance)) { + pmcraid_err("couldn't allocate memory for config buffers\n"); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_cmd_blocks(pinstance)) { + pmcraid_err("couldn't allocate memory for cmd blocks \n"); + pmcraid_release_config_buffers(pinstance); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + if (pmcraid_allocate_control_blocks(pinstance)) { + pmcraid_err("couldn't allocate memory control blocks \n"); + pmcraid_release_config_buffers(pinstance); + pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + return -ENOMEM; + } + + /* Initialize all the command blocks and add them to free pool. No + * need to lock (free_pool_lock) as this is done in initialization + * itself + */ + for (i = 0; i < PMCRAID_MAX_CMD; i++) { + struct pmcraid_cmd *cmdp = pinstance->cmd_list[i]; + pmcraid_init_cmdblk(cmdp, i); + cmdp->drv_inst = pinstance; + list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool); + } + + return 0; +} + +/** + * pmcraid_reinit_buffers - resets various buffer pointers + * @pinstance: pointer to adapter instance + * Return value + * none + */ +static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance) +{ + int i; + int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD; + + for (i = 0; i < pinstance->num_hrrq; i++) { + memset(pinstance->hrrq_start[i], 0, buffer_size); + pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; + pinstance->hrrq_end[i] = + pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1; + pinstance->host_toggle_bit[i] = 1; + } +} + +/** + * pmcraid_init_instance - initialize per instance data structure + * @pdev: pointer to pci device structure + * @host: pointer to Scsi_Host structure + * @mapped_pci_addr: memory mapped IOA configuration registers + * + * Return Value + * 0 on success, non-zero in case of any failure + */ +static int __devinit pmcraid_init_instance( + struct pci_dev *pdev, + struct Scsi_Host *host, + void __iomem *mapped_pci_addr +) +{ + struct pmcraid_instance *pinstance = + (struct pmcraid_instance *)host->hostdata; + + pinstance->host = host; + pinstance->pdev = pdev; + + /* Initialize register addresses */ + pinstance->mapped_dma_addr = mapped_pci_addr; + + /* Initialize chip-specific details */ + { + struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg; + struct pmcraid_interrupts *pint_regs = &pinstance->int_regs; + + pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin; + + pint_regs->ioa_host_interrupt_reg = + mapped_pci_addr + chip_cfg->ioa_host_intr; + pint_regs->ioa_host_interrupt_clr_reg = + mapped_pci_addr + chip_cfg->ioa_host_intr_clr; + pint_regs->host_ioa_interrupt_reg = + mapped_pci_addr + chip_cfg->host_ioa_intr; + pint_regs->host_ioa_interrupt_clr_reg = + mapped_pci_addr + chip_cfg->host_ioa_intr_clr; + + /* Current version of firmware exposes interrupt mask set + * and mask clr registers through memory mapped bar0. 
+ */ + pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox; + pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus; + pint_regs->ioa_host_interrupt_mask_reg = + mapped_pci_addr + chip_cfg->ioa_host_mask; + pint_regs->ioa_host_interrupt_mask_clr_reg = + mapped_pci_addr + chip_cfg->ioa_host_mask_clr; + pint_regs->global_interrupt_mask_reg = + mapped_pci_addr + chip_cfg->global_intr_mask; + }; + + pinstance->ioa_reset_attempts = 0; + init_waitqueue_head(&pinstance->reset_wait_q); + + atomic_set(&pinstance->outstanding_cmds, 0); + atomic_set(&pinstance->expose_resources, 0); + + INIT_LIST_HEAD(&pinstance->free_res_q); + INIT_LIST_HEAD(&pinstance->used_res_q); + INIT_LIST_HEAD(&pinstance->free_cmd_pool); + INIT_LIST_HEAD(&pinstance->pending_cmd_pool); + + spin_lock_init(&pinstance->free_pool_lock); + spin_lock_init(&pinstance->pending_pool_lock); + spin_lock_init(&pinstance->resource_lock); + mutex_init(&pinstance->aen_queue_lock); + + /* Work-queue (Shared) for deferred processing error handling */ + INIT_WORK(&pinstance->worker_q, pmcraid_worker_function); + + /* Initialize the default log_level */ + pinstance->current_log_level = pmcraid_log_level; + + /* Setup variables required for reset engine */ + pinstance->ioa_state = IOA_STATE_UNKNOWN; + pinstance->reset_cmd = NULL; + return 0; +} + +/** + * pmcraid_release_buffers - release per-adapter buffers allocated + * + * @pinstance: pointer to adapter soft state + * + * Return Value + * none + */ +static void pmcraid_release_buffers(struct pmcraid_instance *pinstance) +{ + pmcraid_release_config_buffers(pinstance); + pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); + pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); + +} + +/** + * pmcraid_shutdown - shutdown adapter controller. 
+ * @pdev: pci device struct + * + * Issues an adapter shutdown to the card waits for its completion + * + * Return value + * none + */ +static void pmcraid_shutdown(struct pci_dev *pdev) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + pmcraid_reset_bringdown(pinstance); +} + + +/** + * pmcraid_get_minor - returns unused minor number from minor number bitmap + */ +static unsigned short pmcraid_get_minor(void) +{ + int minor; + + minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor)); + __set_bit(minor, pmcraid_minor); + return minor; +} + +/** + * pmcraid_release_minor - releases given minor back to minor number bitmap + */ +static void pmcraid_release_minor(unsigned short minor) +{ + __clear_bit(minor, pmcraid_minor); +} + +/** + * pmcraid_setup_chrdev - allocates a minor number and registers a char device + * + * @pinstance: pointer to adapter instance for which to register device + * + * Return value + * 0 in case of success, otherwise non-zero + */ +static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance) +{ + int minor; + int error; + + minor = pmcraid_get_minor(); + cdev_init(&pinstance->cdev, &pmcraid_fops); + pinstance->cdev.owner = THIS_MODULE; + + error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1); + + if (error) + pmcraid_release_minor(minor); + else + device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor), + NULL, "pmcsas%u", minor); + return error; +} + +/** + * pmcraid_release_chrdev - unregisters per-adapter management interface + * + * @pinstance: pointer to adapter instance structure + * + * Return value + * none + */ +static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance) +{ + pmcraid_release_minor(MINOR(pinstance->cdev.dev)); + device_destroy(pmcraid_class, + MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev))); + cdev_del(&pinstance->cdev); +} + +/** + * pmcraid_remove - IOA hot plug remove entry point + * @pdev: pci device struct + * + * Return value + * none + */ +static void __devexit pmcraid_remove(struct pci_dev *pdev) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + + /* remove the management interface (/dev file) for this device */ + pmcraid_release_chrdev(pinstance); + + /* remove host template from scsi midlayer */ + scsi_remove_host(pinstance->host); + + /* block requests from mid-layer */ + scsi_block_requests(pinstance->host); + + /* initiate shutdown adapter */ + pmcraid_shutdown(pdev); + + pmcraid_disable_interrupts(pinstance, ~0); + flush_scheduled_work(); + + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + pmcraid_release_buffers(pinstance); + iounmap(pinstance->mapped_dma_addr); + pci_release_regions(pdev); + scsi_host_put(pinstance->host); + pci_disable_device(pdev); + + return; +} + +#ifdef CONFIG_PM +/** + * pmcraid_suspend - driver suspend entry point for power management + * @pdev: PCI device structure + * @state: PCI power state to suspend routine + * + * Return Value - 0 always + */ +static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + + pmcraid_shutdown(pdev); + pmcraid_disable_interrupts(pinstance, ~0); + pmcraid_kill_tasklets(pinstance); + pci_set_drvdata(pinstance->pdev, pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +/** + * pmcraid_resume - driver resume entry point PCI power 
management + * @pdev: PCI device structure + * + * Return Value - 0 in case of success. Error code in case of any failure + */ +static int pmcraid_resume(struct pci_dev *pdev) +{ + struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); + struct Scsi_Host *host = pinstance->host; + int rc; + int hrrqs; + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + + rc = pci_enable_device(pdev); + + if (rc) { + pmcraid_err("pmcraid: Enable device failed\n"); + return rc; + } + + pci_set_master(pdev); + + if ((sizeof(dma_addr_t) == 4) || + pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + + if (rc == 0) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + + if (rc != 0) { + dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + goto disable_device; + } + + atomic_set(&pinstance->outstanding_cmds, 0); + hrrqs = pinstance->num_hrrq; + rc = pmcraid_register_interrupt_handler(pinstance); + + if (rc) { + pmcraid_err("resume: couldn't register interrupt handlers\n"); + rc = -ENODEV; + goto release_host; + } + + pmcraid_init_tasklets(pinstance); + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + /* Start with hard reset sequence which brings up IOA to operational + * state as well as completes the reset sequence. + */ + pinstance->ioa_hard_reset = 1; + + /* Start IOA firmware initialization and bring card to Operational + * state. + */ + if (pmcraid_reset_bringup(pinstance)) { + pmcraid_err("couldn't initialize IOA \n"); + rc = -ENODEV; + goto release_tasklets; + } + + return 0; + +release_tasklets: + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + +release_host: + scsi_host_put(host); + +disable_device: + pci_disable_device(pdev); + + return rc; +} + +#else + +#define pmcraid_suspend NULL +#define pmcraid_resume NULL + +#endif /* CONFIG_PM */ + +/** + * pmcraid_complete_ioa_reset - Called by either timer or tasklet during + * completion of the ioa reset + * @cmd: pointer to reset command block + */ +static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + unsigned long flags; + + spin_lock_irqsave(pinstance->host->host_lock, flags); + pmcraid_ioa_reset(cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, flags); + scsi_unblock_requests(pinstance->host); + schedule_work(&pinstance->worker_q); +} + +/** + * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP + * + * @cmd: pointer to pmcraid_cmd structure + * + * Return Value + * 0 for success or non-zero for failure cases + */ +static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset; + + pmcraid_reinit_cmdblk(cmd); + + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES; + ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED; + + /* If this was called as part of resource table reinitialization due to + * lost CCN, it is enough to return the command block back to free pool + * as part of set_supported_devs completion function. 
+ */ + if (cmd->drv_inst->reinit_cfg_table) { + cmd->drv_inst->reinit_cfg_table = 0; + cmd->release = 1; + cmd_done = pmcraid_reinit_cfgtable_done; + } + + /* we will be done with the reset sequence after set supported devices, + * setup the done function to return the command block back to free + * pool + */ + pmcraid_send_cmd(cmd, + cmd_done, + PMCRAID_SET_SUP_DEV_TIMEOUT, + pmcraid_timeout_handler); + return; +} + +/** + * pmcraid_init_res_table - Initialize the resource table + * @cmd: pointer to pmcraid command struct + * + * This function looks through the existing resource table, comparing + * it with the config table. This function will take care of old/new + * devices and schedule adding/removing them from the mid-layer + * as appropriate. + * + * Return value + * None + */ +static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_resource_entry *res, *temp; + struct pmcraid_config_table_entry *cfgte; + unsigned long lock_flags; + int found, rc, i; + LIST_HEAD(old_res); + + if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED) + dev_err(&pinstance->pdev->dev, "Require microcode download\n"); + + /* resource list is protected by pinstance->resource_lock. + * init_res_table can be called from probe (user-thread) or runtime + * reset (timer/tasklet) + */ + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); + + list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) + list_move_tail(&res->queue, &old_res); + + for (i = 0; i < pinstance->cfg_table->num_entries; i++) { + cfgte = &pinstance->cfg_table->entries[i]; + + if (!pmcraid_expose_resource(cfgte)) + continue; + + found = 0; + + /* If this entry was already detected and initialized */ + list_for_each_entry_safe(res, temp, &old_res, queue) { + + rc = memcmp(&res->cfg_entry.resource_address, + &cfgte->resource_address, + sizeof(cfgte->resource_address)); + if (!rc) { + list_move_tail(&res->queue, + &pinstance->used_res_q); + found = 1; + break; + } + } + + /* If this is new entry, initialize it and add it the queue */ + if (!found) { + + if (list_empty(&pinstance->free_res_q)) { + dev_err(&pinstance->pdev->dev, + "Too many devices attached\n"); + break; + } + + found = 1; + res = list_entry(pinstance->free_res_q.next, + struct pmcraid_resource_entry, queue); + + res->scsi_dev = NULL; + res->change_detected = RES_CHANGE_ADD; + res->reset_progress = 0; + list_move_tail(&res->queue, &pinstance->used_res_q); + } + + /* copy new configuration table entry details into driver + * maintained resource entry + */ + if (found) { + memcpy(&res->cfg_entry, cfgte, + sizeof(struct pmcraid_config_table_entry)); + pmcraid_info("New res type:%x, vset:%x, addr:%x:\n", + res->cfg_entry.resource_type, + res->cfg_entry.unique_flags1, + le32_to_cpu(res->cfg_entry.resource_address)); + } + } + + /* Detect any deleted entries, mark them for deletion from mid-layer */ + list_for_each_entry_safe(res, temp, &old_res, queue) { + + if (res->scsi_dev) { + res->change_detected = RES_CHANGE_DEL; + res->cfg_entry.resource_handle = + PMCRAID_INVALID_RES_HANDLE; + list_move_tail(&res->queue, &pinstance->used_res_q); + } else { + list_move_tail(&res->queue, &pinstance->free_res_q); + } + } + + /* release the resource list lock */ + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); + pmcraid_set_supported_devs(cmd); +} + +/** + * pmcraid_querycfg - Send a Query IOA Config to the adapter. 
+ * @cmd: pointer pmcraid_cmd struct + * + * This function sends a Query IOA Configuration command to the adapter to + * retrieve the IOA configuration table. + * + * Return value: + * none + */ +static void pmcraid_querycfg(struct pmcraid_cmd *cmd) +{ + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; + struct pmcraid_instance *pinstance = cmd->drv_inst; + int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); + + ioarcb->request_type = REQ_TYPE_IOACMD; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + + ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG; + + /* firmware requires 4-byte length field, specified in B.E format */ + memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size)); + + /* Since entire config table can be described by single IOADL, it can + * be part of IOARCB itself + */ + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->data_transfer_length = + cpu_to_le32(sizeof(struct pmcraid_config_table)); + + ioadl = &(ioarcb->add_data.u.ioadl[0]); + ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); + ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr); + ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table)); + + pmcraid_send_cmd(cmd, pmcraid_init_res_table, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); +} + + +/** + * pmcraid_probe - PCI probe entry pointer for PMC MaxRaid controller driver + * @pdev: pointer to pci device structure + * @dev_id: pointer to device ids structure + * + * Return Value + * returns 0 if the device is claimed and successfully configured. + * returns non-zero error code in case of any failure + */ +static int __devinit pmcraid_probe( + struct pci_dev *pdev, + const struct pci_device_id *dev_id +) +{ + struct pmcraid_instance *pinstance; + struct Scsi_Host *host; + void __iomem *mapped_pci_addr; + int rc = PCIBIOS_SUCCESSFUL; + + if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) { + pmcraid_err + ("maximum number(%d) of supported adapters reached\n", + atomic_read(&pmcraid_adapter_count)); + return -ENOMEM; + } + + atomic_inc(&pmcraid_adapter_count); + rc = pci_enable_device(pdev); + + if (rc) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + atomic_dec(&pmcraid_adapter_count); + return rc; + } + + dev_info(&pdev->dev, + "Found new IOA(%x:%x), Total IOA count: %d\n", + pdev->vendor, pdev->device, + atomic_read(&pmcraid_adapter_count)); + + rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME); + + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't register memory range of registers\n"); + goto out_disable_device; + } + + mapped_pci_addr = pci_iomap(pdev, 0, 0); + + if (!mapped_pci_addr) { + dev_err(&pdev->dev, "Couldn't map PCI registers memory\n"); + rc = -ENOMEM; + goto out_release_regions; + } + + pci_set_master(pdev); + + /* Firmware requires the system bus address of IOARCB to be within + * 32-bit addressable range though it has 64-bit IOARRIN register. + * However, firmware supports 64-bit streaming DMA buffers, whereas + * coherent buffers are to be 32-bit. Since pci_alloc_consistent always + * returns memory within 4GB (if not, change this logic), coherent + * buffers are within firmware acceptible address ranges. 
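+ * (On configurations where dma_addr_t is 32 bits wide the 64-bit
+ * streaming mask is not even attempted and the driver falls straight
+ * back to the 32-bit mask.)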
+ */ + if ((sizeof(dma_addr_t) == 4) || + pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + + /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32 + * bit mask for pci_alloc_consistent to return addresses within 4GB + */ + if (rc == 0) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + + if (rc != 0) { + dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + goto cleanup_nomem; + } + + host = scsi_host_alloc(&pmcraid_host_template, + sizeof(struct pmcraid_instance)); + + if (!host) { + dev_err(&pdev->dev, "scsi_host_alloc failed!\n"); + rc = -ENOMEM; + goto cleanup_nomem; + } + + host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET; + host->unique_id = host->host_no; + host->max_channel = PMCRAID_MAX_BUS_TO_SCAN; + host->max_cmd_len = PMCRAID_MAX_CDB_LEN; + + /* zero out entire instance structure */ + pinstance = (struct pmcraid_instance *)host->hostdata; + memset(pinstance, 0, sizeof(*pinstance)); + + pinstance->chip_cfg = + (struct pmcraid_chip_details *)(dev_id->driver_data); + + rc = pmcraid_init_instance(pdev, host, mapped_pci_addr); + + if (rc < 0) { + dev_err(&pdev->dev, "failed to initialize adapter instance\n"); + goto out_scsi_host_put; + } + + pci_set_drvdata(pdev, pinstance); + + /* Save PCI config-space for use following the reset */ + rc = pci_save_state(pinstance->pdev); + + if (rc != 0) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + goto out_scsi_host_put; + } + + pmcraid_disable_interrupts(pinstance, ~0); + + rc = pmcraid_register_interrupt_handler(pinstance); + + if (rc) { + pmcraid_err("couldn't register interrupt handler\n"); + goto out_scsi_host_put; + } + + pmcraid_init_tasklets(pinstance); + + /* allocate verious buffers used by LLD.*/ + rc = pmcraid_init_buffers(pinstance); + + if (rc) { + pmcraid_err("couldn't allocate memory blocks\n"); + goto out_unregister_isr; + } + + /* check the reset type required */ + pmcraid_reset_type(pinstance); + + pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); + + /* Start IOA firmware initialization and bring card to Operational + * state. 
+ */ + pmcraid_info("starting IOA initialization sequence\n"); + if (pmcraid_reset_bringup(pinstance)) { + pmcraid_err("couldn't initialize IOA \n"); + rc = 1; + goto out_release_bufs; + } + + /* Add adapter instance into mid-layer list */ + rc = scsi_add_host(pinstance->host, &pdev->dev); + if (rc != 0) { + pmcraid_err("couldn't add host into mid-layer: %d\n", rc); + goto out_release_bufs; + } + + scsi_scan_host(pinstance->host); + + rc = pmcraid_setup_chrdev(pinstance); + + if (rc != 0) { + pmcraid_err("couldn't create mgmt interface, error: %x\n", + rc); + goto out_remove_host; + } + + /* Schedule worker thread to handle CCN and take care of adding and + * removing devices to OS + */ + atomic_set(&pinstance->expose_resources, 1); + schedule_work(&pinstance->worker_q); + return rc; + +out_remove_host: + scsi_remove_host(host); + +out_release_bufs: + pmcraid_release_buffers(pinstance); + +out_unregister_isr: + pmcraid_kill_tasklets(pinstance); + pmcraid_unregister_interrupt_handler(pinstance); + +out_scsi_host_put: + scsi_host_put(host); + +cleanup_nomem: + iounmap(mapped_pci_addr); + +out_release_regions: + pci_release_regions(pdev); + +out_disable_device: + atomic_dec(&pmcraid_adapter_count); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return -ENODEV; +} + +/* + * PCI driver structure of pcmraid driver + */ +static struct pci_driver pmcraid_driver = { + .name = PMCRAID_DRIVER_NAME, + .id_table = pmcraid_pci_table, + .probe = pmcraid_probe, + .remove = pmcraid_remove, + .suspend = pmcraid_suspend, + .resume = pmcraid_resume, + .shutdown = pmcraid_shutdown +}; + + +/** + * pmcraid_init - module load entry point + */ +static int __init pmcraid_init(void) +{ + dev_t dev; + int error; + + pmcraid_info("%s Device Driver version: %s %s\n", + PMCRAID_DRIVER_NAME, + PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); + + error = alloc_chrdev_region(&dev, 0, + PMCRAID_MAX_ADAPTERS, + PMCRAID_DEVFILE); + + if (error) { + pmcraid_err("failed to get a major number for adapters\n"); + goto out_init; + } + + pmcraid_major = MAJOR(dev); + pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE); + + if (IS_ERR(pmcraid_class)) { + error = PTR_ERR(pmcraid_class); + pmcraid_err("failed to register with with sysfs, error = %x\n", + error); + goto out_unreg_chrdev; + } + + + error = pmcraid_netlink_init(); + + if (error) + goto out_unreg_chrdev; + + error = pci_register_driver(&pmcraid_driver); + + if (error == 0) + goto out_init; + + pmcraid_err("failed to register pmcraid driver, error = %x\n", + error); + class_destroy(pmcraid_class); + pmcraid_netlink_release(); + +out_unreg_chrdev: + unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); +out_init: + return error; +} + +/** + * pmcraid_exit - module unload entry point + */ +static void __exit pmcraid_exit(void) +{ + pmcraid_netlink_release(); + class_destroy(pmcraid_class); + unregister_chrdev_region(MKDEV(pmcraid_major, 0), + PMCRAID_MAX_ADAPTERS); + pci_unregister_driver(&pmcraid_driver); +} + +module_init(pmcraid_init); +module_exit(pmcraid_exit); diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h new file mode 100644 index 000000000000..614b3a764fed --- /dev/null +++ b/drivers/scsi/pmcraid.h @@ -0,0 +1,1029 @@ +/* + * pmcraid.h -- PMC Sierra MaxRAID controller driver header file + * + * Copyright (C) 2008, 2009 PMC Sierra Inc. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PMCRAID_H
+#define _PMCRAID_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+/*
+ * Driver name : string representing the driver name
+ * Device file : /dev file to be used for management interfaces
+ * Driver version: version string in major_version.minor_version.patch format
+ * Driver date : date information in "Mon dd yyyy" format
+ */
+#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
+#define PMCRAID_DEVFILE "pmcsas"
+#define PMCRAID_DRIVER_VERSION "1.0.2"
+#define PMCRAID_DRIVER_DATE __DATE__
+
+/* Maximum number of adapters supported by current version of the driver */
+#define PMCRAID_MAX_ADAPTERS 1024
+
+/* Bit definitions as per firmware, bit position [0][1][2].....[31] */
+#define PMC_BIT8(n) (1 << (7-n))
+#define PMC_BIT16(n) (1 << (15-n))
+#define PMC_BIT32(n) (1 << (31-n))
+
+/* PMC PCI vendor ID and device ID values */
+#define PCI_VENDOR_ID_PMC 0x11F8
+#define PCI_DEVICE_ID_PMC_MAXRAID 0x5220
+
+/*
+ * MAX_CMD : maximum commands that can be outstanding with IOA
+ * MAX_IO_CMD : command blocks available for IO commands
+ * MAX_HCAM_CMD : command blocks available for HCAMs
+ * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
+ */
+#define PMCRAID_MAX_CMD 1024
+#define PMCRAID_MAX_IO_CMD 1020
+#define PMCRAID_MAX_HCAM_CMD 2
+#define PMCRAID_MAX_INTERNAL_CMD 2
+
+/* MAX_IOADLS : max number of scatter-gather lists supported by IOA
+ * IOADLS_INTERNAL : number of ioadls included as part of IOARCB.
+ * IOADLS_EXTERNAL : number of ioadls allocated external to IOARCB + */ +#define PMCRAID_IOADLS_INTERNAL 27 +#define PMCRAID_IOADLS_EXTERNAL 37 +#define PMCRAID_MAX_IOADLS PMCRAID_IOADLS_INTERNAL + +/* HRRQ_ENTRY_SIZE : size of hrrq buffer + * IOARCB_ALIGNMENT : alignment required for IOARCB + * IOADL_ALIGNMENT : alignment requirement for IOADLs + * MSIX_VECTORS : number of MSIX vectors supported + */ +#define HRRQ_ENTRY_SIZE sizeof(__le32) +#define PMCRAID_IOARCB_ALIGNMENT 32 +#define PMCRAID_IOADL_ALIGNMENT 16 +#define PMCRAID_IOASA_ALIGNMENT 4 +#define PMCRAID_NUM_MSIX_VECTORS 1 + +/* various other limits */ +#define PMCRAID_VENDOR_ID_LEN 8 +#define PMCRAID_PRODUCT_ID_LEN 16 +#define PMCRAID_SERIAL_NUM_LEN 8 +#define PMCRAID_LUN_LEN 8 +#define PMCRAID_MAX_CDB_LEN 16 +#define PMCRAID_DEVICE_ID_LEN 8 +#define PMCRAID_SENSE_DATA_LEN 256 +#define PMCRAID_ADD_CMD_PARAM_LEN 48 + +#define PMCRAID_MAX_BUS_TO_SCAN 1 +#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256 +#define PMCRAID_MAX_NUM_LUNS_PER_TARGET 8 + +/* IOA bus/target/lun number of IOA resources */ +#define PMCRAID_IOA_BUS_ID 0xfe +#define PMCRAID_IOA_TARGET_ID 0xff +#define PMCRAID_IOA_LUN_ID 0xff +#define PMCRAID_VSET_BUS_ID 0x1 +#define PMCRAID_VSET_LUN_ID 0x0 +#define PMCRAID_PHYS_BUS_ID 0x0 +#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8 +#define PMCRAID_MAX_VSET_TARGETS 240 +#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8 + +#define PMCRAID_IOA_MAX_SECTORS 32767 +#define PMCRAID_VSET_MAX_SECTORS 512 +#define PMCRAID_MAX_CMD_PER_LUN 254 + +/* Number of configuration table entries (resources) */ +#define PMCRAID_MAX_NUM_OF_VSETS 240 + +/* Todo : Check max limit for Phase 1 */ +#define PMCRAID_MAX_NUM_OF_PHY_DEVS 256 + +/* MAX_NUM_OF_DEVS includes 1 FP, 1 Dummy Enclosure device */ +#define PMCRAID_MAX_NUM_OF_DEVS \ + (PMCRAID_MAX_NUM_OF_VSETS + PMCRAID_MAX_NUM_OF_PHY_DEVS + 2) + +#define PMCRAID_MAX_RESOURCES PMCRAID_MAX_NUM_OF_DEVS + +/* Adapter Commands used by driver */ +#define PMCRAID_QUERY_RESOURCE_STATE 0xC2 +#define PMCRAID_RESET_DEVICE 0xC3 +/* options to select reset target */ +#define ENABLE_RESET_MODIFIER 0x80 +#define RESET_DEVICE_LUN 0x40 +#define RESET_DEVICE_TARGET 0x20 +#define RESET_DEVICE_BUS 0x10 + +#define PMCRAID_IDENTIFY_HRRQ 0xC4 +#define PMCRAID_QUERY_IOA_CONFIG 0xC5 +#define PMCRAID_QUERY_CMD_STATUS 0xCB +#define PMCRAID_ABORT_CMD 0xC7 + +/* CANCEL ALL command, provides option for setting SYNC_COMPLETE + * on the target resources for which commands got cancelled + */ +#define PMCRAID_CANCEL_ALL_REQUESTS 0xCE +#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL PMC_BIT8(0) + +/* HCAM command and types of HCAM supported by IOA */ +#define PMCRAID_HOST_CONTROLLED_ASYNC 0xCF +#define PMCRAID_HCAM_CODE_CONFIG_CHANGE 0x01 +#define PMCRAID_HCAM_CODE_LOG_DATA 0x02 + +/* IOA shutdown command and various shutdown types */ +#define PMCRAID_IOA_SHUTDOWN 0xF7 +#define PMCRAID_SHUTDOWN_NORMAL 0x00 +#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL 0x40 +#define PMCRAID_SHUTDOWN_NONE 0x100 +#define PMCRAID_SHUTDOWN_ABBREV 0x80 + +/* SET SUPPORTED DEVICES command and the option to select all the + * devices to be supported + */ +#define PMCRAID_SET_SUPPORTED_DEVICES 0xFB +#define ALL_DEVICES_SUPPORTED PMC_BIT8(0) + +/* This option is used with SCSI WRITE_BUFFER command */ +#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE 0x05 + +/* IOASC Codes used by driver */ +#define PMCRAID_IOASC_SENSE_MASK 0xFFFFFF00 +#define PMCRAID_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) +#define PMCRAID_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) +#define 
PMCRAID_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8) +#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) + +#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000 +#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 +#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000 +#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000 +#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC 0x03110C00 +#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE 0x04050000 +#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT 0x04080100 +#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR 0x04448500 +#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600 +#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000 +#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 +#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 +#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 + +/* Driver defined IOASCs */ +#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001 +#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002 + +/* Various timeout values (in milliseconds) used. If any of these are chip + * specific, move them to pmcraid_chip_details structure. + */ +#define PMCRAID_PCI_DEASSERT_TIMEOUT 2000 +#define PMCRAID_BIST_TIMEOUT 2000 +#define PMCRAID_AENWAIT_TIMEOUT 5000 +#define PMCRAID_TRANSOP_TIMEOUT 60000 + +#define PMCRAID_RESET_TIMEOUT (2 * HZ) +#define PMCRAID_CHECK_FOR_RESET_TIMEOUT ((HZ / 10)) +#define PMCRAID_VSET_IO_TIMEOUT (60 * HZ) +#define PMCRAID_INTERNAL_TIMEOUT (60 * HZ) +#define PMCRAID_SHUTDOWN_TIMEOUT (150 * HZ) +#define PMCRAID_RESET_BUS_TIMEOUT (60 * HZ) +#define PMCRAID_RESET_HOST_TIMEOUT (150 * HZ) +#define PMCRAID_REQUEST_SENSE_TIMEOUT (30 * HZ) +#define PMCRAID_SET_SUP_DEV_TIMEOUT (2 * 60 * HZ) + +/* structure to represent a scatter-gather element (IOADL descriptor) */ +struct pmcraid_ioadl_desc { + __le64 address; + __le32 data_len; + __u8 reserved[3]; + __u8 flags; +} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT))); + +/* pmcraid_ioadl_desc.flags values */ +#define IOADL_FLAGS_CHAINED PMC_BIT8(0) +#define IOADL_FLAGS_LAST_DESC PMC_BIT8(1) +#define IOADL_FLAGS_READ_LAST PMC_BIT8(1) +#define IOADL_FLAGS_WRITE_LAST PMC_BIT8(1) + + +/* additional IOARCB data which can be CDB or additional request parameters + * or list of IOADLs. Firmware supports max of 512 bytes for IOARCB, hence then + * number of IOADLs are limted to 27. 
In case they are more than 27, they will + * be used in chained form + */ +struct pmcraid_ioarcb_add_data { + union { + struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL]; + __u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN]; + } u; +}; + +/* + * IOA Request Control Block + */ +struct pmcraid_ioarcb { + __le64 ioarcb_bus_addr; + __le32 resource_handle; + __le32 response_handle; + __le64 ioadl_bus_addr; + __le32 ioadl_length; + __le32 data_transfer_length; + __le64 ioasa_bus_addr; + __le16 ioasa_len; + __le16 cmd_timeout; + __le16 add_cmd_param_offset; + __le16 add_cmd_param_length; + __le32 reserved1[2]; + __le32 reserved2; + __u8 request_type; + __u8 request_flags0; + __u8 request_flags1; + __u8 hrrq_id; + __u8 cdb[PMCRAID_MAX_CDB_LEN]; + struct pmcraid_ioarcb_add_data add_data; +} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); + +/* well known resource handle values */ +#define PMCRAID_IOA_RES_HANDLE 0xffffffff +#define PMCRAID_INVALID_RES_HANDLE 0 + +/* pmcraid_ioarcb.request_type values */ +#define REQ_TYPE_SCSI 0x00 +#define REQ_TYPE_IOACMD 0x01 +#define REQ_TYPE_HCAM 0x02 + +/* pmcraid_ioarcb.flags0 values */ +#define TRANSFER_DIR_WRITE PMC_BIT8(0) +#define INHIBIT_UL_CHECK PMC_BIT8(2) +#define SYNC_OVERRIDE PMC_BIT8(3) +#define SYNC_COMPLETE PMC_BIT8(4) +#define NO_LINK_DESCS PMC_BIT8(5) + +/* pmcraid_ioarcb.flags1 values */ +#define DELAY_AFTER_RESET PMC_BIT8(0) +#define TASK_TAG_SIMPLE 0x10 +#define TASK_TAG_ORDERED 0x20 +#define TASK_TAG_QUEUE_HEAD 0x30 + +/* toggle bit offset in response handle */ +#define HRRQ_TOGGLE_BIT 0x01 +#define HRRQ_RESPONSE_BIT 0x02 + +/* IOA Status Area */ +struct pmcraid_ioasa_vset { + __le32 failing_lba_hi; + __le32 failing_lba_lo; + __le32 reserved; +} __attribute__((packed, aligned(4))); + +struct pmcraid_ioasa { + __le32 ioasc; + __le16 returned_status_length; + __le16 available_status_length; + __le32 residual_data_length; + __le32 ilid; + __le32 fd_ioasc; + __le32 fd_res_address; + __le32 fd_res_handle; + __le32 reserved; + + /* resource specific sense information */ + union { + struct pmcraid_ioasa_vset vset; + } u; + + /* IOA autosense data */ + __le16 auto_sense_length; + __le16 error_data_length; + __u8 sense_data[PMCRAID_SENSE_DATA_LEN]; +} __attribute__((packed, aligned(4))); + +#define PMCRAID_DRIVER_ILID 0xffffffff + +/* Config Table Entry per Resource */ +struct pmcraid_config_table_entry { + __u8 resource_type; + __u8 bus_protocol; + __le16 array_id; + __u8 common_flags0; + __u8 common_flags1; + __u8 unique_flags0; + __u8 unique_flags1; /*also used as vset target_id */ + __le32 resource_handle; + __le32 resource_address; + __u8 device_id[PMCRAID_DEVICE_ID_LEN]; + __u8 lun[PMCRAID_LUN_LEN]; +} __attribute__((packed, aligned(4))); + +/* resource types (config_table_entry.resource_type values) */ +#define RES_TYPE_AF_DASD 0x00 +#define RES_TYPE_GSCSI 0x01 +#define RES_TYPE_VSET 0x02 +#define RES_TYPE_IOA_FP 0xFF + +#define RES_IS_IOA(res) ((res).resource_type == RES_TYPE_IOA_FP) +#define RES_IS_GSCSI(res) ((res).resource_type == RES_TYPE_GSCSI) +#define RES_IS_VSET(res) ((res).resource_type == RES_TYPE_VSET) +#define RES_IS_AFDASD(res) ((res).resource_type == RES_TYPE_AF_DASD) + +/* bus_protocol values used by driver */ +#define RES_TYPE_VENCLOSURE 0x8 + +/* config_table_entry.common_flags0 */ +#define MULTIPATH_RESOURCE PMC_BIT32(0) + +/* unique_flags1 */ +#define IMPORT_MODE_MANUAL PMC_BIT8(0) + +/* well known resource handle values */ +#define RES_HANDLE_IOA 0xFFFFFFFF +#define RES_HANDLE_NONE 0x00000000 + +/* well known 
resource address values */ +#define RES_ADDRESS_IOAFP 0xFEFFFFFF +#define RES_ADDRESS_INVALID 0xFFFFFFFF + +/* BUS/TARGET/LUN values from resource_addrr */ +#define RES_BUS(res_addr) (le32_to_cpu(res_addr) & 0xFF) +#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF) +#define RES_LUN(res_addr) 0x0 + +/* configuration table structure */ +struct pmcraid_config_table { + __le16 num_entries; + __u8 table_format; + __u8 reserved1; + __u8 flags; + __u8 reserved2[11]; + struct pmcraid_config_table_entry entries[PMCRAID_MAX_RESOURCES]; +} __attribute__((packed, aligned(4))); + +/* config_table.flags value */ +#define MICROCODE_UPDATE_REQUIRED PMC_BIT32(0) + +/* + * HCAM format + */ +#define PMCRAID_HOSTRCB_LDNSIZE 4056 + +/* Error log notification format */ +struct pmcraid_hostrcb_error { + __le32 fd_ioasc; + __le32 fd_ra; + __le32 fd_rh; + __le32 prc; + union { + __u8 data[PMCRAID_HOSTRCB_LDNSIZE]; + } u; +} __attribute__ ((packed, aligned(4))); + +struct pmcraid_hcam_hdr { + __u8 op_code; + __u8 notification_type; + __u8 notification_lost; + __u8 flags; + __u8 overlay_id; + __u8 reserved1[3]; + __le32 ilid; + __le32 timestamp1; + __le32 timestamp2; + __le32 data_len; +} __attribute__((packed, aligned(4))); + +#define PMCRAID_AEN_GROUP 0x3 + +struct pmcraid_hcam_ccn { + struct pmcraid_hcam_hdr header; + struct pmcraid_config_table_entry cfg_entry; +} __attribute__((packed, aligned(4))); + +struct pmcraid_hcam_ldn { + struct pmcraid_hcam_hdr header; + struct pmcraid_hostrcb_error error_log; +} __attribute__((packed, aligned(4))); + +/* pmcraid_hcam.op_code values */ +#define HOSTRCB_TYPE_CCN 0xE1 +#define HOSTRCB_TYPE_LDN 0xE2 + +/* pmcraid_hcam.notification_type values */ +#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0 +#define NOTIFICATION_TYPE_ENTRY_NEW 0x1 +#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2 +#define NOTIFICATION_TYPE_ERROR_LOG 0x10 +#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11 + +#define HOSTRCB_NOTIFICATIONS_LOST PMC_BIT8(0) + +/* pmcraid_hcam.flags values */ +#define HOSTRCB_INTERNAL_OP_ERROR PMC_BIT8(0) +#define HOSTRCB_ERROR_RESPONSE_SENT PMC_BIT8(1) + +/* pmcraid_hcam.overlay_id values */ +#define HOSTRCB_OVERLAY_ID_08 0x08 +#define HOSTRCB_OVERLAY_ID_09 0x09 +#define HOSTRCB_OVERLAY_ID_11 0x11 +#define HOSTRCB_OVERLAY_ID_12 0x12 +#define HOSTRCB_OVERLAY_ID_13 0x13 +#define HOSTRCB_OVERLAY_ID_14 0x14 +#define HOSTRCB_OVERLAY_ID_16 0x16 +#define HOSTRCB_OVERLAY_ID_17 0x17 +#define HOSTRCB_OVERLAY_ID_20 0x20 +#define HOSTRCB_OVERLAY_ID_FF 0xFF + +/* Implementation specific card details */ +struct pmcraid_chip_details { + /* hardware register offsets */ + unsigned long ioastatus; + unsigned long ioarrin; + unsigned long mailbox; + unsigned long global_intr_mask; + unsigned long ioa_host_intr; + unsigned long ioa_host_intr_clr; + unsigned long ioa_host_mask; + unsigned long ioa_host_mask_clr; + unsigned long host_ioa_intr; + unsigned long host_ioa_intr_clr; + + /* timeout used during transitional to operational state */ + unsigned long transop_timeout; +}; + +/* IOA to HOST doorbells (interrupts) */ +#define INTRS_TRANSITION_TO_OPERATIONAL PMC_BIT32(0) +#define INTRS_IOARCB_TRANSFER_FAILED PMC_BIT32(3) +#define INTRS_IOA_UNIT_CHECK PMC_BIT32(4) +#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE PMC_BIT32(5) +#define INTRS_CRITICAL_OP_IN_PROGRESS PMC_BIT32(6) +#define INTRS_IO_DEBUG_ACK PMC_BIT32(7) +#define INTRS_IOARRIN_LOST PMC_BIT32(27) +#define INTRS_SYSTEM_BUS_MMIO_ERROR PMC_BIT32(28) +#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29) +#define INTRS_HRRQ_VALID PMC_BIT32(30) 
+#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0) + +/* Host to IOA Doorbells */ +#define DOORBELL_RUNTIME_RESET PMC_BIT32(1) +#define DOORBELL_IOA_RESET_ALERT PMC_BIT32(7) +#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9) +#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8) +#define DOORBELL_IOA_START_BIST PMC_BIT32(23) +#define DOORBELL_RESET_IOA PMC_BIT32(31) + +/* Global interrupt mask register value */ +#define GLOBAL_INTERRUPT_MASK 0x4ULL + +#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \ + INTRS_IOA_UNIT_CHECK | \ + INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \ + INTRS_IOARRIN_LOST | \ + INTRS_SYSTEM_BUS_MMIO_ERROR | \ + INTRS_IOA_PROCESSOR_ERROR) + +#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \ + INTRS_HRRQ_VALID | \ + INTRS_CRITICAL_OP_IN_PROGRESS |\ + INTRS_TRANSITION_TO_OPERATIONAL) + +/* control_block, associated with each of the commands contains IOARCB, IOADLs + * memory for IOASA. Additional 3 * 16 bytes are allocated in order to support + * additional request parameters (of max size 48) any command. + */ +struct pmcraid_control_block { + struct pmcraid_ioarcb ioarcb; + struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3]; + struct pmcraid_ioasa ioasa; +} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); + +/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls + */ +struct pmcraid_sglist { + u32 order; + u32 num_sg; + u32 num_dma_sg; + u32 buffer_len; + struct scatterlist scatterlist[1]; +}; + +/* pmcraid_cmd - LLD representation of SCSI command */ +struct pmcraid_cmd { + + /* Ptr and bus address of DMA.able control block for this command */ + struct pmcraid_control_block *ioa_cb; + dma_addr_t ioa_cb_bus_addr; + + /* sense buffer for REQUEST SENSE command if firmware is not sending + * auto sense data + */ + dma_addr_t sense_buffer_dma; + dma_addr_t dma_handle; + u8 *sense_buffer; + + /* pointer to mid layer structure of SCSI commands */ + struct scsi_cmnd *scsi_cmd; + + struct list_head free_list; + struct completion wait_for_completion; + struct timer_list timer; /* needed for internal commands */ + u32 timeout; /* current timeout value */ + u32 index; /* index into the command list */ + u8 completion_req; /* for handling internal commands */ + u8 release; /* for handling completions */ + + void (*cmd_done) (struct pmcraid_cmd *); + struct pmcraid_instance *drv_inst; + + struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */ + + /* scratch used during reset sequence */ + union { + unsigned long time_left; + struct pmcraid_resource_entry *res; + } u; +}; + +/* + * Interrupt registers of IOA + */ +struct pmcraid_interrupts { + void __iomem *ioa_host_interrupt_reg; + void __iomem *ioa_host_interrupt_clr_reg; + void __iomem *ioa_host_interrupt_mask_reg; + void __iomem *ioa_host_interrupt_mask_clr_reg; + void __iomem *global_interrupt_mask_reg; + void __iomem *host_ioa_interrupt_reg; + void __iomem *host_ioa_interrupt_clr_reg; +}; + +/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */ +struct pmcraid_isr_param { + u8 hrrq_id; /* hrrq entry index */ + u16 vector; /* allocated msi-x vector */ + struct pmcraid_instance *drv_inst; +}; + +/* AEN message header sent as part of event data to applications */ +struct pmcraid_aen_msg { + u32 hostno; + u32 length; + u8 reserved[8]; + u8 data[0]; +}; + +struct pmcraid_hostrcb { + struct pmcraid_instance *drv_inst; + struct pmcraid_aen_msg *msg; + struct pmcraid_hcam_hdr *hcam; /* pointer to hcam buffer */ + struct pmcraid_cmd *cmd; /* pointer to 
command block used */ + dma_addr_t baddr; /* system address of hcam buffer */ + atomic_t ignore; /* process HCAM response ? */ +}; + +#define PMCRAID_AEN_HDR_SIZE sizeof(struct pmcraid_aen_msg) + + + +/* + * Per adapter structure maintained by LLD + */ +struct pmcraid_instance { + /* Array of allowed-to-be-exposed resources, initialized from + * Configutation Table, later updated with CCNs + */ + struct pmcraid_resource_entry *res_entries; + + struct list_head free_res_q; /* res_entries lists for easy lookup */ + struct list_head used_res_q; /* List of to be exposed resources */ + spinlock_t resource_lock; /* spinlock to protect resource list */ + + void __iomem *mapped_dma_addr; + void __iomem *ioa_status; /* Iomapped IOA status register */ + void __iomem *mailbox; /* Iomapped mailbox register */ + void __iomem *ioarrin; /* IOmapped IOARR IN register */ + + struct pmcraid_interrupts int_regs; + struct pmcraid_chip_details *chip_cfg; + + /* HostRCBs needed for HCAM */ + struct pmcraid_hostrcb ldn; + struct pmcraid_hostrcb ccn; + + + /* Bus address of start of HRRQ */ + dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to 1st entry of HRRQ */ + __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to last entry of HRRQ */ + __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS]; + + /* Pointer to current pointer of hrrq */ + __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS]; + + /* Lock for HRRQ access */ + spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS]; + + /* Expected toggle bit at host */ + u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS]; + + /* No of Reset IOA retries . IOA marked dead if threshold exceeds */ + u8 ioa_reset_attempts; +#define PMCRAID_RESET_ATTEMPTS 3 + + /* Wait Q for threads to wait for Reset IOA completion */ + wait_queue_head_t reset_wait_q; + struct pmcraid_cmd *reset_cmd; + + /* structures for supporting SIGIO based AEN. 
*/ + struct fasync_struct *aen_queue; + struct mutex aen_queue_lock; /* lock for aen subscribers list */ + struct cdev cdev; + + struct Scsi_Host *host; /* mid layer interface structure handle */ + struct pci_dev *pdev; /* PCI device structure handle */ + + u8 current_log_level; /* default level for logging IOASC errors */ + + u8 num_hrrq; /* Number of interrupt vectors allocated */ + dev_t dev; /* Major-Minor numbers for Char device */ + + /* Used as ISR handler argument */ + struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; + + /* configuration table */ + struct pmcraid_config_table *cfg_table; + dma_addr_t cfg_table_bus_addr; + + /* structures related to command blocks */ + struct kmem_cache *cmd_cachep; /* cache for cmd blocks */ + struct pci_pool *control_pool; /* pool for control blocks */ + char cmd_pool_name[64]; /* name of cmd cache */ + char ctl_pool_name[64]; /* name of control cache */ + + struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD]; + + struct list_head free_cmd_pool; + struct list_head pending_cmd_pool; + spinlock_t free_pool_lock; /* free pool lock */ + spinlock_t pending_pool_lock; /* pending pool lock */ + + /* No of IO commands pending with FW */ + atomic_t outstanding_cmds; + + /* should add/delete resources to mid-layer now ?*/ + atomic_t expose_resources; + + /* Tasklet to handle deferred processing */ + struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS]; + + /* Work-queue (Shared) for deferred reset processing */ + struct work_struct worker_q; + + + u32 ioa_state:4; /* For IOA Reset sequence FSM */ +#define IOA_STATE_OPERATIONAL 0x0 +#define IOA_STATE_UNKNOWN 0x1 +#define IOA_STATE_DEAD 0x2 +#define IOA_STATE_IN_SOFT_RESET 0x3 +#define IOA_STATE_IN_HARD_RESET 0x4 +#define IOA_STATE_IN_RESET_ALERT 0x5 +#define IOA_STATE_IN_BRINGDOWN 0x6 +#define IOA_STATE_IN_BRINGUP 0x7 + + u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */ + u32 ioa_hard_reset:1; /* TRUE if Hard Reset is needed */ + u32 ioa_unit_check:1; /* Indicates Unit Check condition */ + u32 ioa_bringdown:1; /* whether IOA needs to be brought down */ + u32 force_ioa_reset:1; /* force adapter reset ? */ + u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */ + u32 ioa_shutdown_type:2;/* shutdown type used during reset */ +#define SHUTDOWN_NONE 0x0 +#define SHUTDOWN_NORMAL 0x1 +#define SHUTDOWN_ABBREV 0x2 + +}; + +/* LLD maintained resource entry structure */ +struct pmcraid_resource_entry { + struct list_head queue; /* link to "to be exposed" resources */ + struct pmcraid_config_table_entry cfg_entry; + struct scsi_device *scsi_dev; /* Link scsi_device structure */ + atomic_t read_failures; /* count of failed READ commands */ + atomic_t write_failures; /* count of failed WRITE commands */ + + /* To indicate add/delete/modify during CCN */ + u8 change_detected; +#define RES_CHANGE_ADD 0x1 /* add this to mid-layer */ +#define RES_CHANGE_DEL 0x2 /* remove this from mid-layer */ + + u8 reset_progress; /* Device is resetting */ + + /* + * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this + * flag will be set, mid layer will be asked to retry. In the next + * attempt, this flag will be checked in queuecommand() to set + * SYNC_COMPLETE flag in IOARCB (flag_0). + */ + u8 sync_reqd; + + /* target indicates the mapped target_id assigned to this resource if + * this is VSET resource. 
For non-VSET resources this will be un-used + * or zero + */ + u8 target; +}; + +/* Data structures used in IOASC error code logging */ +struct pmcraid_ioasc_error { + u32 ioasc_code; /* IOASC code */ + u8 log_level; /* default log level assignment. */ + char *error_string; +}; + +/* Initial log_level assignments for various IOASCs */ +#define IOASC_LOG_LEVEL_NONE 0x0 /* no logging */ +#define IOASC_LOG_LEVEL_MUST 0x1 /* must log: all high-severity errors */ +#define IOASC_LOG_LEVEL_HARD 0x2 /* optional – low severity errors */ + +/* Error information maintained by LLD. LLD initializes the pmcraid_error_table + * statically. + */ +static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = { + {0x01180600, IOASC_LOG_LEVEL_MUST, + "Recovered Error, soft media error, sector reassignment suggested"}, + {0x015D0000, IOASC_LOG_LEVEL_MUST, + "Recovered Error, failure prediction thresold exceeded"}, + {0x015D9200, IOASC_LOG_LEVEL_MUST, + "Recovered Error, soft Cache Card Battery error thresold"}, + {0x015D9200, IOASC_LOG_LEVEL_MUST, + "Recovered Error, soft Cache Card Battery error thresold"}, + {0x02048000, IOASC_LOG_LEVEL_MUST, + "Not Ready, IOA Reset Required"}, + {0x02408500, IOASC_LOG_LEVEL_MUST, + "Not Ready, IOA microcode download required"}, + {0x03110B00, IOASC_LOG_LEVEL_MUST, + "Medium Error, data unreadable, reassignment suggested"}, + {0x03110C00, IOASC_LOG_LEVEL_MUST, + "Medium Error, data unreadable do not reassign"}, + {0x03310000, IOASC_LOG_LEVEL_MUST, + "Medium Error, media corrupted"}, + {0x04050000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA can't communicate with device"}, + {0x04080000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device bus error"}, + {0x04080000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device bus is not functioning"}, + {0x04118000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA reserved area data check"}, + {0x04118100, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA reserved area invalid data pattern"}, + {0x04118200, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA reserved area LRC error"}, + {0x04320000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, reassignment space exhausted"}, + {0x04330000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, data transfer underlength error"}, + {0x04330000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, data transfer overlength error"}, + {0x04418000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, PCI bus error"}, + {0x04440000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device error"}, + {0x04448300, IOASC_LOG_LEVEL_MUST, + "Hardware Error, undefined device response"}, + {0x04448400, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA microcode error"}, + {0x04448600, IOASC_LOG_LEVEL_MUST, + "Hardware Error, IOA reset required"}, + {0x04449200, IOASC_LOG_LEVEL_MUST, + "Hardware Error, hard Cache Fearuee Card Battery error"}, + {0x0444A000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, failed device altered"}, + {0x0444A200, IOASC_LOG_LEVEL_MUST, + "Hardware Error, data check after reassignment"}, + {0x0444A300, IOASC_LOG_LEVEL_MUST, + "Hardware Error, LRC error after reassignment"}, + {0x044A0000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, device bus error (msg/cmd phase)"}, + {0x04670400, IOASC_LOG_LEVEL_MUST, + "Hardware Error, new device can't be used"}, + {0x04678000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, invalid multiadapter configuration"}, + {0x04678100, IOASC_LOG_LEVEL_MUST, + "Hardware Error, incorrect connection between enclosures"}, + {0x04678200, IOASC_LOG_LEVEL_MUST, + "Hardware Error, connections exceed IOA design limits"}, + {0x04678300, 
IOASC_LOG_LEVEL_MUST, + "Hardware Error, incorrect multipath connection"}, + {0x04679000, IOASC_LOG_LEVEL_MUST, + "Hardware Error, command to LUN failed"}, + {0x064C8000, IOASC_LOG_LEVEL_HARD, + "Unit Attention, cache exists for missing/failed device"}, + {0x06670100, IOASC_LOG_LEVEL_HARD, + "Unit Attention, incompatible exposed mode device"}, + {0x06670600, IOASC_LOG_LEVEL_HARD, + "Unit Attention, attachment of logical unit failed"}, + {0x06678000, IOASC_LOG_LEVEL_MUST, + "Unit Attention, cables exceed connective design limit"}, + {0x06678300, IOASC_LOG_LEVEL_MUST, + "Unit Attention, incomplete multipath connection between" \ + "IOA and enclosure"}, + {0x06678400, IOASC_LOG_LEVEL_MUST, + "Unit Attention, incomplete multipath connection between" \ + "device and enclosure"}, + {0x06678500, IOASC_LOG_LEVEL_MUST, + "Unit Attention, incomplete multipath connection between" \ + "IOA and remote IOA"}, + {0x06678600, IOASC_LOG_LEVEL_HARD, + "Unit Attention, missing remote IOA"}, + {0x06679100, IOASC_LOG_LEVEL_HARD, + "Unit Attention, enclosure doesn't support required multipath" \ + "function"}, + {0x06698200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, corrupt array parity detected on device"}, + {0x066B0200, IOASC_LOG_LEVEL_MUST, + "Unit Attention, array exposed"}, + {0x066B8200, IOASC_LOG_LEVEL_HARD, + "Unit Attention, exposed array is still protected"}, + {0x066B9200, IOASC_LOG_LEVEL_MUST, + "Unit Attention, Multipath redundancy level got worse"}, + {0x07270000, IOASC_LOG_LEVEL_HARD, + "Data Protect, device is read/write protected by IOA"}, + {0x07278000, IOASC_LOG_LEVEL_HARD, + "Data Protect, IOA doesn't support device attribute"}, + {0x07278100, IOASC_LOG_LEVEL_HARD, + "Data Protect, NVRAM mirroring prohibited"}, + {0x07278400, IOASC_LOG_LEVEL_MUST, + "Data Protect, array is short 2 or more devices"}, + {0x07278600, IOASC_LOG_LEVEL_MUST, + "Data Protect, exposed array is short a required device"}, + {0x07278700, IOASC_LOG_LEVEL_MUST, + "Data Protect, array members not at required addresses"}, + {0x07278800, IOASC_LOG_LEVEL_MUST, + "Data Protect, exposed mode device resource address conflict"}, + {0x07278900, IOASC_LOG_LEVEL_MUST, + "Data Protect, incorrect resource address of exposed mode device"}, + {0x07278A00, IOASC_LOG_LEVEL_MUST, + "Data Protect, Array is missing a device and parity is out of sync"}, + {0x07278B00, IOASC_LOG_LEVEL_MUST, + "Data Protect, maximum number of arrays already exist"}, + {0x07278C00, IOASC_LOG_LEVEL_HARD, + "Data Protect, cannot locate cache data for device"}, + {0x07278D00, IOASC_LOG_LEVEL_HARD, + "Data Protect, cache data exits for a changed device"}, + {0x07279100, IOASC_LOG_LEVEL_MUST, + "Data Protect, detection of a device requiring format"}, + {0x07279200, IOASC_LOG_LEVEL_MUST, + "Data Protect, IOA exceeds maximum number of devices"}, + {0x07279600, IOASC_LOG_LEVEL_MUST, + "Data Protect, missing array, volume set is not functional"}, + {0x07279700, IOASC_LOG_LEVEL_MUST, + "Data Protect, single device for a volume set"}, + {0x07279800, IOASC_LOG_LEVEL_MUST, + "Data Protect, missing multiple devices for a volume set"}, + {0x07279900, IOASC_LOG_LEVEL_HARD, + "Data Protect, maximum number of volument sets already exists"}, + {0x07279A00, IOASC_LOG_LEVEL_MUST, + "Data Protect, other volume set problem"}, +}; + +/* macros to help in debugging */ +#define pmcraid_err(...) \ + printk(KERN_ERR "MaxRAID: "__VA_ARGS__) + +#define pmcraid_info(...) 
\ + if (pmcraid_debug_log) \ + printk(KERN_INFO "MaxRAID: "__VA_ARGS__) + +/* check if given command is a SCSI READ or SCSI WRITE command */ +#define SCSI_READ_CMD 0x1 /* any of SCSI READ commands */ +#define SCSI_WRITE_CMD 0x2 /* any of SCSI WRITE commands */ +#define SCSI_CMD_TYPE(opcode) \ +({ u8 op = opcode; u8 __type = 0;\ + if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\ + __type = SCSI_READ_CMD;\ + else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \ + op == WRITE_16)\ + __type = SCSI_WRITE_CMD;\ + __type;\ +}) + +#define IS_SCSI_READ_WRITE(opcode) \ +({ u8 __type = SCSI_CMD_TYPE(opcode); \ + (__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\ +}) + + +/* + * pmcraid_ioctl_header - definition of header structure that preceeds all the + * buffers given as ioctl arguements. + * + * .signature : always ASCII string, "PMCRAID" + * .reserved : not used + * .buffer_length : length of the buffer following the header + */ +struct pmcraid_ioctl_header { + u8 signature[8]; + u32 reserved; + u32 buffer_length; +}; + +#define PMCRAID_IOCTL_SIGNATURE "PMCRAID" + + +/* + * pmcraid_event_details - defines AEN details that apps can retrieve from LLD + * + * .rcb_ccn - complete RCB of CCN + * .rcb_ldn - complete RCB of CCN + */ +struct pmcraid_event_details { + struct pmcraid_hcam_ccn rcb_ccn; + struct pmcraid_hcam_ldn rcb_ldn; +}; + +/* + * pmcraid_driver_ioctl_buffer - structure passed as argument to most of the + * PMC driver handled ioctls. + */ +struct pmcraid_driver_ioctl_buffer { + struct pmcraid_ioctl_header ioctl_header; + struct pmcraid_event_details event_details; +}; + +/* + * pmcraid_passthrough_ioctl_buffer - structure given as argument to + * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires + * 32-byte alignment so, it is necessary to pack this structure to avoid any + * holes between ioctl_header and passthrough buffer + * + * .ioactl_header : ioctl header + * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer + * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware + * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on + * the transfer directions passed in ioarcb.flags0. Contents + * of this buffer are valid only when ioarcb.data_transfer_len + * is not zero. + */ +struct pmcraid_passthrough_ioctl_buffer { + struct pmcraid_ioctl_header ioctl_header; + struct pmcraid_ioarcb ioarcb; + struct pmcraid_ioasa ioasa; + u8 request_buffer[1]; +} __attribute__ ((packed)); + +/* + * keys to differentiate between driver handled IOCTLs and passthrough + * IOCTLs passed to IOA. driver determines the ioctl type using macro + * _IOC_TYPE + */ +#define PMCRAID_DRIVER_IOCTL 'D' +#define PMCRAID_PASSTHROUGH_IOCTL 'F' + +#define DRV_IOCTL(n, size) \ + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) + +#define FMW_IOCTL(n, size) \ + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) + +/* + * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. + * This is to facilitate applications avoiding un-necessary memory allocations. + * For example, most of driver handled ioctls do not require ioarcb, ioasa. 
+ */ +#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg)) + +/* Driver handled IOCTL command definitions */ + +#define PMCRAID_IOCTL_RESET_ADAPTER \ + DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header)) + +/* passthrough/firmware handled commands */ +#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \ + FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer)) + +#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \ + FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer)) + + +#endif /* _PMCRAID_H */ From 5bab08858cecaacba803e8c90638db14bde470c0 Mon Sep 17 00:00:00 2001 From: Charlie Brady Date: Wed, 26 Aug 2009 14:16:57 -0400 Subject: [PATCH 195/209] [SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530 These storage arrays can use RDAC when configured with 'linux' as the initiator. http://www.sun.com/storage/disk_systems/workgroup/2500/ http://www.sun.com/storage/disk_systems/workgroup/2510/ http://www.sun.com/storage/disk_systems/workgroup/2530/ Signed-off-by: Charlie Brady Reviewed-by: Chandra Seetharaman Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 872292f659c3..8b9e14318d1b 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -628,6 +628,9 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"SGI", "IS"}, {"STK", "OPENstorage D280"}, {"SUN", "CSM200_R"}, + {"SUN", "LCSM100_I"}, + {"SUN", "LCSM100_S"}, + {"SUN", "LCSM100_E"}, {"SUN", "LCSM100_F"}, {"DELL", "MD3000"}, {"DELL", "MD3000i"}, From 9e39089b958818c8f3d772414cd27f84fb2622f2 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 11:43:36 +0530 Subject: [PATCH 196/209] [SCSI] mptsas : Sanity check for phyinfo is added Check for phyinfo->phy before calling sas_port_delete_phy. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 21bd78e4f306..f744f0fc9491 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -852,7 +852,13 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) port_details->num_phys--; port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); - sas_port_delete_phy(port_details->port, phy_info->phy); + if (phy_info->phy) { + devtprintk(ioc, dev_printk(KERN_DEBUG, + &phy_info->phy->dev, MYIOC_s_FMT + "delete phy %d, phy-obj (0x%p)\n", ioc->name, + phy_info->phy_id, phy_info->phy)); + sas_port_delete_phy(port_details->port, phy_info->phy); + } phy_info->port_details = NULL; } From f44fd18198eb26b62ba86b17016e9441ce95fc71 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 11:44:19 +0530 Subject: [PATCH 197/209] [SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off On Big endian system kernel will crash due to address translation is not handle properly. 
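The one-line fix below converts the SAS address out of its little-endian wire format before it is used for the port lookup. As a minimal userspace sketch of the same issue (assuming an 8-byte little-endian SAS address as the firmware reports it; glibc's le64toh() from <endian.h> stands in for the kernel's le64_to_cpu()):

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* SAS address 0x5000c50012345678 as it sits in the event data (LE) */
        const uint8_t wire[8] = { 0x78, 0x56, 0x34, 0x12, 0x00, 0xc5, 0x00, 0x50 };
        uint64_t sas_address;

        memcpy(&sas_address, wire, sizeof(sas_address));

        /*
         * Without this conversion a big-endian host sees a byte-swapped
         * address, the port lookup fails and the expander is never torn off.
         */
        sas_address = le64toh(sas_address);

        printf("sas address: 0x%016" PRIx64 "\n", sas_address);
        return 0;
}
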
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f744f0fc9491..02a18dfcd52a 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -3314,6 +3314,7 @@ mptsas_send_expander_event(struct fw_event_work *fw_event) expander_data = (MpiEventDataSasExpanderStatusChange_t *) fw_event->event_data; memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64)); + sas_address = le64_to_cpu(sas_address); port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address); if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) { From c55b89fba9872ebcd5ac15cdfdad29ffb89329f0 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 11:44:57 +0530 Subject: [PATCH 198/209] [SCSI] mptsas : PAE Kernel more than 4 GB kernel panic This patch is solving problem for PAE kernel DMA operation. On PAE system dma_addr and unsigned long will have different values. Now dma_addr is not type casted using unsigned long. Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 8ab7b37ed70d..76fa2ee0b574 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1015,9 +1015,9 @@ mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr) { SGESimple64_t *pSge = (SGESimple64_t *) pAddr; pSge->Address.Low = cpu_to_le32 - (lower_32_bits((unsigned long)(dma_addr))); + (lower_32_bits(dma_addr)); pSge->Address.High = cpu_to_le32 - (upper_32_bits((unsigned long)dma_addr)); + (upper_32_bits(dma_addr)); pSge->FlagsLength = cpu_to_le32 ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING)); } @@ -1038,8 +1038,8 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr) u32 tmp; pSge->Address.Low = cpu_to_le32 - (lower_32_bits((unsigned long)(dma_addr))); - tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); + (lower_32_bits(dma_addr)); + tmp = (u32)(upper_32_bits(dma_addr)); /* * 1078 errata workaround for the 36GB limitation @@ -1101,7 +1101,7 @@ mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr) pChain->NextChainOffset = next; pChain->Address.Low = cpu_to_le32(tmp); - tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); + tmp = (u32)(upper_32_bits(dma_addr)); pChain->Address.High = cpu_to_le32(tmp); } From fea984034b1ccdb26e8163ed5350ce7f0563b136 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 11:45:53 +0530 Subject: [PATCH 199/209] [SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device Driver is modified to return DID_NO_CONNECT for all pending I/O requests for bus type SAS, if it founds the target is removed at the firmware level. 
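The mptbase.c change above matters because on a 32-bit PAE kernel dma_addr_t is 64 bits wide while unsigned long is only 32: casting before taking the upper half silently produces zero and the high address bits are lost. A standalone sketch of that truncation, with uint32_t standing in for the 32-bit unsigned long and local stand-ins for the lower_32_bits()/upper_32_bits() helpers:

#include <inttypes.h>
#include <stdio.h>

/* local stand-ins for the kernel helpers, for illustration only */
#define lower_32_bits(n)        ((uint32_t)(n))
#define upper_32_bits(n)        ((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
        uint64_t dma_addr = 0x123456789abcdef0ULL;      /* buffer above 4 GB */

        /* buggy form: the 32-bit cast truncates before the shift */
        uint32_t bad_hi  = upper_32_bits((uint32_t)dma_addr);
        /* fixed form: operate on the full 64-bit dma_addr_t */
        uint32_t good_hi = upper_32_bits(dma_addr);

        printf("low  = 0x%08" PRIx32 "\n", lower_32_bits(dma_addr));
        printf("high = 0x%08" PRIx32 " (buggy) vs 0x%08" PRIx32 " (fixed)\n",
               bad_hi, good_hi);
        return 0;
}
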
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptscsih.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 0e402eb95716..c29578614504 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -628,6 +628,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) return 1; } + if (ioc->bus_type == SAS) { + VirtDevice *vdevice = sc->device->hostdata; + + if (!vdevice || !vdevice->vtarget || + vdevice->vtarget->deleted) { + sc->result = DID_NO_CONNECT << 16; + goto out; + } + } + sc->host_scribble = NULL; sc->result = DID_OK << 16; /* Set default reply as OK */ pScsiReq = (SCSIIORequest_t *) mf; @@ -892,7 +902,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) #endif } /* end of address reply case */ - +out: /* Unmap the DMA buffers, if any. */ scsi_dma_unmap(sc); From 9766096d331c82e71d3c9df61f1c88eff6ad916b Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 11:46:33 +0530 Subject: [PATCH 200/209] [SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command Normally In HBA reset path MPT driver will flush existing work in current work queue (mpt/0) . This is just a dummy activity for MPT driver point of view, since HBA reset will turn off Work queue events. It means we will simply returns from work queue without doing anything. But for the case where Work is already done (half the way), we have to have that work to be done. Considering above condition we stuck forever since Deadlock in scsi midlayer and MPT driver. sd_sync_cache() will wait forever since HBA is not in Running state, and it will never come into Running state since sd_sync_cache() is called from HBA reset context. Now new code will not wait for half cooked work to be finished before returning from HBA reset. Once we are out of HBA reset, EH thread will change host state to running from recovery and work waiting for running state of HBA will be finished. New code is turning ON firmware event from another special work called Rescan toplogy. 
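In short, the reset path may only cancel firmware-event work that has not started yet; waiting for in-flight work from that context is what closes the deadlock loop, because the in-flight work can itself be blocked until the reset path returns. A small standalone sketch of that cancel-without-flush pattern (a plain linked list stands in for the driver's fw_event_list; nothing here is MPT code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_work {
        int id;
        bool running;              /* set once a worker has picked it up */
        struct fake_work *next;
};

/*
 * Remove and free every queued item that has not started yet.  In-flight
 * items are deliberately left to finish on their own, which is what avoids
 * the deadlock described above.
 */
static int cancel_pending(struct fake_work **head)
{
        int cancelled = 0;
        struct fake_work **pp = head;

        while (*pp) {
                struct fake_work *w = *pp;

                if (!w->running) {
                        *pp = w->next;
                        free(w);
                        cancelled++;
                } else {
                        pp = &w->next;
                }
        }
        return cancelled;
}

int main(void)
{
        struct fake_work *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct fake_work *w = calloc(1, sizeof(*w));
                w->id = i;
                w->running = (i == 0);     /* pretend item 0 is in flight */
                w->next = head;
                head = w;
        }

        printf("cancelled %d pending work item(s)\n", cancel_pending(&head));

        /* only the in-flight item remains; its worker would free it later */
        while (head) {
                struct fake_work *w = head;
                head = w->next;
                free(w);
        }
        return 0;
}
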
Signed-off-by: Kashyap Desai Signed-off-by: James Bottomley --- drivers/message/fusion/mptsas.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 02a18dfcd52a..83873e3d0ce7 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -325,7 +325,6 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc) { struct fw_event_work *fw_event, *next; struct mptsas_target_reset_event *target_reset_list, *n; - u8 flush_q; MPT_SCSI_HOST *hd = shost_priv(ioc->sh); /* flush the target_reset_list */ @@ -345,15 +344,10 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc) !ioc->fw_event_q || in_interrupt()) return; - flush_q = 0; list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { if (cancel_delayed_work(&fw_event->work)) mptsas_free_fw_event(ioc, fw_event); - else - flush_q = 1; } - if (flush_q) - flush_workqueue(ioc->fw_event_q); } @@ -1279,7 +1273,6 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) } mptsas_cleanup_fw_event_q(ioc); mptsas_queue_rescan(ioc); - mptsas_fw_event_on(ioc); break; default: break; @@ -1599,6 +1592,7 @@ mptsas_firmware_event_work(struct work_struct *work) mptsas_scan_sas_topology(ioc); ioc->in_rescan = 0; mptsas_free_fw_event(ioc, fw_event); + mptsas_fw_event_on(ioc); return; } From b437b95620dbf4bf7bd13af0f9d32fdac82c5d37 Mon Sep 17 00:00:00 2001 From: "Kashyap, Desai" Date: Wed, 2 Sep 2009 12:44:10 +0530 Subject: [PATCH 201/209] [SCSI] mptsas : Bump version to 3.04.12 Bump version to 3.04.12 Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 79b17ceb714a..8dd4d219e433 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.11" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.11" +#define MPT_LINUX_VERSION_COMMON "3.04.12" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.12" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ From e71044ee2efa4792e21d243b03d49006db66aec9 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Thu, 3 Sep 2009 14:27:08 +0200 Subject: [PATCH 202/209] [SCSI] sg: fix oops in the error path in sg_build_indirect() When the allocation fails in sg_build_indirect(), an oops happens in the error path. It's caused by an obvious typo. Signed-off-by: Michal Schmidt Reported-by: Bob Tracy Acked-by: Douglas Gilbert Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/sg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 9230402c45af..4968c4ced385 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1811,7 +1811,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) return 0; out: for (i = 0; i < k; i++) - __free_pages(schp->pages[k], order); + __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; From 87b79a53277c21a2de07106d0affa857bd79e1bb Mon Sep 17 00:00:00 2001 From: "Moger, Babu" Date: Thu, 3 Sep 2009 21:42:15 -0600 Subject: [PATCH 203/209] [SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach Moving the initialization code from rdac_activate to rdac_bus_attach which is more efficient. We don't have to collect all the information during every activate. 
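The sg_build_indirect() fix above is a one-character change, but the failure mode is worth spelling out: the error path indexed the page array with the loop bound k instead of the loop counter i, so it passed the just-failed (NULL) entry to __free_pages() k times and leaked the k pages that had actually been allocated. A tiny sketch of the corrected cleanup loop, with plain malloc/free standing in for alloc_pages/__free_pages:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        void *pages[4] = { NULL };
        int k, i;

        /* pretend the allocation failed on the third attempt, so k == 2 */
        for (k = 0; k < 2; k++)
                pages[k] = malloc(4096);

        /*
         * Buggy cleanup:  free(pages[k]) touches the failed (NULL) slot k
         * times and leaks pages[0] and pages[1]; __free_pages(NULL, ...) is
         * what actually oopsed in the kernel, free(NULL) would merely leak.
         * Fixed cleanup: index with the loop counter instead.
         */
        for (i = 0; i < k; i++)
                free(pages[i]);

        printf("freed %d page buffer(s)\n", k);
        return 0;
}
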
Signed-off-by: Babu Moger Reviewed-by: Vijay Chauhan Reviewed-by: Bob Stankey Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 8b9e14318d1b..c5e4476e912b 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -525,17 +525,6 @@ static int rdac_activate(struct scsi_device *sdev) if (err != SCSI_DH_OK) goto done; - if (!h->ctlr) { - err = initialize_controller(sdev, h); - if (err != SCSI_DH_OK) - goto done; - } - - if (h->ctlr->use_ms10 == -1) { - err = set_mode_select(sdev, h); - if (err != SCSI_DH_OK) - goto done; - } if (h->lun_state == RDAC_LUN_UNOWNED) err = send_mode_select(sdev, h); done: @@ -681,12 +670,20 @@ static int rdac_bus_attach(struct scsi_device *sdev) if (err != SCSI_DH_OK) goto failed; - err = check_ownership(sdev, h); + err = initialize_controller(sdev, h); if (err != SCSI_DH_OK) goto failed; + err = check_ownership(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + + err = set_mode_select(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + if (!try_module_get(THIS_MODULE)) - goto failed; + goto clean_ctlr; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; @@ -698,6 +695,9 @@ static int rdac_bus_attach(struct scsi_device *sdev) return 0; +clean_ctlr: + kref_put(&h->ctlr->kref, release_controller); + failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", From 1527666e6af977cc287e0f7088356c8be29b3f75 Mon Sep 17 00:00:00 2001 From: "Moger, Babu" Date: Thu, 3 Sep 2009 21:42:21 -0600 Subject: [PATCH 204/209] [SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization Adding the code to read the debug information during initialization. This patch collects the information about storage and controllers during rdac_activate. 
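The attach-time code added below pulls a printable array name out of the 0xC8 inquiry page so that later log messages can identify the storage array. A minimal sketch of that extraction, assuming the label is stored one ASCII character per two-byte cell with the character in the second byte, which is how the new get_lun_info() reads it (the sample bytes here are made up):

#include <stdio.h>

#define ARRAY_LABEL_LEN 31

int main(void)
{
        /* two-byte cells, ASCII in the second byte of each cell */
        const unsigned char array_user_label[] = {
                0, 'S', 0, 'T', 0, '2', 0, '5', 0, '3', 0, '0', 0, '-', 0, 'A',
        };
        char array_name[ARRAY_LABEL_LEN];
        unsigned int i, n = sizeof(array_user_label) / 2;

        for (i = 0; i < n && i < ARRAY_LABEL_LEN - 1; i++)
                array_name[i] = array_user_label[2 * i + 1];
        array_name[i] = '\0';

        printf("array name: %s\n", array_name);
        return 0;
}
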
Signed-off-by: Babu Moger Reviewed-by: Vijay Chauhan Reviewed-by: Bob Stankey Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 34 +++++++++++++++++----- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index c5e4476e912b..9f1c4c2df924 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -112,6 +112,7 @@ struct c9_inquiry { #define SUBSYS_ID_LEN 16 #define SLOT_ID_LEN 2 +#define ARRAY_LABEL_LEN 31 struct c4_inquiry { u8 peripheral_info; @@ -135,6 +136,8 @@ struct rdac_controller { struct rdac_pg_legacy legacy; struct rdac_pg_expanded expanded; } mode_select; + u8 index; + u8 array_name[ARRAY_LABEL_LEN]; }; struct c8_inquiry { u8 peripheral_info; @@ -303,7 +306,8 @@ static void release_controller(struct kref *kref) kfree(ctlr); } -static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id) +static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, + char *array_name) { struct rdac_controller *ctlr, *tmp; @@ -324,6 +328,14 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id) /* initialize fields of controller */ memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); + memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); + + /* update the controller index */ + if (slot_id[1] == 0x31) + ctlr->index = 0; + else + ctlr->index = 1; + kref_init(&ctlr->kref); ctlr->use_ms10 = -1; list_add(&ctlr->node, &ctlr_list); @@ -363,9 +375,10 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code, return err; } -static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h) +static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, + char *array_name) { - int err; + int err, i; struct c8_inquiry *inqp; err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h); @@ -377,6 +390,11 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h) inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') return SCSI_DH_NOSYS; h->lun = inqp->lun[7]; /* Uses only the last byte */ + + for(i=0; iarray_user_label[(2*i)+1]; + + *(array_name+ARRAY_LABEL_LEN-1) = '\0'; } return err; } @@ -410,7 +428,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) } static int initialize_controller(struct scsi_device *sdev, - struct rdac_dh_data *h) + struct rdac_dh_data *h, char *array_name) { int err; struct c4_inquiry *inqp; @@ -418,7 +436,8 @@ static int initialize_controller(struct scsi_device *sdev, err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c4; - h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id); + h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id, + array_name); if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; } @@ -652,6 +671,7 @@ static int rdac_bus_attach(struct scsi_device *sdev) struct rdac_dh_data *h; unsigned long flags; int err; + char array_name[ARRAY_LABEL_LEN]; scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + sizeof(*h) , GFP_KERNEL); @@ -666,11 +686,11 @@ static int rdac_bus_attach(struct scsi_device *sdev) h->lun = UNINITIALIZED_LUN; h->state = RDAC_STATE_ACTIVE; - err = get_lun(sdev, h); + err = get_lun_info(sdev, h, array_name); if (err != SCSI_DH_OK) goto failed; - err = initialize_controller(sdev, h); + err = initialize_controller(sdev, h, array_name); if (err != SCSI_DH_OK) goto 
failed; From dd784edcfc080fb4c83f1f3d10d905c5ab61616f Mon Sep 17 00:00:00 2001 From: "Moger, Babu" Date: Thu, 3 Sep 2009 21:42:28 -0600 Subject: [PATCH 205/209] [SCSI] scsi_dh_rdac: changes for rdac debug logging Patch to add debugging stuff for rdac device handler. - Added a bit mask "module parameter" rdac_logging with 2 bits for each type of logging. - currently defined only two types of logging(failover and sense logging). Can be enhanced later if required. - By default only failover logging is enabled which is equivalent of current logging. Signed-off-by: Babu Moger Reviewed-by: Vijay Chauhan Reviewed-by: Bob Stankey Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_rdac.c | 51 +++++++++++++++++++--- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 9f1c4c2df924..11c89311427e 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -201,6 +201,31 @@ static const char *lun_state[] = static LIST_HEAD(ctlr_list); static DEFINE_SPINLOCK(list_lock); +/* + * module parameter to enable rdac debug logging. + * 2 bits for each type of logging, only two types defined for now + * Can be enhanced if required at later point + */ +static int rdac_logging = 1; +module_param(rdac_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, " + "Default is 1 - failover logging enabled, " + "set it to 0xF to enable all the logs"); + +#define RDAC_LOG_FAILOVER 0 +#define RDAC_LOG_SENSE 2 + +#define RDAC_LOG_BITS 2 + +#define RDAC_LOG_LEVEL(SHIFT) \ + ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1)) + +#define RDAC_LOG(SHIFT, sdev, f, arg...) \ +do { \ + if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ + sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ +} while (0); + static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; @@ -469,6 +494,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, { struct scsi_sense_hdr sense_hdr; int err = SCSI_DH_IO, ret; + struct rdac_dh_data *h = get_rdac_data(sdev); ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!ret) @@ -497,11 +523,14 @@ static int mode_select_handle_sense(struct scsi_device *sdev, err = SCSI_DH_RETRY; break; default: - sdev_printk(KERN_INFO, sdev, - "MODE_SELECT failed with sense %02x/%02x/%02x.\n", - sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); + break; } + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); + done: return err; } @@ -518,7 +547,9 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) if (!rq) goto done; - sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n", + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "%s MODE_SELECT command", + (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); err = blk_execute_rq(q, NULL, rq, 1); @@ -528,8 +559,12 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; } - if (err == SCSI_DH_OK) + if (err == SCSI_DH_OK) { h->state = RDAC_STATE_ACTIVE; + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT completed", + (char *) h->ctlr->array_name, h->ctlr->index); + } done: return err; @@ -567,6 +602,12 @@ static int rdac_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { struct rdac_dh_data *h = get_rdac_data(sdev); + + RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " + "I/O returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); + switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) From 4c0ba5d2593b5156327263f3ef6d7399dc0717b8 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Sat, 5 Sep 2009 07:34:23 +0530 Subject: [PATCH 206/209] [SCSI] libiscsi: add completion function for drivers that do not need pdu processing beiscsi does not need the iscsi scsi cmd processing. It does not even get this info on the completion path. This adds a function to just update the sequencing numbers and complete a task. Signed-off-by: Mike Christie Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 38 +++++++++++++++++++++++++++++++++----- include/scsi/libiscsi.h | 2 ++ 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index a7ee4bb40708..67b2a2b00286 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -109,12 +109,9 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn) } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); -void -iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) +static void __iscsi_update_cmdsn(struct iscsi_session *session, + uint32_t exp_cmdsn, uint32_t max_cmdsn) { - uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn); - uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn); - /* * standard specifies this check for when to update expected and * max sequence numbers @@ -138,6 +135,12 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) iscsi_conn_queue_work(session->leadconn); } } + +void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) +{ + __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn), + be32_to_cpu(hdr->max_cmdsn)); +} EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); /** @@ -499,6 +502,31 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) __iscsi_put_task(task); } +/** + * iscsi_complete_scsi_task - finish scsi task normally + * @task: iscsi task for scsi cmd + * @exp_cmdsn: expected cmd sn in cpu format + * @max_cmdsn: max cmd sn in cpu format + * + * This is used when drivers do not need or cannot perform + * lower level pdu processing. 
+ * + * Called with session lock + */ +void iscsi_complete_scsi_task(struct iscsi_task *task, + uint32_t exp_cmdsn, uint32_t max_cmdsn) +{ + struct iscsi_conn *conn = task->conn; + + ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); + + conn->last_recv = jiffies; + __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); +} +EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); + + /* * session lock must be held and if not called for a task that is * still pending or from the xmit thread, then xmit thread must diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 61afeb59a836..439c8b75cb69 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -415,6 +415,8 @@ extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t); extern void iscsi_requeue_task(struct iscsi_task *task); extern void iscsi_put_task(struct iscsi_task *task); extern void __iscsi_get_task(struct iscsi_task *task); +extern void iscsi_complete_scsi_task(struct iscsi_task *task, + uint32_t exp_cmdsn, uint32_t max_cmdsn); /* * generic helpers From 661134ad3765348ecd6150a92e736bf28ba40f80 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Sat, 5 Sep 2009 07:35:33 +0530 Subject: [PATCH 207/209] [SCSI] libiscsi, bnx2i: make bound ep check common bnx2i currently has a check for if a ep is properly bound, so if iscsi_queuecommand/xmit_task is called while there is no ep we will not queue IO. be2iscsi sends IO from queuecommand/xmit_task like how bnx2i does and needs a similar test. This patch has us just use the suspend_bit test for this. When ep_poll has succeeed iscsid will call conn_bind, the LLD will then call iscsi_conn_bind which will clear the suspend bit. When ep_disconnect is called (or if there is a conn error) we set the suspend bit. For the ep_disconnect case I am adding a helper in this patch that will take the session lock to make sure iscsi_queuecommand/xmit_task is not running and it will set the suspend bit. Signed-off-by: Mike Christie Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/libiscsi.c | 33 +++++++++++++++++++++++++++++++++ include/scsi/libiscsi.h | 1 + 2 files changed, 34 insertions(+) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 67b2a2b00286..8dc73c489a17 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1571,6 +1571,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto fault; } + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + reason = FAILURE_SESSION_IN_RECOVERY; + sc->result = DID_REQUEUE; + goto fault; + } + if (iscsi_check_cmdsn_window_closed(conn)) { reason = FAILURE_WINDOW_CLOSED; goto reject; @@ -1810,6 +1816,33 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, } } +/** + * iscsi_suspend_queue - suspend iscsi_queuecommand + * @conn: iscsi conn to stop queueing IO on + * + * This grabs the session lock to make sure no one is in + * xmit_task/queuecommand, and then sets suspend to prevent + * new commands from being queued. This only needs to be called + * by offload drivers that need to sync a path like ep disconnect + * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi + * will call iscsi_start_tx and iscsi_unblock_session when in FFP. 
+ */ +void iscsi_suspend_queue(struct iscsi_conn *conn) +{ + spin_lock_bh(&conn->session->lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + spin_unlock_bh(&conn->session->lock); +} +EXPORT_SYMBOL_GPL(iscsi_suspend_queue); + +/** + * iscsi_suspend_tx - suspend iscsi_data_xmit + * @conn: iscsi conn tp stop processing IO on. + * + * This function sets the suspend bit to prevent iscsi_data_xmit + * from sending new IO, and if work is queued on the xmit thread + * it will wait for it to be completed. + */ void iscsi_suspend_tx(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 439c8b75cb69..887e57e3e223 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -390,6 +390,7 @@ extern void iscsi_session_failure(struct iscsi_session *session, extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf); extern void iscsi_suspend_tx(struct iscsi_conn *conn); +extern void iscsi_suspend_queue(struct iscsi_conn *conn); extern void iscsi_conn_queue_work(struct iscsi_conn *conn); #define iscsi_conn_printk(prefix, _c, fmt, a...) \ From afffd3dabe5209882c8cc59a373a4d33b5db304a Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 30 Aug 2009 12:36:48 +0200 Subject: [PATCH 208/209] [SCSI] libsrp: fix memory leak in srp_ring_free() This patch fixes a memory leak in the libsrp function srp_ring_free(). It is not documented whether or not this function should free the ring pointer itself. But the source code of the callers of this function (srp_target_alloc() and srp_target_free()) makes it clear that srp_ring_free() should deallocate the ring pointer itself. Furthermore, the patch below makes srp_ring_free() deallocate all memory allocated by srp_ring_alloc(). This patch affects the ibmvstgt driver, which is the only in-tree driver that calls the srp_ring_free() function (indirectly). Signed-off-by: Bart Van Assche Acked-by: FUJITA Tomonori Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/libsrp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c index 2742ae8a3d09..9ad38e81e343 100644 --- a/drivers/scsi/libsrp.c +++ b/drivers/scsi/libsrp.c @@ -124,6 +124,7 @@ static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max, dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); kfree(ring[i]); } + kfree(ring); } int srp_target_alloc(struct srp_target *target, struct device *dev, From ea038f63ac52439e7816295fa6064fe95e6c1f51 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 21 Aug 2009 09:47:54 -0600 Subject: [PATCH 209/209] [SCSI] fix oops during scsi scanning Chris Webb reported: p0# uname -a Linux f7ea8425-d45b-490f-a738-d181d0df6963.host.elastichosts.com 2.6.30.4-elastic-lon-p #2 SMP PREEMPT Thu Aug 20 14:30:50 BST 2009 x86_64 Intel(R) Xeon(R) CPU E5420 @ 2.50GHz GenuineIntel GNU/Linux p0# zgrep SCAN_ASYNC /proc/config.gz # CONFIG_SCSI_SCAN_ASYNC is not set p0# cat /var/log/kern/2009-08-20 [...] 
15:27:10.485 kernel: scsi9 : iSCSI Initiator over TCP/IP 15:27:11.493 kernel: scsi 9:0:0:0: RAID IET Controller 0001 PQ: 0 ANSI: 5 15:27:11.493 kernel: scsi 9:0:0:0: Attached scsi generic sg6 type 12 15:27:11.495 kernel: scsi 9:0:0:1: Direct-Access IET VIRTUAL-DISK 0001 PQ: 0 ANSI: 5 15:27:11.495 kernel: sd 9:0:0:1: Attached scsi generic sg7 type 0 15:27:11.495 kernel: sd 9:0:0:1: [sdg] 4194304 512-byte hardware sectors: (2.14 GB/2.00 GiB) 15:27:11.495 kernel: sd 9:0:0:1: [sdg] Write Protect is off 15:27:11.495 kernel: sd 9:0:0:1: [sdg] Write cache: disabled, read cache: enabled, doesn't support DPO or FUA 15:27:13.012 kernel: sdg:<6>scsi 9:0:0:1: [sdg] Unhandled error code 15:27:13.012 kernel: scsi 9:0:0:1: [sdg] Result: hostbyte=0x07 driverbyte=0x00 15:27:13.012 kernel: end_request: I/O error, dev sdg, sector 0 15:27:13.012 kernel: Buffer I/O error on device sdg, logical block 0 15:27:13.012 kernel: ldm_validate_partition_table(): Disk read failed. 15:27:13.012 kernel: unable to read partition table 15:27:13.014 kernel: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 15:27:13.014 kernel: IP: [] disk_part_iter_next+0x74/0xfd 15:27:13.014 kernel: PGD 82ad0b067 PUD 82cd7e067 PMD 0 15:27:13.014 kernel: Oops: 0000 [#1] PREEMPT SMP 15:27:13.014 kernel: last sysfs file: /sys/devices/platform/host9/session4/iscsi_session/session4/ifacename 15:27:13.014 kernel: CPU 5 15:27:13.014 kernel: Modules linked in: 15:27:13.014 kernel: Pid: 13999, comm: async/0 Not tainted 2.6.30.4-elastic-lon-p #2 X7DBN 15:27:13.014 kernel: RIP: 0010:[] [] disk_part_iter_next+0x74/0xfd 15:27:13.014 kernel: RSP: 0018:ffff88066afa3dd0 EFLAGS: 00010246 15:27:13.014 kernel: RAX: ffff88082b58a000 RBX: ffff88066afa3e00 RCX: 0000000000000000 15:27:13.014 kernel: RDX: 0000000000000000 RSI: ffff88082b58a000 RDI: 0000000000000000 15:27:13.014 kernel: RBP: ffff88066afa3df0 R08: ffff88066afa2000 R09: ffff8806a204f000 15:27:13.014 kernel: R10: 000000fb12c7d274 R11: ffff8806c2bf0628 R12: ffff88066afa3e00 15:27:13.014 kernel: R13: ffff88082c829a00 R14: 0000000000000000 R15: ffff8806bc50c920 15:27:13.014 kernel: FS: 0000000000000000(0000) GS:ffff88002818a000(0000) knlGS:0000000000000000 15:27:13.014 kernel: CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b 15:27:13.014 kernel: CR2: 0000000000000010 CR3: 000000082ade3000 CR4: 00000000000426e0 15:27:13.014 kernel: DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 15:27:13.014 kernel: DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 15:27:13.014 kernel: Process async/0 (pid: 13999, threadinfo ffff88066afa2000, task ffff8806c2bf05e0) 15:27:13.014 kernel: Stack: 15:27:13.014 kernel: 0000000000000000 ffff88066afa3e00 ffff88066afa3e00 ffff88082c829a00 15:27:13.014 kernel: ffff88066afa3e40 ffffffff80306feb ffff88082b58a000 0000000000000000 15:27:13.014 kernel: 0000000000000001 ffff8806bc50c920 ffff88066afa3e40 ffff88082b58a000 15:27:13.014 kernel: Call Trace: 15:27:13.014 kernel: [] register_disk+0x122/0x13a 15:27:13.014 kernel: [] add_disk+0xaa/0x106 15:27:13.014 kernel: [] sd_probe_async+0x198/0x25b 15:27:13.014 kernel: [] async_thread+0x10c/0x20d 15:27:13.014 kernel: [] ? default_wake_function+0x0/0xf 15:27:13.014 kernel: [] ? async_thread+0x0/0x20d 15:27:13.014 kernel: [] kthread+0x55/0x80 15:27:13.014 kernel: [] child_rip+0xa/0x20 15:27:13.014 kernel: [] ? kthread+0x0/0x80 15:27:13.014 kernel: [] ? 
child_rip+0x0/0x20 15:27:13.014 kernel: Code: c8 ff 80 e1 0c b9 00 00 00 00 0f 44 c1 41 83 cd ff 48 8d 7a 20 48 be ff ff ff ff 08 00 00 00 48 b9 00 00 00 00 08 00 00 00 eb 50 <8b> 42 10 41 bd 01 00 00 00 eb db 4c 63 c2 4e 8d 04 c7 4d 8b 20 15:27:13.015 kernel: RIP [] disk_part_iter_next+0x74/0xfd 15:27:13.015 kernel: RSP 15:27:13.015 kernel: CR2: 0000000000000010 15:27:13.015 kernel: ---[ end trace 6104b56ef5590e25 ]--- The problem is caused because the async scanning split in sd.c doesn't hold any reference to the device when it kicks off the async piece. What's happening is that an iSCSI disconnect is destorying the device again *before* the async sd scanning thread even starts. Fix this by taking a reference before starting the thread and dropping it again when the thread completes. Reported-by: Chris Webb Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b7b9fec67a98..a89c421dab51 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2021,6 +2021,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? "removable " : ""); + put_device(&sdkp->dev); } /** @@ -2106,6 +2107,7 @@ static int sd_probe(struct device *dev) get_device(&sdp->sdev_gendev); + get_device(&sdkp->dev); /* prevent release before async_schedule */ async_schedule(sd_probe_async, sdkp); return 0;
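The two-line fix follows the usual pattern for handing an object to asynchronous work: take a reference before scheduling and drop it at the end of the async function, so a concurrent release (here the iSCSI disconnect) cannot free the object while the probe is still running. A userspace sketch of that pattern, with a C11 atomic counter playing the role of the device reference that get_device()/put_device() manipulate and a pthread standing in for async_schedule():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct disk {
        atomic_int refcount;
        const char *name;
};

static void get_disk(struct disk *d)
{
        atomic_fetch_add(&d->refcount, 1);
}

static void put_disk(struct disk *d)
{
        /* whichever thread drops the last reference releases the object */
        if (atomic_fetch_sub(&d->refcount, 1) == 1) {
                printf("releasing %s\n", d->name);
                free(d);
        }
}

static void *probe_async(void *data)
{
        struct disk *d = data;

        printf("async probe of %s\n", d->name);
        put_disk(d);            /* pairs with the get taken before the handoff */
        return NULL;
}

int main(void)
{
        pthread_t tid;
        struct disk *d = malloc(sizeof(*d));

        atomic_init(&d->refcount, 1);
        d->name = "sdg";

        get_disk(d);            /* prevent release before the async work has run */
        pthread_create(&tid, NULL, probe_async, d);

        put_disk(d);            /* the disconnect path dropping its own reference */
        pthread_join(&tid, NULL);
        return 0;
}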