Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that wasn't included in the first merge window pull
  request.  This pull request contains:

   - A set of NVMe fixes from Keith, and one from Nic for the integrity
     side of it.

   - Fix from Ming, clearing ->mq_ops if we don't successfully setup a
     queue for multiqueue.

   - A set of stability fixes for bcache from Jiri, and also marking
     bcache as orphaned as it's no longer actively maintained (in
     mainline, at least)"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: clear q->mq_ops if init fail
  MAINTAINERS: mark bcache as orphan
  bcache: bch_gc_thread() is not freezable
  bcache: bch_allocator_thread() is not freezable
  bcache: bch_writeback_thread() is not freezable
  nvme/host: Add missing blk_integrity tag_size + flags assignments
  NVMe: Add device ID's with stripe quirk
  NVMe: Short-cut removal on surprise hot-unplug
  NVMe: Allow user initiated rescan
  NVMe: Reduce driver log spamming
  NVMe: Unbind driver on failure
  NVMe: Delete only created queues
  NVMe: Allocate queues only for online cpus
commit 564884fbde
Author: Linus Torvalds
Date:   2016-05-27 14:28:09 -07:00

9 changed files with 61 additions and 14 deletions


--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2304,7 +2304,7 @@ BCACHE (BLOCK LAYER CACHE)
 M:	Kent Overstreet <kent.overstreet@gmail.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
-S:	Maintained
+S:	Orphan
 F:	drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER


--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2020,7 +2020,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
 	if (!q->queue_ctx)
-		return ERR_PTR(-ENOMEM);
+		goto err_exit;
 
 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
			GFP_KERNEL, set->numa_node);
@@ -2084,6 +2084,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	kfree(q->queue_hw_ctx);
 err_percpu:
 	free_percpu(q->queue_ctx);
+err_exit:
+	q->mq_ops = NULL;
 	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
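Context for the blk-mq hunks above: the generic queue teardown path keys off q->mq_ops to decide whether to run multiqueue-specific cleanup, so leaving the pointer set after a failed blk_mq_init_allocated_queue() can steer teardown into state that was never initialized. The sketch below is illustrative only, with hypothetical names rather than kernel code, and just shows the "ops pointer doubles as an initialized flag" pattern the fix relies on.

	#include <stddef.h>
	#include <errno.h>

	struct mq_ops { void (*free)(void *q); };

	struct queue {
		const struct mq_ops *mq_ops;	/* non-NULL only after a successful mq init */
		void *ctx;			/* resources allocated during init */
	};

	/* hypothetical init: mirrors the err_exit path that clears q->mq_ops */
	static int queue_init(struct queue *q, const struct mq_ops *ops, void *ctx)
	{
		q->mq_ops = ops;
		q->ctx = ctx;
		if (!q->ctx) {			/* allocation failed */
			q->mq_ops = NULL;	/* make teardown see "never initialized" */
			return -ENOMEM;
		}
		return 0;
	}

	/* hypothetical teardown: only walks the mq path when init actually succeeded */
	static void queue_cleanup(struct queue *q)
	{
		if (q->mq_ops)
			q->mq_ops->free(q);
	}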


--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
 #include "btree.h"
 
 #include <linux/blkdev.h>
-#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
@@ -288,7 +287,6 @@ do {						\
 		if (kthread_should_stop())		\
 			return 0;			\
 							\
-		try_to_freeze();			\
 		schedule();				\
 		mutex_lock(&(ca)->set->bucket_lock);	\
 	}						\


--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
-#include <linux/freezer.h>
 #include <linux/hash.h>
 #include <linux/kthread.h>
 #include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@ static int bch_gc_thread(void *arg)
 		mutex_unlock(&c->bucket_lock);
-		try_to_freeze();
 		schedule();
 	}


--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
 #include "writeback.h"
 
 #include <linux/delay.h>
-#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <trace/events/bcache.h>
@@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc)
 	 */
 	while (!kthread_should_stop()) {
-		try_to_freeze();
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
@@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg)
 			if (kthread_should_stop())
 				return 0;
-			try_to_freeze();
 			schedule();
 			continue;
 		}
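All three bcache hunks above delete bare try_to_freeze() calls. A kernel thread only participates in the freezer after calling set_freezable(), and these bcache threads never did, so the calls were effectively no-ops. For comparison, a minimal sketch of a kthread that genuinely opts in might look roughly like this; it uses a hypothetical have_work() condition and is not bcache code.

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* hypothetical condition the thread services; not a real kernel symbol */
	static bool have_work(void);

	static int freezable_worker(void *arg)
	{
		set_freezable();		/* opt in: plain kthreads are not freezable */

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!have_work())	/* nothing to do: sleep until woken */
				schedule();
			__set_current_state(TASK_RUNNING);

			try_to_freeze();	/* effective only because of set_freezable() */
			/* ... service the work here ... */
		}
		return 0;
	}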


--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 			break;
 		}
 		break;
+	case NVME_CTRL_DEAD:
+		switch (old_state) {
+		case NVME_CTRL_DELETING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
 	default:
 		break;
 	}
@@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
 	switch (ns->pi_type) {
 	case NVME_NS_DPS_PI_TYPE3:
 		integrity.profile = &t10_pi_type3_crc;
+		integrity.tag_size = sizeof(u16) + sizeof(u32);
+		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 		break;
 	case NVME_NS_DPS_PI_TYPE1:
 	case NVME_NS_DPS_PI_TYPE2:
 		integrity.profile = &t10_pi_type1_crc;
+		integrity.tag_size = sizeof(u16);
+		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 		break;
 	default:
 		integrity.profile = NULL;
@@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		return ctrl->ops->reset_ctrl(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
 		return nvme_reset_subsystem(ctrl);
+	case NVME_IOCTL_RESCAN:
+		nvme_queue_scan(ctrl);
+		return 0;
 	default:
 		return -ENOTTY;
 	}
@@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
 }
 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+	nvme_queue_scan(ctrl);
+	return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid);
 static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reset_controller.attr,
+	&dev_attr_rescan_controller.attr,
 	&dev_attr_model.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns, *next;
 
+	/*
+	 * The dead states indicates the controller was not gracefully
+	 * disconnected. In that case, we won't be able to flush any data while
+	 * removing the namespaces' disks; fail all the queues now to avoid
+	 * potentially having to clean up the failed sync later.
+	 */
+	if (ctrl->state == NVME_CTRL_DEAD)
+		nvme_kill_queues(ctrl);
+
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
 		nvme_ns_remove(ns);


--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ enum nvme_ctrl_state {
 	NVME_CTRL_LIVE,
 	NVME_CTRL_RESETTING,
 	NVME_CTRL_DELETING,
+	NVME_CTRL_DEAD,
 };
 
 struct nvme_ctrl {


--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = num_possible_cpus();
+	nr_io_queues = num_online_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 static void nvme_disable_io_queues(struct nvme_dev *dev)
 {
-	int pass;
+	int pass, queues = dev->online_queues - 1;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
 	for (pass = 0; pass < 2; pass++) {
-		int sent = 0, i = dev->queue_count - 1;
+		int sent = 0, i = queues;
 
 		reinit_completion(&dev->ioq_wait);
  retry:
@@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 	nvme_kill_queues(&dev->ctrl);
 	if (pci_get_drvdata(pdev))
-		pci_stop_and_remove_bus_device_locked(pdev);
+		device_release_driver(&pdev->dev);
 	nvme_put_ctrl(&dev->ctrl);
 }
@@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
 	pci_set_drvdata(pdev, NULL);
+
+	if (!pci_device_is_present(pdev))
+		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
 	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
@@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
 	 * shutdown the controller to quiesce. The controller will be restarted
 	 * after the slot reset through driver's slot_reset callback.
 	 */
-	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
+		dev_warn(dev->ctrl.device,
+			"frozen state error detected, reset controller\n");
 		nvme_dev_disable(dev, false);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
+		dev_warn(dev->ctrl.device,
+			"failure state error detected, request disconnect\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0953),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a53),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a54),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },


--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -61,5 +61,6 @@ struct nvme_passthru_cmd {
 #define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
 #define NVME_IOCTL_RESET	_IO('N', 0x44)
 #define NVME_IOCTL_SUBSYS_RESET	_IO('N', 0x45)
+#define NVME_IOCTL_RESCAN	_IO('N', 0x46)
 
 #endif /* _UAPI_LINUX_NVME_IOCTL_H */
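The new NVME_IOCTL_RESCAN ioctl above gives userspace a way to ask the driver to re-enumerate namespaces, via the same nvme_queue_scan() that the new rescan_controller sysfs attribute triggers. A rough userspace sketch, assuming a controller character device at /dev/nvme0 and a uapi header new enough to carry the define:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		int fd = open("/dev/nvme0", O_RDWR);	/* controller char device; path is an assumption */
		if (fd < 0) {
			perror("open /dev/nvme0");
			return 1;
		}
		if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)	/* kicks nvme_queue_scan() in the driver */
			perror("NVME_IOCTL_RESCAN");
		close(fd);
		return 0;
	}

Writing to the sysfs attribute (on a typical system something like /sys/class/nvme/nvme0/rescan_controller) should have the same effect without needing the updated header.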