/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
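
/*
 * Table lists carried over from the previous NFIT scan.  The add_*
 * helpers below compare freshly parsed tables against these lists so
 * that unchanged tables are moved back onto the live lists rather than
 * reallocated when the NFIT is re-evaluated.
 */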
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
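
/*
 * Marshal an nd_cmd into the single-buffer _DSM envelope: the input
 * fields at the head of 'buf' become the package payload, and the _DSM
 * output buffer is copied back field-by-field behind them, sized by the
 * nd_cmd descriptor helpers.  A positive return value is the number of
 * bytes left unfilled in 'buf'; command status travels inside the
 * payload itself.
 */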
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}
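
/*
 * SPA range types are identified by GUID, and nfit_uuid[] is indexed by
 * enum nfit_uuids, so the index of the matching GUID doubles as the
 * NFIT_SPA_* type code.
 */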
static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
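
/*
 * The add_* helpers below share a pattern: if an identical table
 * (compared over min(sizeof(*table), header.length) bytes) survives from
 * the previous scan it is moved back onto the live list, otherwise a new
 * devm-allocated wrapper is appended.  Returning false signals -ENOMEM
 * to add_table().
 */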
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
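
/*
 * Walk one sub-table of the NFIT: dispatch on the header type, consume
 * header.length bytes, and return the cursor for the next table (NULL at
 * the end of the buffer or on a zero-length table, ERR_PTR(-ENOMEM) on
 * allocation failure).
 */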
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
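
/*
 * A block-data-window SPA is matched to a dimm by the (range_index,
 * device_handle, region_index) triple of its MEMDEV entries; failing to
 * find one simply leaves the dimm without a BDW, which is legal since
 * BDWs are optional.
 */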
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}

	return 0;
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}
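
/* list_sort() comparator: order dimms by ascending NFIT device handle */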
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);
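
/*
 * ACPI 6.0 Table 5-129 "Memory Device State Flags": bit 0 last SAVE
 * failed, bit 1 last RESTORE failed, bit 2 platform flush failed, bit 3
 * device not armed for persistent writes, bit 4 SMART/health events
 * observed prior to OSPM handoff.  The strings are deliberately
 * failure-oriented ("save_fail", not "save") so they cannot be misread
 * as success indicators.
 */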
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}
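
/*
 * Bind the nfit_mem to its ACPI companion device and probe the per-DIMM
 * _DSM command mask via acpi_check_dsm().  With force_enable_dimms a
 * missing companion is tolerated so the dimm can still be registered.
 */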
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
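
/*
 * The interleave-set cookie computed below is a fletcher64 checksum over
 * the sorted (region_offset, serial_number) tuples of a set, so the same
 * collection of dimms yields the same cookie across reboots regardless
 * of discovery order.
 */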
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
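
/*
 * Translate a logical aperture offset into the dimm's interleaved mmio
 * space.  For example, with line_size 256 and num_lines 2, offset 1300
 * is byte 20 of logical line 5, i.e. interleave entry 1 of the third
 * repetition of the table, mapping to base_offset +
 * line_offset[1] * 256 + 2 * table_size + 20.
 */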
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush. Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}
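
/*
 * Program a block control word.  The 64-bit command encodes, in cache
 * line units: bits 47:0 the DPA offset, bits 55:48 the transfer length,
 * and bit 56 the write flag.
 */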
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any
new data is read. This ensures that any stale cache lines from the
previous contents of the aperture will be discarded from the processor
cache, and the new data will be read properly from the DIMM. We know
that the cache lines are clean and will be discarded without any
writeback because either a) the previous aperture operation was a read,
and we never modified the contents of the aperture, or b) the previous
aperture operation was a write and we must have written back the dirtied
contents of the aperture to the DIMM before the I/O was completed.
In order to add support for the "read flush" flag I needed to add a
generic routine to invalidate cache lines, mmio_flush_range(). This is
protected by the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently
only supported on x86.
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-08-27 13:14:20 -06:00
|
|
|
readq(mmio->addr.base + offset);
|
2015-06-25 02:21:02 -06:00
|
|
|
}
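For a concrete feel of the control word write_blk_ctl() packs above — bits 0-47 carry the DPA offset in cache-line units, bits 48-55 the length in cache lines, and bit 56 the read/write command — here is a stand-alone sketch with invented values (assuming 64-byte cache lines, i.e. L1_CACHE_SHIFT == 6):

#include <stdint.h>
#include <stdio.h>

#define CACHE_SHIFT 6				/* assume 64-byte cache lines */
#define BCW_OFFSET_MASK ((1ULL << 48) - 1)
#define BCW_LEN_SHIFT 48
#define BCW_LEN_MASK ((1ULL << 8) - 1)
#define BCW_CMD_SHIFT 56

int main(void)
{
	uint64_t dpa = 0x12345040;		/* invented DPA */
	uint64_t len = 8192;			/* bytes */
	uint64_t write = 1;

	/* same packing as write_blk_ctl() */
	uint64_t cmd = (dpa >> CACHE_SHIFT) & BCW_OFFSET_MASK;
	cmd |= ((len >> CACHE_SHIFT) & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= write << BCW_CMD_SHIFT;

	printf("bcw=0x%016llx offset(lines)=%llu len(lines)=%llu write=%llu\n",
		(unsigned long long)cmd,
		(unsigned long long)(cmd & BCW_OFFSET_MASK),
		(unsigned long long)((cmd >> BCW_LEN_SHIFT) & BCW_LEN_MASK),
		(unsigned long long)(cmd >> BCW_CMD_SHIFT));
	return 0;
}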
nd_blk: change aperture mapping from WC to WB

This should result in a pretty sizeable performance gain for reads. For
rough comparison I did some simple read testing using PMEM to compare
reads of write combining (WC) mappings vs write-back (WB). This was
done on a random lab machine.

PMEM reads from a write combining mapping:

  # dd of=/dev/null if=/dev/pmem0 bs=4096 count=100000
  100000+0 records in
  100000+0 records out
  409600000 bytes (410 MB) copied, 9.2855 s, 44.1 MB/s

PMEM reads from a write-back mapping:

  # dd of=/dev/null if=/dev/pmem0 bs=4096 count=1000000
  1000000+0 records in
  1000000+0 records out
  4096000000 bytes (4.1 GB) copied, 3.44034 s, 1.2 GB/s

To be able to safely support a write-back aperture I needed to add
support for the "read flush" _DSM flag, as outlined in the DSM spec:
http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf

This flag tells the ND BLK driver that it needs to flush the cache lines
associated with the aperture after the aperture is moved but before any
new data is read. This ensures that any stale cache lines from the
previous contents of the aperture will be discarded from the processor
cache, and the new data will be read properly from the DIMM. We know
that the cache lines are clean and will be discarded without any
writeback because either a) the previous aperture operation was a read,
and we never modified the contents of the aperture, or b) the previous
aperture operation was a write and we must have written back the dirtied
contents of the aperture to the DIMM before the I/O was completed.

In order to add support for the "read flush" flag I needed to add a
generic routine to invalidate cache lines, mmio_flush_range(). This is
protected by the ARCH_HAS_MMIO_FLUSH Kconfig variable, and is currently
only supported on x86.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
			+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range. In
 * the style of devm, nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
{
	cmd->address = addr;
	cmd->length = length;

	return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd));
}

static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
{
	int rc;

	cmd->address = addr;
	cmd->length = length;
	cmd->type = ND_ARS_PERSISTENT;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
				sizeof(*cmd));
		if (rc)
			return rc;
		switch (cmd->status) {
		case 0:
			return 0;
		case 1:
			/* ARS unsupported, but we should never get here */
			return 0;
		case 2:
			return -EINVAL;
		case 3:
			/* ARS is in progress */
			msleep(1000);
			break;
		default:
			return -ENXIO;
		}
	}
}

static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_status *cmd)
{
	int rc;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
				sizeof(*cmd));
		if (rc || cmd->status & 0xffff)
			return -ENXIO;

		/* Check extended status (Upper two bytes) */
		switch (cmd->status >> 16) {
		case 0:
			return 0;
		case 1:
			/* ARS is in progress */
			msleep(1000);
			break;
		case 2:
			/* No ARS performed for the current boot */
			return 0;
		default:
			return -ENXIO;
		}
	}
}
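The 32-bit ARS status word decoded above splits in half: the low 16 bits carry the command status (any non-zero value is a failure), and the upper 16 bits carry the extended status driving the switch. A minimal sketch of that split, with invented helper names that are not part of the driver:

/* Illustrative only -- these helpers do not exist in the driver. */
static inline u16 ars_cmd_status(u32 status)
{
	return status & 0xffff;		/* low word: command status */
}

static inline u16 ars_ext_status(u32 status)
{
	return status >> 16;		/* high word: extended status */
}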

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status, u64 start)
{
	int rc;
	u32 i;

	/*
	 * The address field returned by ars_status should be less than or
	 * equal to the address we last started ARS for. The (start, length)
	 * returned by ars_status should also have non-zero overlap with the
	 * range we started ARS for. If this is not the case, bail.
	 */
	if (ars_status->address > start ||
			(ars_status->address + ars_status->length < start))
		return -ENXIO;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = NULL;
	struct nd_cmd_ars_start *ars_start = NULL;
	struct nd_cmd_ars_cap *ars_cap = NULL;
	u64 start, len, cur, remaining;
	int rc;

	ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
	if (!ars_cap)
		return -ENOMEM;

	start = ndr_desc->res->start;
	len = ndr_desc->res->end - ndr_desc->res->start + 1;

	rc = ars_get_cap(nd_desc, ars_cap, start, len);
	if (rc)
		goto out;

	/*
	 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
	 * extended status is not set, skip this but continue initialization
	 */
	if ((ars_cap->status & 0xffff) ||
		!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
		dev_warn(acpi_desc->dev,
			"ARS unsupported (status: 0x%x), won't create an error list\n",
			ars_cap->status);
		goto out;
	}

	/*
	 * Check if a full-range ARS has been run. If so, use those results
	 * without having to start a new ARS.
	 */
	ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
			GFP_KERNEL);
	if (!ars_status) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ars_get_status(nd_desc, ars_status);
	if (rc)
		goto out;

	if (ars_status->address <= start &&
		(ars_status->address + ars_status->length >= start + len)) {
		rc = ars_status_process_records(nvdimm_bus, ars_status, start);
		goto out;
	}

	/*
	 * ARS_STATUS can overflow if the number of poison entries found is
	 * greater than the maximum buffer size (ars_cap->max_ars_out).
	 * To detect overflow, check if the length field of ars_status
	 * is less than the length we supplied. If so, process the
	 * error entries we got, adjust the start point, and start again.
	 */
	ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
	if (!ars_start) {
		rc = -ENOMEM;
		goto out;
	}

	cur = start;
	remaining = len;
	do {
		u64 done, end;

		rc = ars_do_start(nd_desc, ars_start, cur, remaining);
		if (rc)
			goto out;

		rc = ars_get_status(nd_desc, ars_status);
		if (rc)
			goto out;

		rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
		if (rc)
			goto out;

		end = min(cur + remaining,
			ars_status->address + ars_status->length);
		done = end - cur;
		cur += done;
		remaining -= done;
	} while (remaining);

 out:
	kfree(ars_cap);
	kfree(ars_start);
	kfree(ars_status);
	return rc;
}
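To make the overflow-resume arithmetic concrete (invented numbers, not from the source): suppose ARS starts at cur = 0x1000 with remaining = 0x9000, and the status buffer overflows after covering 0x4000 bytes, so ars_get_status() reports address = 0x1000 and length = 0x4000. Then end = min(0x1000 + 0x9000, 0x1000 + 0x4000) = 0x5000, done = 0x4000, and the next loop iteration restarts ARS at cur = 0x5000 with remaining = 0x5000, repeating until the whole range has been scrubbed.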

libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)

A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label arrives in a subsequent patch.

The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.

"region"s have 2 generic attributes, "size" and "mapping"s, where:

  - size: the BLK accessible capacity or the span of the
    system physical address range in the case of PMEM.

  - mappingN: a tuple describing a dimm's contribution to the region's
    capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
    there will be at least one mapping per dimm in the interleave set.
    For a BLK-region there is only "mapping0", listing the starting DPA
    of the BLK-region and the available DPA capacity of that space
    (matches "size" above).

The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said, the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out to not be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".

Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

nd_btt: atomic sector updates

BTT stands for Block Translation Table, and is a way to provide power
fail sector atomicity semantics for block devices that have the ability
to perform byte granularity IO. It relies on the capability of libnvdimm
namespace devices to do byte aligned IO.

The BTT works as a stacked block device, and reserves a chunk of space
from the backing device for its accounting metadata. It is a bio-based
driver because all IO is done synchronously, and there is no queuing or
asynchronous completions at either the device or the driver level.

The BTT uses 'lanes' to index into various 'on-disk' data structures,
and lanes also act as a synchronization mechanism in case there are more
CPUs than available lanes. We did a comparison between two lane lock
strategies - first where we kept an atomic counter around that tracked
which was the last lane that was used, and 'our' lane was determined by
atomically incrementing that. That way, for the nr_cpus > nr_lanes case,
theoretically, no CPU would be blocked waiting for a lane. The other
strategy was to use the cpu number we're scheduled on and hash it to
a lane number. Theoretically, this could block an IO that could've
otherwise run using a different, free lane. But some fio workloads
showed that the direct cpu -> lane hash performed faster than tracking
'last lane' - my reasoning is the cache thrash caused by moving the
atomic variable made that approach slower than simply waiting out the
in-progress IO. This supports the conclusion that the driver can be a
very simple bio-based one that does synchronous IOs instead of queuing.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Boaz Harrosh <boaz@plexistor.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Neil Brown <neilb@suse.de>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
[jmoyer: fix nmi watchdog timeout in btt_map_init]
[jmoyer: move btt initialization to module load path]
[jmoyer: fix memory leak in the btt initialization path]
[jmoyer: Don't overwrite corrupted arenas]
Signed-off-by: Vishal Verma <vishal.l.verma@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
|
|
|
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
|
|
|
|
struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
|
|
|
|
struct acpi_nfit_memory_map *memdev,
|
|
|
|
struct acpi_nfit_system_address *spa)
|
|
|
|
{
|
|
|
|
struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
|
|
|
|
memdev->device_handle);
|
2015-06-25 02:21:02 -06:00
|
|
|
struct nd_blk_region_desc *ndbr_desc;
|
libnvdimm, nfit: regions (block-data-window, persistent memory, volatile memory)
A "region" device represents the maximum capacity of a BLK range (mmio
block-data-window(s)), or a PMEM range (DAX-capable persistent memory or
volatile memory), without regard for aliasing. Aliasing, in the
dimm-local address space (DPA), is resolved by metadata on a dimm to
designate which exclusive interface will access the aliased DPA ranges.
Support for the per-dimm metadata/label arrvies is in a subsequent
patch.
The name format of "region" devices is "regionN" where, like dimms, N is
a global ida index assigned at discovery time. This id is not reliable
across reboots nor in the presence of hotplug. Look to attributes of
the region or static id-data of the sub-namespace to generate a
persistent name. However, if the platform configuration does not change
it is reasonable to expect the same region id to be assigned at the next
boot.
"region"s have 2 generic attributes "size", and "mapping"s where:
- size: the BLK accessible capacity or the span of the
system physical address range in the case of PMEM.
- mappingN: a tuple describing a dimm's contribution to the region's
capacity in the format (<nmemX>,<dpa>,<size>). For a PMEM-region
there will be at least one mapping per dimm in the interleave set. For
a BLK-region there is only "mapping0" listing the starting DPA of the
BLK-region and the available DPA capacity of that space (matches "size"
above).
The max number of mappings per "region" is hard coded per the
constraints of sysfs attribute groups. That said the number of mappings
per region should never exceed the maximum number of possible dimms in
the system. If the current number turns out to not be enough then the
"mappings" attribute clarifies how many there are supposed to be. "32
should be enough for anybody...".
Cc: Neil Brown <neilb@suse.de>
Cc: <linux-acpi@vger.kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2015-06-09 18:13:14 -06:00
|
|
|
struct nfit_mem *nfit_mem;
|
|
|
|
int blk_valid = 0;
|
|
|
|
|
|
|
|
if (!nvdimm) {
|
|
|
|
dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
|
|
|
|
spa->range_index, memdev->device_handle);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
nd_mapping->nvdimm = nvdimm;
|
|
|
|
switch (nfit_spa_type(spa)) {
|
|
|
|
case NFIT_SPA_PM:
|
|
|
|
case NFIT_SPA_VOLATILE:
|
|
|
|
nd_mapping->start = memdev->address;
|
|
|
|
nd_mapping->size = memdev->region_size;
|
|
|
|
break;
|
|
|
|
case NFIT_SPA_DCR:
|
|
|
|
nfit_mem = nvdimm_provider_data(nvdimm);
|
|
|
|
if (!nfit_mem || !nfit_mem->bdw) {
|
|
|
|
dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
|
|
|
|
spa->range_index, nvdimm_name(nvdimm));
|
|
|
|
} else {
|
|
|
|
nd_mapping->size = nfit_mem->bdw->capacity;
|
|
|
|
nd_mapping->start = nfit_mem->bdw->start_address;
|
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

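/*
 * Translate one SPA range description into an nd_region_desc and hand
 * it to the libnvdimm core: PMEM and volatile ranges become
 * memory-mapped regions, while a DCR range becomes a BLK region backed
 * by the block-data-window resources validated in
 * acpi_nfit_init_mapping() above.
 */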
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
		if (rc) {
			dev_err(acpi_desc->dev,
					"error while performing ARS to find poison: %d\n",
					rc);
			return rc;
		}
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}

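/* Register a libnvdimm region for each SPA range parsed from the NFIT. */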
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}

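/*
 * Any table still sitting on a @prev list after re-parsing was not
 * re-discovered in the updated NFIT, i.e. firmware deleted it, a
 * transition this driver does not support.
 */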
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

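/*
 * Parse @sz bytes of NFIT sub-tables at acpi_desc->nfit and register
 * the resulting dimms and regions.  This is exported so that a second
 * table provider can drive the same parsing path; a hypothetical
 * caller might do roughly:
 *
 *	acpi_desc->nfit = my_nfit_buf;	(illustrative names only)
 *	rc = acpi_nfit_init(acpi_desc, my_nfit_size);
 *
 * It is also safe to call again when firmware publishes an updated
 * table set: previously seen tables are recognized and only new
 * entries are merged in.
 */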
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

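	/*
	 * Cut all previously parsed tables over to the @prev lists.  As
	 * the new table set is walked below, entries that are found
	 * again leave @prev, so whatever remains afterwards was deleted
	 * by firmware and is flagged by acpi_nfit_check_deletions().
	 */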
	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

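/*
 * Allocate and initialize the driver-private descriptor: wire up the
 * nvdimm_bus_descriptor callbacks, register the nvdimm bus, and set up
 * the table lists and locks consumed by acpi_nfit_init().
 */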
static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}

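/*
 * Driver ->add() entry point: locate the static NFIT ACPI table, then
 * let an optional _FIT method override it.  A missing table is not an
 * error since an nvdimm may be hot-plugged (and _FIT re-evaluated)
 * later.  When the override is taken, acpi_desc->nfit points into the
 * _FIT buffer, so buf.pointer is not freed here.
 */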
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;

		/* Adjust for the acpi_object header of the _FIT */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

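/*
 * Notification handler for NFIT update events: re-evaluate _FIT under
 * the device lock and merge the refreshed table set into the existing
 * descriptor (allocating one first if the NFIT was absent at boot).
 */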
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
					__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

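	/*
	 * Stash the current table pointer so a failed merge below can be
	 * rolled back to the last known-good NFIT.
	 */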
	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

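/*
 * The BUILD_BUG_ON()s pin the ACPICA structure definitions to the
 * fixed sub-table sizes this parser expects, and the uuid table is
 * seeded with the well-known ids used to classify SPA ranges and _DSM
 * scopes.
 */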
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");