Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull scsi target updates from Nicholas Bellinger:
 "The highlights in this series include:

   - Improve sg_table lookup scalability in RAMDISK_MCP (martin)
   - Add device attribute to expose config name for INQUIRY model (tregaron)
   - Convert tcm_vhost to use lock-less list for cmd completion (asias)
   - Add tcm_vhost support for multiple targets per endpoint (asias)
   - Add tcm_vhost support for multiple queues per vhost (asias)
   - Add missing mapped_lun bounds checking during make_mappedlun setup
     in generic fabric configfs code (jan engelhardt + nab)
   - Enforce individual iscsi-target network portal export once per
     TargetName endpoint (grover + nab)
   - Add WRITE_SAME w/ UNMAP=0 emulation to FILEIO backend (nab)

  Things have been mostly quiet this round, with the majority of the
  work being done on the iser-target WIP driver + associated
  iscsi-target refactoring patches currently in flight for v3.10 code.

  At this point there is one patch series left outstanding from Asias
  to add support for UNMAP + WRITE_SAME w/ UNMAP=1 to FILEIO, awaiting
  feedback from hch & Co, that will likely be included in a post
  v3.9-rc1 PULL request if there are no objections.

  Also, there is a regression bug recently reported off-list that seems
  to be affecting v3.5 and v3.6 kernels with MSFT iSCSI initiators that
  is still being tracked down.  No word yet if this affects >= v3.7,
  but if so there will likely be another PULL request coming your way."

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (26 commits)
  target: Rename spc_get_write_same_sectors -> sbc_get_write_same_sectors
  target/file: Add WRITE_SAME w/ UNMAP=0 emulation support
  iscsi-target: Enforce individual network portal export once per TargetName
  iscsi-target: Refactor iscsit_get_np sockaddr matching into iscsit_check_np_match
  target: Add missing mapped_lun bounds checking during make_mappedlun setup
  target: Fix lookup of dynamic NodeACLs during cached demo-mode operation
  target: Fix parameter list length checking in MODE SELECT
  target: Fix error checking for UNMAP commands
  target: Fix sense data for out-of-bounds IO operations
  target_core_rd: break out unterminated loop during copy
  tcm_vhost: Multi-queue support
  tcm_vhost: Multi-target support
  target: Add device attribute to expose config_item_name for INQUIRY model
  target: don't truncate the fail intr address
  target: don't always say "ipv6" as address type
  target/iblock: Use backend REQ_FLUSH hint for WriteCacheEnabled status
  iscsi-target: make some temporary buffers larger
  tcm_vhost: Optimize gup in vhost_scsi_map_to_sgl
  tcm_vhost: Use iov_num_pages to calculate sgl_count
  tcm_vhost: Introduce iov_num_pages
  ...
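As background for the tcm_vhost completion-list conversion noted above: the driver replaces a spinlock-protected list_head with the kernel's generic lock-less llist API. The producer/consumer shape of that API looks roughly like this (a minimal sketch, not the driver's exact code; my_cmd and process_cmd are hypothetical names):

    #include <linux/llist.h>

    struct my_cmd {
    	struct llist_node node;	/* replaces the list_head + spinlock pair */
    };

    static LLIST_HEAD(completion_list);

    /* Producer (e.g. a completion path): lock-free single add */
    static void complete_cmd(struct my_cmd *cmd)
    {
    	llist_add(&cmd->node, &completion_list);
    }

    /* Consumer (e.g. a worker): atomically takes the whole list at once */
    static void drain_completions(void)
    {
    	struct llist_node *n = llist_del_all(&completion_list);

    	while (n) {
    		struct my_cmd *cmd = llist_entry(n, struct my_cmd, node);

    		n = n->next;		/* advance before the entry is freed */
    		process_cmd(cmd);	/* hypothetical handler */
    	}
    }

The design win is that the hot producer side is a single atomic exchange, and the consumer empties the list with one llist_del_all() instead of taking a lock per command.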
commit ecc88efbe7
21 changed files with 563 additions and 229 deletions
drivers/target/iscsi/iscsi_target.c

@@ -264,16 +264,50 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
 	return 0;
 }
 
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
 	struct __kernel_sockaddr_storage *sockaddr,
+	struct iscsi_np *np,
 	int network_transport)
 {
 	struct sockaddr_in *sock_in, *sock_in_e;
 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
-	struct iscsi_np *np;
-	int ip_match = 0;
+	bool ip_match = false;
 	u16 port;
 
+	if (sockaddr->ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(&sock_in6->sin6_addr.in6_u,
+			    &sock_in6_e->sin6_addr.in6_u,
+			    sizeof(struct in6_addr)))
+			ip_match = true;
+
+		port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)sockaddr;
+		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+			ip_match = true;
+
+		port = ntohs(sock_in->sin_port);
+	}
+
+	if ((ip_match == true) && (np->np_port == port) &&
+	    (np->np_network_transport == network_transport))
+		return true;
+
+	return false;
+}
+
+static struct iscsi_np *iscsit_get_np(
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	bool match;
+
 	spin_lock_bh(&np_lock);
 	list_for_each_entry(np, &g_np_list, np_list) {
 		spin_lock(&np->np_thread_lock);

@@ -282,29 +316,8 @@ static struct iscsi_np *iscsit_get_np(
 			continue;
 		}
 
-		if (sockaddr->ss_family == AF_INET6) {
-			sock_in6 = (struct sockaddr_in6 *)sockaddr;
-			sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
-
-			if (!memcmp(&sock_in6->sin6_addr.in6_u,
-				    &sock_in6_e->sin6_addr.in6_u,
-				    sizeof(struct in6_addr)))
-				ip_match = 1;
-
-			port = ntohs(sock_in6->sin6_port);
-		} else {
-			sock_in = (struct sockaddr_in *)sockaddr;
-			sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
-
-			if (sock_in->sin_addr.s_addr ==
-			    sock_in_e->sin_addr.s_addr)
-				ip_match = 1;
-
-			port = ntohs(sock_in->sin_port);
-		}
-
-		if ((ip_match == 1) && (np->np_port == port) &&
-		    (np->np_network_transport == network_transport)) {
+		match = iscsit_check_np_match(sockaddr, np, network_transport);
+		if (match == true) {
 			/*
 			 * Increment the np_exports reference count now to
 			 * prevent iscsit_del_np() below from being called
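The refactor above pulls the sockaddr/port/transport comparison out of iscsit_get_np() into a reusable predicate. Any caller walking a list of network portals can now reduce to roughly this shape (a sketch of the usage pattern, not verbatim driver code):

    /* Hypothetical caller: does any existing portal already match? */
    bool found = false;
    struct iscsi_np *np;

    list_for_each_entry(np, &g_np_list, np_list) {
    	if (iscsit_check_np_match(sockaddr, np, network_transport)) {
    		found = true;	/* same IP, port and transport already exported */
    		break;
    	}
    }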
drivers/target/iscsi/iscsi_target.h

@@ -8,6 +8,8 @@ extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
 extern void iscsit_del_tiqn(struct iscsi_tiqn *);
 extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
 extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+				struct iscsi_np *, int);
 extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
 				char *, int);
 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
drivers/target/iscsi/iscsi_target_parameters.c

@@ -1095,11 +1095,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
 			SET_PSTATE_REPLY_OPTIONAL(param);
 		}
 	} else if (IS_TYPE_NUMBER(param)) {
-		char *tmpptr, buf[10];
+		char *tmpptr, buf[11];
 		u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
 		u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
 
-		memset(buf, 0, 10);
+		memset(buf, 0, sizeof(buf));
 
 		if (!strcmp(param->name, MAXCONNECTIONS) ||
 		    !strcmp(param->name, MAXBURSTLENGTH) ||

@@ -1503,8 +1503,8 @@ static int iscsi_enforce_integrity_rules(
 			FirstBurstLength = simple_strtoul(param->value,
 					&tmpptr, 0);
 			if (FirstBurstLength > MaxBurstLength) {
-				char tmpbuf[10];
-				memset(tmpbuf, 0, 10);
+				char tmpbuf[11];
+				memset(tmpbuf, 0, sizeof(tmpbuf));
 				sprintf(tmpbuf, "%u", MaxBurstLength);
 				if (iscsi_update_param_value(param, tmpbuf))
 					return -1;
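The buffer bump from 10 to 11 bytes exists because the largest u32 renders as ten decimal digits, and the terminating NUL needs an eleventh byte. A quick standalone check of the arithmetic (plain C, not driver code):

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
    	char buf[11];	/* "4294967295" is 10 digits + NUL terminator */
    	int n = snprintf(buf, sizeof(buf), "%u", UINT_MAX);

    	printf("%s needs %d chars\n", buf, n);	/* prints: 4294967295 needs 10 */
    	return 0;
    }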
drivers/target/iscsi/iscsi_target_stat.c

@@ -410,14 +410,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
 	struct iscsi_tiqn *tiqn = container_of(igrps,
 				struct iscsi_tiqn, tiqn_stat_grps);
 	struct iscsi_login_stats *lstat = &tiqn->login_stats;
-	unsigned char buf[8];
+	int ret;
 
 	spin_lock(&lstat->lock);
-	snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
-				"ipv6" : "ipv4");
+	if (lstat->last_intr_fail_ip_family == AF_INET6)
+		ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+	else
+		ret = snprintf(page, PAGE_SIZE, "ipv4\n");
 	spin_unlock(&lstat->lock);
 
-	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+	return ret;
 }
 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
 

@@ -427,16 +429,19 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
 	struct iscsi_tiqn *tiqn = container_of(igrps,
 				struct iscsi_tiqn, tiqn_stat_grps);
 	struct iscsi_login_stats *lstat = &tiqn->login_stats;
-	unsigned char buf[32];
+	int ret;
 
 	spin_lock(&lstat->lock);
-	if (lstat->last_intr_fail_ip_family == AF_INET6)
-		snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
-	else
-		snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+	if (lstat->last_intr_fail_ip_family == AF_INET6) {
+		ret = snprintf(page, PAGE_SIZE, "[%s]\n",
+			       lstat->last_intr_fail_ip_addr);
+	} else {
+		ret = snprintf(page, PAGE_SIZE, "%s\n",
+			       lstat->last_intr_fail_ip_addr);
+	}
 	spin_unlock(&lstat->lock);
 
-	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+	return ret;
 }
 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
 
drivers/target/iscsi/iscsi_target_tpg.c

@@ -422,6 +422,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
 	return NULL;
 }
 
+static bool iscsit_tpg_check_network_portal(
+	struct iscsi_tiqn *tiqn,
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	struct iscsi_np *np;
+	bool match = false;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+		spin_lock(&tpg->tpg_np_lock);
+		list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+			np = tpg_np->tpg_np;
+
+			match = iscsit_check_np_match(sockaddr, np,
+						network_transport);
+			if (match == true)
+				break;
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return match;
+}
+
 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	struct iscsi_portal_group *tpg,
 	struct __kernel_sockaddr_storage *sockaddr,

@@ -432,6 +461,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	struct iscsi_np *np;
 	struct iscsi_tpg_np *tpg_np;
 
+	if (!tpg_np_parent) {
+		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+				network_transport) == true) {
+			pr_err("Network Portal: %s already exists on a"
+				" different TPG on %s\n", ip_str,
+				tpg->tpg_tiqn->tiqn);
+			return ERR_PTR(-EEXIST);
+		}
+	}
+
 	tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
 	if (!tpg_np) {
 		pr_err("Unable to allocate memory for"
drivers/target/target_core_configfs.c

@@ -609,6 +609,9 @@ static struct target_core_dev_attrib_attribute \
 	__CONFIGFS_EATTR_RO(_name,					\
 	target_core_dev_show_attr_##_name);
 
+DEF_DEV_ATTRIB(emulate_model_alias);
+SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB(emulate_dpo);
 SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
 

@@ -681,6 +684,7 @@ SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 
 static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_model_alias.attr,
 	&target_core_dev_attrib_emulate_dpo.attr,
 	&target_core_dev_attrib_emulate_fua_write.attr,
 	&target_core_dev_attrib_emulate_fua_read.attr,
drivers/target/target_core_device.c

@@ -713,6 +713,44 @@ int se_dev_set_max_write_same_len(
 	return 0;
 }
 
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+	const char *configname;
+
+	configname = config_item_name(&dev->dev_group.cg_item);
+	if (strlen(configname) >= 16) {
+		pr_warn("dev[%p]: Backstore name '%s' is too long for "
+			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+			configname);
+	}
+	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
+{
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change model alias"
+			" while export_count is %d\n",
+			dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (flag) {
+		dev_set_t10_wwn_model_alias(dev);
+	} else {
+		strncpy(&dev->t10_wwn.model[0],
+			dev->transport->inquiry_prod, 16);
+	}
+	dev->dev_attrib.emulate_model_alias = flag;
+
+	return 0;
+}
+
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
 	if (flag != 0 && flag != 1) {

@@ -772,6 +810,12 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("emulate_write_cache not supported for pSCSI\n");
 		return -EINVAL;
 	}
+	if (dev->transport->get_write_cache) {
+		pr_warn("emulate_write_cache cannot be changed when underlying"
+			" HW reports WriteCacheEnabled, ignoring request\n");
+		return 0;
+	}
+
 	dev->dev_attrib.emulate_write_cache = flag;
 	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
 			dev, dev->dev_attrib.emulate_write_cache);

@@ -1182,24 +1226,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
 
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 	struct se_portal_group *tpg,
+	struct se_node_acl *nacl,
 	u32 mapped_lun,
-	char *initiatorname,
 	int *ret)
 {
 	struct se_lun_acl *lacl;
-	struct se_node_acl *nacl;
 
-	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 		pr_err("%s InitiatorName exceeds maximum size.\n",
 			tpg->se_tpg_tfo->get_fabric_name());
 		*ret = -EOVERFLOW;
 		return NULL;
 	}
-	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if (!nacl) {
-		*ret = -EINVAL;
-		return NULL;
-	}
 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
 	if (!lacl) {
 		pr_err("Unable to allocate memory for struct se_lun_acl.\n");

@@ -1210,7 +1248,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 	INIT_LIST_HEAD(&lacl->lacl_list);
 	lacl->mapped_lun = mapped_lun;
 	lacl->se_lun_nacl = nacl;
-	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+		 nacl->initiatorname);
 
 	return lacl;
 }

@@ -1390,6 +1429,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->t10_alua.t10_dev = dev;
 
 	dev->dev_attrib.da_dev = dev;
+	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
 	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
 	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
 	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
drivers/target/target_core_fabric_configfs.c

@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
 		ret = -EINVAL;
 		goto out;
 	}
+	if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n", mapped_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+		ret = -EINVAL;
+		goto out;
+	}
 
-	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
-			config_item_name(acl_ci), &ret);
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+			mapped_lun, &ret);
 	if (!lacl) {
 		ret = -EINVAL;
 		goto out;
drivers/target/target_core_file.c

@@ -190,6 +190,11 @@ static int fd_configure_device(struct se_device *dev)
 
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
 	fd_dev->fd_queue_depth = dev->queue_depth;
+	/*
+	 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+	 * based upon struct iovec limit for vfs_writev()
+	 */
+	dev->dev_attrib.max_write_same_len = 0x1000;
 
 	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,

@@ -328,6 +333,114 @@ fd_execute_sync_cache(struct se_cmd *cmd)
 	return 0;
 }
 
+static unsigned char *
+fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
+			unsigned int len)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	unsigned int block_size = se_dev->dev_attrib.block_size;
+	unsigned int i = 0, end;
+	unsigned char *buf, *p, *kmap_buf;
+
+	buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate fd_execute_write_same buf\n");
+		return NULL;
+	}
+
+	kmap_buf = kmap(sg_page(sg)) + sg->offset;
+	if (!kmap_buf) {
+		pr_err("kmap() failed in fd_setup_write_same\n");
+		kfree(buf);
+		return NULL;
+	}
+	/*
+	 * Fill local *buf to contain multiple WRITE_SAME blocks up to
+	 * min(len, PAGE_SIZE)
+	 */
+	p = buf;
+	end = min_t(unsigned int, len, PAGE_SIZE);
+
+	while (i < end) {
+		memcpy(p, kmap_buf, block_size);
+
+		i += block_size;
+		p += block_size;
+	}
+	kunmap(sg_page(sg));
+
+	return buf;
+}
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(se_dev);
+	struct file *f = fd_dev->fd_file;
+	struct scatterlist *sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+	unsigned int len, len_tmp, iov_num;
+	int i, rc;
+	unsigned char *buf;
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	len = len_tmp = nolb * se_dev->dev_attrib.block_size;
+	iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
+
+	buf = fd_setup_write_same_buf(cmd, sg, len);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	iov = vzalloc(sizeof(struct iovec) * iov_num);
+	if (!iov) {
+		pr_err("Unable to allocate fd_execute_write_same iovecs\n");
+		kfree(buf);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Map the single fabric received scatterlist block now populated
+	 * in *buf into each iovec for I/O submission.
+	 */
+	for (i = 0; i < iov_num; i++) {
+		iov[i].iov_base = buf;
+		iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
+		len_tmp -= iov[i].iov_len;
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	rc = vfs_writev(f, &iov[0], iov_num, &pos);
+	set_fs(old_fs);
+
+	vfree(iov);
+	kfree(buf);
+
+	if (rc < 0 || rc != len) {
+		pr_err("vfs_writev() returned %d for write same\n", rc);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
 static sense_reason_t
 fd_execute_rw(struct se_cmd *cmd)
 {

@@ -486,6 +599,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
 static struct sbc_ops fd_sbc_ops = {
 	.execute_rw = fd_execute_rw,
 	.execute_sync_cache = fd_execute_sync_cache,
+	.execute_write_same = fd_execute_write_same,
 };
 
 static sense_reason_t
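The fd_execute_write_same() path added above relies on a simple trick: one page-sized buffer is filled with repeated copies of the single received block, and every iovec then points back at that same buffer, so vfs_writev() fans the page out across the whole range. A rough userspace illustration of the fan-out (a sketch assuming a 512-byte block, a 4096-byte page, and a block-aligned length; not driver code):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Write `nolb` copies of one 512-byte block using a single shared
     * buffer referenced by every iovec, mirroring fd_execute_write_same. */
    static ssize_t write_same_sketch(int fd, const char *block, size_t nolb, off_t pos)
    {
    	size_t len = nolb * 512, left = len;
    	size_t iov_num = (len + 4095) / 4096;	/* DIV_ROUND_UP(len, PAGE_SIZE) */
    	char *buf = malloc(4096);
    	struct iovec *iov = calloc(iov_num, sizeof(*iov));
    	ssize_t rc;
    	size_t i;

    	for (i = 0; i < 4096; i += 512)		/* replicate the block into one page */
    		memcpy(buf + i, block, 512);

    	for (i = 0; i < iov_num; i++) {		/* all iovecs share the same page */
    		iov[i].iov_base = buf;
    		iov[i].iov_len = left < 4096 ? left : 4096;
    		left -= iov[i].iov_len;
    	}

    	rc = pwritev(fd, iov, iov_num, pos);
    	free(iov);
    	free(buf);
    	return rc;
    }

This is also why the hunk caps max_write_same_len: the iovec count grows with the request length, and vfs_writev() has a fixed iovec limit.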
drivers/target/target_core_iblock.c

@@ -154,6 +154,7 @@ static int iblock_configure_device(struct se_device *dev)
 
 	if (blk_queue_nonrot(q))
 		dev->dev_attrib.is_nonrot = 1;
+
 	return 0;
 
 out_free_bioset:

@@ -390,10 +391,19 @@ iblock_execute_unmap(struct se_cmd *cmd)
 	sense_reason_t ret = 0;
 	int dl, bd_dl, err;
 
+	/* We never set ANC_SUP */
+	if (cmd->t_task_cdb[1])
+		return TCM_INVALID_CDB_FIELD;
+
+	if (cmd->data_length == 0) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
 	if (cmd->data_length < 8) {
 		pr_warn("UNMAP parameter list length %u too small\n",
 			cmd->data_length);
-		return TCM_INVALID_PARAMETER_LIST;
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
 	}
 
 	buf = transport_kmap_data_sg(cmd);

@@ -463,7 +473,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
 	int rc;
 
 	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
-			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+			sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
 	if (rc < 0) {
 		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

@@ -481,7 +491,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
 	struct bio *bio;
 	struct bio_list list;
 	sector_t block_lba = cmd->t_task_lba;
-	sector_t sectors = spc_get_write_same_sectors(cmd);
+	sector_t sectors = sbc_get_write_same_sectors(cmd);
 
 	sg = &cmd->t_data_sg[0];
 

@@ -654,20 +664,24 @@ iblock_execute_rw(struct se_cmd *cmd)
 	u32 sg_num = sgl_nents;
 	sector_t block_lba;
 	unsigned bio_cnt;
-	int rw;
+	int rw = 0;
 	int i;
 
 	if (data_direction == DMA_TO_DEVICE) {
+		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
+		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (dev->dev_attrib.emulate_write_cache == 0 ||
-		    (dev->dev_attrib.emulate_fua_write > 0 &&
-		     (cmd->se_cmd_flags & SCF_FUA)))
-			rw = WRITE_FUA;
-		else
+		if (q->flush_flags & REQ_FUA) {
+			if (cmd->se_cmd_flags & SCF_FUA)
+				rw = WRITE_FUA;
+			else if (!(q->flush_flags & REQ_FLUSH))
+				rw = WRITE_FUA;
+		} else {
 			rw = WRITE;
+		}
 	} else {
 		rw = READ;
 	}

@@ -774,6 +788,15 @@ iblock_parse_cdb(struct se_cmd *cmd)
 	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
+bool iblock_get_write_cache(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return q->flush_flags & REQ_FLUSH;
+}
+
 static struct se_subsystem_api iblock_template = {
 	.name		= "iblock",
 	.inquiry_prod	= "IBLOCK",

@@ -790,6 +813,7 @@ static struct se_subsystem_api iblock_template = {
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= iblock_get_blocks,
+	.get_write_cache	= iblock_get_write_cache,
 };
 
 static int __init iblock_module_init(void)
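The rewritten write-bias decision in iblock_execute_rw() reduces to a small truth table on the backend queue's advertised flush capabilities rather than the emulation attributes. A self-contained restatement of that logic (simplified stand-ins for the kernel flags, for illustration only):

    #include <stdio.h>

    #define REQ_FLUSH (1u << 0)	/* device has a volatile write cache */
    #define REQ_FUA   (1u << 1)	/* device honors per-request forced unit access */
    enum { WRITE_PLAIN, WRITE_FUA_BIASED };

    static int pick_write(unsigned int flush_flags, int cmd_fua)
    {
    	if (flush_flags & REQ_FUA) {
    		if (cmd_fua || !(flush_flags & REQ_FLUSH))
    			return WRITE_FUA_BIASED;	/* force writethrough */
    	}
    	return WRITE_PLAIN;
    }

    int main(void)
    {
    	printf("%d\n", pick_write(REQ_FUA, 0));			/* FUA: no volatile cache */
    	printf("%d\n", pick_write(REQ_FUA | REQ_FLUSH, 1));	/* FUA: initiator set it */
    	printf("%d\n", pick_write(REQ_FUA | REQ_FLUSH, 0));	/* plain WRITE */
    	return 0;
    }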
drivers/target/target_core_internal.h

@@ -25,6 +25,7 @@ int	se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
 int	se_dev_set_unmap_granularity(struct se_device *, u32);
 int	se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
 int	se_dev_set_max_write_same_len(struct se_device *, u32);
+int	se_dev_set_emulate_model_alias(struct se_device *, int);
 int	se_dev_set_emulate_dpo(struct se_device *, int);
 int	se_dev_set_emulate_fua_write(struct se_device *, int);
 int	se_dev_set_emulate_fua_read(struct se_device *, int);

@@ -45,7 +46,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
 int	core_dev_del_lun(struct se_portal_group *, u32);
 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
-		u32, char *, int *);
+		struct se_node_acl *, u32, int *);
 int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
 		struct se_lun_acl *, u32, u32);
 int	core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
drivers/target/target_core_rd.c

@@ -256,10 +256,12 @@ static void rd_free_device(struct se_device *dev)
 
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
-	u32 i;
 	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
 
-	for (i = 0; i < rd_dev->sg_table_count; i++) {
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_table_count) {
 		sg_table = &rd_dev->sg_table_array[i];
 		if ((sg_table->page_start_offset <= page) &&
 		    (sg_table->page_end_offset >= page))

@@ -314,7 +316,19 @@ rd_execute_rw(struct se_cmd *cmd)
 		void *rd_addr;
 
 		sg_miter_next(&m);
+		if (!(u32)m.length) {
+			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+				 dev->rd_dev_id, m.addr, m.length);
+			sg_miter_stop(&m);
+			return TCM_INCORRECT_AMOUNT_OF_DATA;
+		}
 		len = min((u32)m.length, src_len);
+		if (len > rd_size) {
+			pr_debug("RD[%u]: size underrun page %d offset %d "
+				 "size %d\n", dev->rd_dev_id,
+				 rd_page, rd_offset, rd_size);
+			len = rd_size;
+		}
 		m.consumed = len;
 
 		rd_addr = sg_virt(rd_sg) + rd_offset;
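The rd_get_sg_table() change is the "sg_table lookup scalability" item from the highlights: because every table covers a fixed number of pages, the owning table index is a single integer division instead of a linear scan. The arithmetic, standalone (SG_PER_TABLE is an assumed illustrative value; the driver derives it from RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)):

    #include <stdio.h>

    #define SG_PER_TABLE 256	/* assumed pages per table, for illustration */

    int main(void)
    {
    	unsigned int page = 1000;
    	unsigned int table = page / SG_PER_TABLE;	/* O(1) instead of O(n) scan */

    	printf("page %u lives in sg_table[%u]\n", page, table);	/* -> sg_table[3] */
    	return 0;
    }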
drivers/target/target_core_sbc.c

@@ -105,7 +105,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	return 0;
 }
 
-sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
 {
 	u32 num_blocks;
 

@@ -126,7 +126,7 @@ sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
 	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
 		cmd->t_task_lba + 1;
 }
-EXPORT_SYMBOL(spc_get_write_same_sectors);
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
 
 static sense_reason_t
 sbc_emulate_noop(struct se_cmd *cmd)

@@ -233,7 +233,7 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
-	unsigned int sectors = spc_get_write_same_sectors(cmd);
+	unsigned int sectors = sbc_get_write_same_sectors(cmd);
 
 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"

@@ -486,7 +486,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		 */
 		if (cmd->t_task_lba || sectors) {
 			if (sbc_check_valid_sectors(cmd) < 0)
-				return TCM_INVALID_CDB_FIELD;
+				return TCM_ADDRESS_OUT_OF_RANGE;
 		}
 		cmd->execute_cmd = ops->execute_sync_cache;
 		break;
drivers/target/target_core_spc.c

@@ -66,8 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 }
 
-static sense_reason_t
-spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;

@@ -104,6 +104,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 
 	return 0;
 }
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
 
 /* unit serial number */
 static sense_reason_t

@@ -160,7 +161,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
  */
-static sense_reason_t
+sense_reason_t
 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;

@@ -404,17 +405,33 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
 	return 0;
 }
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+static bool
+spc_check_dev_wce(struct se_device *dev)
+{
+	bool wce = false;
+
+	if (dev->transport->get_write_cache)
+		wce = dev->transport->get_write_cache(dev);
+	else if (dev->dev_attrib.emulate_write_cache > 0)
+		wce = true;
+
+	return wce;
+}
 
 /* Extended INQUIRY Data VPD Page */
 static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
+	struct se_device *dev = cmd->se_dev;
+
 	buf[3] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
+	if (spc_check_dev_wce(dev))
 		buf[6] = 0x01;
 	return 0;
 }

@@ -764,7 +781,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
 	if (pc == 1)
 		goto out;
 
-	if (dev->dev_attrib.emulate_write_cache > 0)
+	if (spc_check_dev_wce(dev))
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
 

@@ -876,7 +893,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 		spc_modesense_write_protect(&buf[length], type);
 
-	if ((dev->dev_attrib.emulate_write_cache > 0) &&
+	if ((spc_check_dev_wce(dev)) &&
 	    (dev->dev_attrib.emulate_fua_write > 0))
 		spc_modesense_dpofua(&buf[length], type);
 

@@ -983,6 +1000,14 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 	int ret = 0;
 	int i;
 
+	if (!cmd->data_length) {
+		target_complete_cmd(cmd, GOOD);
+		return 0;
+	}
+
+	if (cmd->data_length < off + 2)
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
 	buf = transport_kmap_data_sg(cmd);
 	if (!buf)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

@@ -1007,6 +1032,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 		goto out;
 
 check_contents:
+	if (cmd->data_length < off + length) {
+		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+		goto out;
+	}
+
 	if (memcmp(buf + off, tbuf, length))
 		ret = TCM_INVALID_PARAMETER_LIST;
 
drivers/target/target_core_tmr.c

@@ -331,18 +331,6 @@ static void core_tmr_drain_state_list(
 
 		fe_count = atomic_read(&cmd->t_fe_count);
 
-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-			pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
-				" cdb: %p, t_fe_count: %d dev: %p\n", cmd,
-				fe_count, dev);
-			cmd->transport_state |= CMD_T_ABORTED;
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
-			continue;
-		}
-		pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
-			" t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
 		cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
drivers/target/target_core_tpg.c

@@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 	struct se_node_acl *acl;
 
 	spin_lock_irq(&tpg->acl_node_lock);
-	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-		if (!strcmp(acl->initiatorname, initiatorname) &&
-		    !acl->dynamic_node_acl) {
-			spin_unlock_irq(&tpg->acl_node_lock);
-			return acl;
-		}
-	}
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	spin_unlock_irq(&tpg->acl_node_lock);
 
-	return NULL;
+	return acl;
 }
 
 /* core_tpg_add_node_to_devs():
drivers/target/target_core_transport.c

@@ -907,15 +907,18 @@ int transport_dump_vpd_ident(
 
 	switch (vpd->device_identifier_code_set) {
 	case 0x01: /* Binary */
-		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD Binary Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	case 0x02: /* ASCII */
-		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD ASCII Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	case 0x03: /* UTF-8 */
-		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD UTF-8 Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	default:

@@ -1514,6 +1517,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	case TCM_UNSUPPORTED_SCSI_OPCODE:
 	case TCM_INVALID_CDB_FIELD:
 	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_PARAMETER_LIST_LENGTH_ERROR:
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	case TCM_UNKNOWN_MODE_PAGE:
 	case TCM_WRITE_PROTECTED:

@@ -2674,6 +2678,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		/* INVALID FIELD IN PARAMETER LIST */
 		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
 		break;
+	case TCM_PARAMETER_LIST_LENGTH_ERROR:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* PARAMETER LIST LENGTH ERROR */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
+		break;
 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
 		/* CURRENT ERROR */
 		buffer[0] = 0x70;
drivers/vhost/tcm_vhost.c

@@ -47,6 +47,8 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
 #include <linux/virtio_scsi.h>
+#include <linux/llist.h>
+#include <linux/bitmap.h>
 
 #include "vhost.c"
 #include "vhost.h"

@@ -58,14 +60,20 @@ enum {
 	VHOST_SCSI_VQ_IO = 2,
 };
 
+#define VHOST_SCSI_MAX_TARGET	256
+#define VHOST_SCSI_MAX_VQ	128
+
 struct vhost_scsi {
-	struct tcm_vhost_tpg *vs_tpg;	/* Protected by vhost_scsi->dev.mutex */
+	/* Protected by vhost_scsi->dev.mutex */
+	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+	bool vs_endpoint;
 
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[3];
+	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
-	struct list_head vs_completion_list; /* cmd completion queue */
-	spinlock_t vs_completion_lock; /* protects s_completion_list */
+	struct llist_head vs_completion_list; /* cmd completion queue */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */

@@ -77,6 +85,12 @@ static struct workqueue_struct *tcm_vhost_workqueue;
 static DEFINE_MUTEX(tcm_vhost_mutex);
 static LIST_HEAD(tcm_vhost_list);
 
+static int iov_num_pages(struct iovec *iov)
+{
+	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
+	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
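iov_num_pages() above counts how many pages an iovec touches, including partial pages at both ends: it rounds the end address up to a page boundary, rounds the start address down, and divides the difference by the page size. Equivalent standalone arithmetic (a sketch assuming a 4096-byte page):

    #include <stdio.h>
    #include <sys/uio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    static int iov_num_pages(const struct iovec *iov)
    {
    	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
    	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> 12;
    }

    int main(void)
    {
    	/* 100 bytes starting 50 bytes before a page boundary span 2 pages */
    	struct iovec iov = { .iov_base = (void *)(0x2000 - 50), .iov_len = 100 };

    	printf("%d\n", iov_num_pages(&iov));	/* prints 2 */
    	return 0;
    }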
@@ -301,9 +315,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 
-	spin_lock_bh(&vs->vs_completion_lock);
-	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
-	spin_unlock_bh(&vs->vs_completion_lock);
+	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
 
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }

@@ -347,27 +359,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
-	struct vhost_scsi *vs)
-{
-	struct tcm_vhost_cmd *tv_cmd = NULL;
-
-	spin_lock_bh(&vs->vs_completion_lock);
-	if (list_empty(&vs->vs_completion_list)) {
-		spin_unlock_bh(&vs->vs_completion_lock);
-		return NULL;
-	}
-
-	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
-			    tvc_completion_list) {
-		list_del(&tv_cmd->tvc_completion_list);
-		break;
-	}
-	spin_unlock_bh(&vs->vs_completion_lock);
-	return tv_cmd;
-}
-
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner

@@ -377,12 +368,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_completion_work);
+	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
+	struct virtio_scsi_cmd_resp v_rsp;
 	struct tcm_vhost_cmd *tv_cmd;
+	struct llist_node *llnode;
+	struct se_cmd *se_cmd;
+	int ret, vq;
 
-	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
-		struct virtio_scsi_cmd_resp v_rsp;
-		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
-		int ret;
+	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+	llnode = llist_del_all(&vs->vs_completion_list);
+	while (llnode) {
+		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+				     tvc_completion_list);
+		llnode = llist_next(llnode);
+		se_cmd = &tv_cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

@@ -395,15 +394,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
 		       v_rsp.sense_len);
 		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-		if (likely(ret == 0))
-			vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
-		else
+		if (likely(ret == 0)) {
+			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
+			vq = tv_cmd->tvc_vq - vs->vqs;
+			__set_bit(vq, signal);
+		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
 		vhost_scsi_free_cmd(tv_cmd);
 	}
 
-	vhost_signal(&vs->dev, &vs->vqs[2]);
+	vq = -1;
+	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
+		< VHOST_SCSI_MAX_VQ)
+		vhost_signal(&vs->dev, &vs->vqs[vq]);
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(

@@ -426,7 +430,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
 	tv_cmd->tvc_tag = v_req->tag;
 	tv_cmd->tvc_task_attr = v_req->task_attr;
 	tv_cmd->tvc_exp_data_len = exp_data_len;

@@ -442,40 +445,47 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
-	unsigned int sgl_count, void __user *ptr, size_t len, int write)
+	unsigned int sgl_count, struct iovec *iov, int write)
 {
+	unsigned int npages = 0, pages_nr, offset, nbytes;
 	struct scatterlist *sg = sgl;
-	unsigned int npages = 0;
-	int ret;
+	void __user *ptr = iov->iov_base;
+	size_t len = iov->iov_len;
+	struct page **pages;
+	int ret, i;
 
-	while (len > 0) {
-		struct page *page;
-		unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
-		unsigned int nbytes = min_t(unsigned int,
-				PAGE_SIZE - offset, len);
+	pages_nr = iov_num_pages(iov);
+	if (pages_nr > sgl_count)
+		return -ENOBUFS;
 
-		if (npages == sgl_count) {
-			ret = -ENOBUFS;
-			goto err;
-		}
+	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
 
-		ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
-		BUG_ON(ret == 0); /* we should either get our page or fail */
-		if (ret < 0)
-			goto err;
+	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+	/* No pages were pinned */
+	if (ret < 0)
+		goto out;
+	/* Less pages pinned than wanted */
+	if (ret != pages_nr) {
+		for (i = 0; i < ret; i++)
+			put_page(pages[i]);
+		ret = -EFAULT;
+		goto out;
+	}
 
-		sg_set_page(sg, page, nbytes, offset);
+	while (len > 0) {
+		offset = (uintptr_t)ptr & ~PAGE_MASK;
+		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+		sg_set_page(sg, pages[npages], nbytes, offset);
 		ptr += nbytes;
 		len -= nbytes;
 		sg++;
 		npages++;
 	}
-	return npages;
 
-err:
-	/* Put pages that we hold */
-	for (sg = sgl; sg != &sgl[npages]; sg++)
-		put_page(sg_page(sg));
+	ret = npages;
+out:
+	kfree(pages);
 	return ret;
 }

@@ -491,11 +501,9 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 	 * Find out how long sglist needs to be
 	 */
 	sgl_count = 0;
-	for (i = 0; i < niov; i++) {
-		sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
-				PAGE_SIZE - 1) >> PAGE_SHIFT) -
-				((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
-	}
+	for (i = 0; i < niov; i++)
+		sgl_count += iov_num_pages(&iov[i]);
+
 	/* TODO overflow checking */
 
 	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);

@@ -510,8 +518,7 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
 	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
 	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
-					iov[i].iov_len, write);
+		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
 		if (ret < 0) {
 			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 				put_page(sg_page(&tv_cmd->tvc_sgl[i]));

@@ -563,19 +570,19 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	}
 }
 
-static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
+	struct vhost_virtqueue *vq)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[2];
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;
 	u32 exp_data_len, data_first, data_num, data_direction;
 	unsigned out, in, i;
 	int head, ret;
+	u8 target;
 
 	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	tv_tpg = vs->vs_tpg;
-	if (unlikely(!tv_tpg))
+	if (unlikely(!vs->vs_endpoint))
 		return;
 
 	mutex_lock(&vq->mutex);

@@ -643,6 +650,28 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 			break;
 		}
 
+		/* Extract the tpgt */
+		target = v_req.lun[1];
+		tv_tpg = vs->vs_tpg[target];
+
+		/* Target does not exist, fail the request */
+		if (unlikely(!tv_tpg)) {
+			struct virtio_scsi_cmd_resp __user *resp;
+			struct virtio_scsi_cmd_resp rsp;
+
+			memset(&rsp, 0, sizeof(rsp));
+			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+			resp = vq->iov[out].iov_base;
+			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+			if (!ret)
+				vhost_add_used_and_signal(&vs->dev,
+							  vq, head, 0);
+			else
+				pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+			continue;
+		}
+
 		exp_data_len = 0;
 		for (i = 0; i < data_num; i++)
 			exp_data_len += vq->iov[data_first + i].iov_len;

@@ -658,6 +687,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 			": %d\n", tv_cmd, exp_data_len, data_direction);
 
 		tv_cmd->tvc_vhost = vs;
+		tv_cmd->tvc_vq = vq;
 
 		if (unlikely(vq->iov[out].iov_len !=
 				sizeof(struct virtio_scsi_cmd_resp))) {

@@ -738,7 +768,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 						poll.work);
 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
 
-	vhost_scsi_handle_vq(vs);
+	vhost_scsi_handle_vq(vs, vq);
 }
 
 /*

@@ -751,7 +781,8 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
-	int index;
+	bool match = false;
+	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */

@@ -762,7 +793,6 @@ static int vhost_scsi_set_endpoint(
 			return -EFAULT;
 		}
 	}
-	mutex_unlock(&vs->dev.mutex);
 
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {

@@ -777,30 +807,33 @@ static int vhost_scsi_set_endpoint(
 		}
 		tv_tport = tv_tpg->tport;
 
-		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
-		    (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
-			tv_tpg->tv_tpg_vhost_count++;
-			mutex_unlock(&tv_tpg->tv_tpg_mutex);
-			mutex_unlock(&tcm_vhost_mutex);
-
-			mutex_lock(&vs->dev.mutex);
-			if (vs->vs_tpg) {
-				mutex_unlock(&vs->dev.mutex);
-				mutex_lock(&tv_tpg->tv_tpg_mutex);
-				tv_tpg->tv_tpg_vhost_count--;
+		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				mutex_unlock(&tcm_vhost_mutex);
+				mutex_unlock(&vs->dev.mutex);
 				return -EEXIST;
 			}
-
-			vs->vs_tpg = tv_tpg;
+			tv_tpg->tv_tpg_vhost_count++;
+			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
-			mutex_unlock(&vs->dev.mutex);
-			return 0;
+			match = true;
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
 	mutex_unlock(&tcm_vhost_mutex);
-	return -EINVAL;
+
+	if (match) {
+		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
+		       sizeof(vs->vs_vhost_wwpn));
+		vs->vs_endpoint = true;
+		ret = 0;
+	} else {
+		ret = -EEXIST;
+	}
+
+	mutex_unlock(&vs->dev.mutex);
+	return ret;
 }
 
 static int vhost_scsi_clear_endpoint(

@@ -809,7 +842,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
-	int index, ret;
+	int index, ret, i;
+	u8 target;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */

@@ -819,27 +853,32 @@ static int vhost_scsi_clear_endpoint(
 			goto err;
 		}
 	}
+	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+		target = i;
 
-	if (!vs->vs_tpg) {
-		ret = -ENODEV;
-		goto err;
-	}
-	tv_tpg = vs->vs_tpg;
-	tv_tport = tv_tpg->tport;
-
-	if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
-	    (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
-		pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
-			" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
-			tv_tport->tport_name, tv_tpg->tport_tpgt,
-			t->vhost_wwpn, t->vhost_tpgt);
-		ret = -EINVAL;
-		goto err;
+		tv_tpg = vs->vs_tpg[target];
+		if (!tv_tpg)
+			continue;
+
+		tv_tport = tv_tpg->tport;
+		if (!tv_tport) {
+			ret = -ENODEV;
+			goto err;
+		}
+
+		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+			pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+				tv_tport->tport_name, tv_tpg->tport_tpgt,
+				t->vhost_wwpn, t->vhost_tpgt);
+			ret = -EINVAL;
+			goto err;
+		}
+		tv_tpg->tv_tpg_vhost_count--;
+		vs->vs_tpg[target] = NULL;
+		vs->vs_endpoint = false;
 	}
-	tv_tpg->tv_tpg_vhost_count--;
-	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
 
 	return 0;
 
 err:

@@ -850,20 +889,19 @@ static int vhost_scsi_clear_endpoint(
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
-	int r;
+	int r, i;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
-	INIT_LIST_HEAD(&s->vs_completion_list);
-	spin_lock_init(&s->vs_completion_lock);
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
-	r = vhost_dev_init(&s->dev, s->vqs, 3);
+	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
+		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
+	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
 	if (r < 0) {
 		kfree(s);
 		return r;

@@ -876,16 +914,12 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 static int vhost_scsi_release(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s = f->private_data;
+	struct vhost_scsi_target t;
 
-	if (s->vs_tpg && s->vs_tpg->tport) {
-		struct vhost_scsi_target backend;
-
-		memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
-				sizeof(backend.vhost_wwpn));
-		backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
-		vhost_scsi_clear_endpoint(s, &backend);
-	}
-
+	mutex_lock(&s->dev.mutex);
+	memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
+	mutex_unlock(&s->dev.mutex);
+	vhost_scsi_clear_endpoint(s, &t);
 	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
 	kfree(s);

@@ -899,9 +933,10 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
 
 static void vhost_scsi_flush(struct vhost_scsi *vs)
 {
-	vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
-	vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
-	vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
 }
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
drivers/vhost/tcm_vhost.h

@@ -23,6 +23,8 @@ struct tcm_vhost_cmd {
 	struct virtio_scsi_cmd_resp __user *tvc_resp;
 	/* Pointer to vhost_scsi for our device */
 	struct vhost_scsi *tvc_vhost;
+	/* Pointer to vhost_virtqueue for the cmd */
+	struct vhost_virtqueue *tvc_vq;
 	/* Pointer to vhost nexus memory */
 	struct tcm_vhost_nexus *tvc_nexus;
 	/* The TCM I/O descriptor that is accessed via container_of() */

@@ -34,7 +36,7 @@ struct tcm_vhost_cmd {
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
-	struct list_head tvc_completion_list;
+	struct llist_node tvc_completion_list;
 };
 
 struct tcm_vhost_nexus {

@@ -93,9 +95,11 @@ struct tcm_vhost_tport {
 *
 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
 *            RFC-v2 vhost-scsi userspace.  Add GET_ABI_VERSION ioctl usage
+* ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
+*            All the targets under vhost_wwpn can be seen and used by guset.
 */
 
-#define VHOST_SCSI_ABI_VERSION 0
+#define VHOST_SCSI_ABI_VERSION 1
 
 struct vhost_scsi_target {
 	int abi_version;
include/target/target_core_backend.h

@@ -35,6 +35,7 @@ struct se_subsystem_api {
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
 	unsigned char *(*get_sense_buffer)(struct se_cmd *);
+	bool (*get_write_cache)(struct se_device *);
 };
 
 struct sbc_ops {

@@ -52,11 +53,13 @@ void	target_complete_cmd(struct se_cmd *, u8);
 
 sense_reason_t	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
 sense_reason_t	spc_emulate_report_luns(struct se_cmd *cmd);
-sector_t	spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t	spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
+sense_reason_t	spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
 
 sense_reason_t	sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
 u32	sbc_get_device_rev(struct se_device *dev);
 u32	sbc_get_device_type(struct se_device *dev);
+sector_t	sbc_get_write_same_sectors(struct se_cmd *cmd);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
include/target/target_core_base.h

@@ -44,7 +44,7 @@
 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
 #define TG_PT_GROUP_NAME_BUF			256
 /* Used to parse VPD into struct t10_vpd */
-#define VPD_TMP_BUF_SIZE			128
+#define VPD_TMP_BUF_SIZE			254
 /* Used by transport_generic_cmd_sequencer() */
 #define READ_BLOCK_LEN				6
 #define READ_CAP_LEN				8

@@ -75,6 +75,8 @@
 #define DA_MAX_WRITE_SAME_LEN			0
 /* Default max transfer length */
 #define DA_FABRIC_MAX_SECTORS			8192
+/* Use a model alias based on the configfs backend device name */
+#define DA_EMULATE_MODEL_ALIAS			0
 /* Emulation for Direct Page Out */
 #define DA_EMULATE_DPO				0
 /* Emulation for Forced Unit Access WRITEs */

@@ -193,6 +195,7 @@ enum tcm_sense_reason_table {
 	TCM_RESERVATION_CONFLICT		= R(0x10),
 	TCM_ADDRESS_OUT_OF_RANGE		= R(0x11),
 	TCM_OUT_OF_RESOURCES			= R(0x12),
+	TCM_PARAMETER_LIST_LENGTH_ERROR		= R(0x13),
 #undef R
 };
 

@@ -211,7 +214,6 @@ enum tcm_tmreq_table {
 	TMR_LUN_RESET		= 5,
 	TMR_TARGET_WARM_RESET	= 6,
 	TMR_TARGET_COLD_RESET	= 7,
-	TMR_FABRIC_TMR		= 255,
 };
 
 /* fabric independent task management response values */

@@ -592,6 +594,7 @@ struct se_dev_entry {
 };
 
 struct se_dev_attrib {
+	int		emulate_model_alias;
 	int		emulate_dpo;
 	int		emulate_fua_write;
 	int		emulate_fua_read;