mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge branch 'for-linus-core' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* 'for-linus-core' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (38 commits)
  target: Bump version to v4.1.0-rc1-ml
  target: remove custom hex2bin() implementation
  target: fix typo Assoication -> Association
  target: Update QUEUE ALGORITHM MODIFIER control page default
  target: ->map_task_SG conversion to ->map_control_SG and ->map_data_SG
  target: Follow up core updates from AGrover and HCH (round 4)
  target: Eliminate usage of struct se_mem
  target: Pass 2nd param of transport_split_cdb by value
  target: Enforce 1 page max for control cdb buffer sizes
  target: Make all control CDBs scatter-gather
  target: Implement Block Device Characteristics VPD page
  target: Fix reporting of supported VPD pages
  target: Allow for built-in target modules
  tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible
  tcm_fc: Makefile cleanups
  loopback: Fix memory leak in tcm_loop_make_scsi_hba()
  loopback: Remove duplicate scsi/scsi_tcq.h include
  loopback: off by one in tcm_loop_make_naa_tpg()
  target/iblock: Remove unused iblock_dev members
  target/iblock: Use request_queue->nr_request for se_device defaults
  ...
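Most of the churn in the diffs below is mechanical: raw printk(KERN_ERR ...) / printk(KERN_INFO ...) calls become pr_err() / pr_debug(), and ad-hoc -1/-2 returns become negative errno codes. A hedged sketch of the logging side of that conversion (the pr_fmt define is the usual kernel idiom, not something shown in this diff):

	/* Must appear before any includes so pr_*() picks it up */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	pr_err("Unable to allocate struct tcm_loop_cmd\n");
	/* expands to printk(KERN_ERR "tcm_loop: Unable to allocate ...\n") */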
This commit is contained in: commit 72f96e0e38
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
 	help
 	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
 	  fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
-	bool "TCM loopback fabric module CDB debug code"
-	depends on LOOPBACK_TARGET
-	help
-	  Say Y here to enable the TCM loopback fabric module CDB debug code
@@ -31,7 +31,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 		set_host_byte(sc, DID_ERROR);
 		return NULL;
 	}
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
-	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 		set_host_byte(sc, DID_NO_CONNECT);
 		return NULL;
 	}
 
-	transport_device_setup_cmd(se_cmd);
 	return se_cmd;
 }
 
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
 	int ret;
 	/*
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
 		/*
 		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
 		 */
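The hunk above is typical of the errno cleanup in this series: opaque -1/-2 sentinels become standard <linux/errno.h> codes that callers can test symbolically. A minimal sketch of the pattern (the function and struct names here are illustrative, not from the tree):

	int example_allocate(struct example *e)
	{
		if (!e)
			return -EINVAL;	/* bad argument, was "-2" */
		e->buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!e->buf)
			return -ENOMEM;	/* allocation failure, was "-1" */
		return 0;
	}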
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 */
 		return PYX_TRANSPORT_USE_SENSE_REASON;
 	}
-	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
-	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = (void *)scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (T_TASK(se_cmd)->t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
-
-			mem_bidi_ptr = (void *)sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
-	}
-	/*
-	 * Map the SG memory into struct se_mem->page linked list using the same
-	 * physical memory at sg->page_link.
-	 * For BIDI commands, pass in the extra READ buffer
-	 * to transport_generic_map_mem_to_cmd() below..
-	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+	if (se_cmd->t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
+
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
+	}
+
+	/* Tell the core about our preallocated memory */
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
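After this change the loopback fabric no longer special-cases DMA_NONE or BIDI set-up itself; it hands the scatterlists that the SCSI midlayer already built straight to the core. A hedged sketch of the general idea, with a made-up consumer callback standing in for the TCM core entry point:

	#include <linux/scatterlist.h>

	/*
	 * Illustrative only: pass an existing SG table to a consumer
	 * instead of copying the data into a private buffer.
	 */
	static int example_map_preallocated(struct scatterlist *sgl, u32 nents,
			int (*consume)(struct scatterlist *, u32))
	{
		if (!sgl || !nents)
			return 0;	/* nothing to map, e.g. DMA_NONE */
		return consume(sgl, nents);
	}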
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * Release the struct se_cmd, which will make a callback to release
 	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
 	 */
-	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+	transport_generic_free_cmd(se_cmd, 0, 0);
 }
 
 /*
  * Called from struct target_core_fabric_ops->release_cmd_to_pool()
  */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
 {
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
 
-	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 		" scsi_buf_len: %u\n", sc->device->host->host_no,
 		sc->device->id, sc->device->channel, sc->device->lun,
 		sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 */
 	tl_nexus = tl_hba->tl_nexus;
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to perform device reset without"
+		pr_err("Unable to perform device reset without"
 			" active I_T Nexus\n");
 		return FAILED;
 	}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		pr_err("Unable to allocate memory for tl_cmd\n");
 		return FAILED;
 	}
 
 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 	if (!tl_tmr) {
-		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		pr_err("Unable to allocate memory for tl_tmr\n");
 		goto release;
 	}
 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	/*
 	 * Allocate the LUN_RESET TMR
 	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
 				TMR_LUN_RESET);
 	if (IS_ERR(se_cmd->se_tmr_req))
 		goto release;
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun
 	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 		goto release;
 	/*
 	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 		SUCCESS : FAILED;
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1, 1, 0);
+		transport_generic_free_cmd(se_cmd, 1, 0);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 	kfree(tl_tmr);
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh = scsi_host_alloc(&tcm_loop_driver_template,
 			sizeof(struct tcm_loop_hba));
 	if (!sh) {
-		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		pr_err("Unable to allocate struct scsi_host\n");
 		return -ENODEV;
 	}
 	tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("%s: scsi_add_host failed\n", __func__);
 		scsi_host_put(sh);
 		return -ENODEV;
 	}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
 
 	ret = device_register(&tl_hba->dev);
 	if (ret) {
-		printk(KERN_ERR "device_register() failed for"
+		pr_err("device_register() failed for"
 			" tl_hba->dev: %d\n", ret);
 		return -ENODEV;
 	}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
 
 	tcm_loop_primary = root_device_register("tcm_loop_0");
 	if (IS_ERR(tcm_loop_primary)) {
-		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		pr_err("Unable to allocate tcm_loop_primary\n");
 		return PTR_ERR(tcm_loop_primary);
 	}
 
 	ret = bus_register(&tcm_loop_lld_bus);
 	if (ret) {
-		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 		goto dev_unreg;
 	}
 
 	ret = driver_register(&tcm_loop_driverfs);
 	if (ret) {
-		printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for"
 			"tcm_loop_driverfs\n");
 		goto bus_unreg;
 	}
 
-	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	pr_debug("Initialized TCM Loop Core Bus\n");
 	return ret;
 
 bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
 	bus_unregister(&tcm_loop_lld_bus);
 	root_device_unregister(tcm_loop_primary);
 
-	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
 static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 	case SCSI_PROTOCOL_ISCSI:
 		return iscsi_get_fabric_proto_ident(se_tpg);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
 		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 					format_code, buf);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
 		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 					format_code);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
 		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 					port_nexus_ptr);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
 
 	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 	if (!tl_nacl) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 		return NULL;
 	}
 
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
-	/*
-	 * Since TCM_loop is already passing struct scatterlist data from
-	 * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
-	 * to be handled here.
-	 */
-	return;
-}
-
 static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
 {
 	/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 		" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 		" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	if (se_cmd->sense_buffer &&
 	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 
-		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 				SCSI_SENSE_BUFFERSIZE);
 		sc->result = SAM_STAT_CHECK_CONDITION;
 		set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
 	 */
 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 	return 0;
 }
 
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 				se_lun->unpacked_lun);
 	if (!sd) {
-		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
 			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 		return;
 	}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
 	smp_mb__after_atomic_dec();
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
 
 /* End items for tcm_loop_port_cit */
@@ -1020,14 +991,14 @@ static int tcm_loop_make_nexus(
 	int ret = -ENOMEM;
 
 	if (tl_tpg->tl_hba->tl_nexus) {
-		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
 	/*
@@ -1054,9 +1025,9 @@ static int tcm_loop_make_nexus(
 	 * transport_register_session()
 	 */
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, (void *)tl_nexus);
+			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
@@ -1082,13 +1053,13 @@ static int tcm_loop_drop_nexus(
 		return -ENODEV;
 
 	if (atomic_read(&tpg->tl_tpg_port_count)) {
-		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 			" active TPG port count: %d\n",
 			atomic_read(&tpg->tl_tpg_port_count));
 		return -EPERM;
 	}
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
@@ -1144,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	 * tcm_loop_make_nexus()
 	 */
 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
 			" max: %d\n", page, TL_WWN_ADDR_LEN);
 		return -EINVAL;
 	}
@@ -1153,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
-			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1164,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "fc.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
-			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1175,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "iqn.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
-			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1183,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 		port_ptr = &i_port[0];
 		goto check_newline;
 	}
-	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
 		" %s\n", i_port);
 	return -EINVAL;
 	/*
@@ -1223,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 
 	tpgt_str = strstr(name, "tpgt_");
 	if (!tpgt_str) {
-		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+		pr_err("Unable to locate \"tpgt_#\" directory"
 			" group\n");
 		return ERR_PTR(-EINVAL);
 	}
 	tpgt_str += 5; /* Skip ahead of "tpgt_" */
 	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
 
-	if (tpgt > TL_TPGS_PER_HBA) {
-		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
 			" %u\n", tpgt, TL_TPGS_PER_HBA);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1242,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
 	 */
 	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
-			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
@@ -1274,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
 	 */
 	core_tpg_deregister(se_tpg);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 }
@@ -1295,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 
 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
 	if (!tl_hba) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	/*
@@ -1314,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 		goto check_len;
 	}
 	ptr = strstr(name, "iqn.");
-	if (ptr) {
-		tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
-		goto check_len;
+	if (!ptr) {
+		pr_err("Unable to locate prefix for emulated Target "
+			"Port: %s\n", name);
+		ret = -EINVAL;
+		goto out;
 	}
-
-	printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
-		" %s\n", name);
-	return ERR_PTR(-EINVAL);
+	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
 
 check_len:
 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+		pr_err("Emulated NAA %s Address: %s, exceeds"
 			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
 			TL_WWN_ADDR_LEN);
-		kfree(tl_hba);
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto out;
 	}
 	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
 
@@ -1344,7 +1314,7 @@ check_len:
 
 	sh = tl_hba->sh;
 	tcm_loop_hba_no_cnt++;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
 
@@ -1367,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
 	 */
 	device_unregister(&tl_hba->dev);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
 		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
 		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
@@ -1402,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
 	 * Register the top level struct config_item_type with TCM core
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
-		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+	if (IS_ERR(fabric)) {
+		pr_err("tcm_loop_register_configfs() failed!\n");
+		return PTR_ERR(fabric);
 	}
 	/*
 	 * Setup the fabric API of function pointers used by target_core_mod
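target_fabric_configfs_init() now reports failure with an ERR_PTR() value rather than NULL, so the caller tests it with IS_ERR() and propagates the encoded errno via PTR_ERR(). A minimal sketch of that convention (example_init() and struct widget are hypothetical):

	#include <linux/err.h>

	struct widget *w = example_init();	/* may return ERR_PTR(-ENOMEM) etc. */
	if (IS_ERR(w))
		return PTR_ERR(w);		/* propagate the encoded errno */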
@@ -1435,20 +1405,12 @@ static int tcm_loop_register_configfs(void)
 	fabric->tf_ops.tpg_release_fabric_acl =
 				&tcm_loop_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
-	/*
-	 * Since tcm_loop is mapping physical memory from Linux/SCSI
-	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
-	 * we do not need TCM to allocate a iovec array for
-	 * virtual memory address mappings
-	 */
-	fabric->tf_ops.alloc_cmd_iovecs = NULL;
 	/*
 	 * Used for setting up remaining TCM resources in process context
 	 */
 	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
 	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
-	fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
-	fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
 	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
 	fabric->tf_ops.close_session = &tcm_loop_close_session;
 	fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1465,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
 				&tcm_loop_set_default_node_attributes;
 	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
 	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
-	fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
 	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
 	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1503,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		printk(KERN_ERR "target_fabric_configfs_register() for"
+		pr_err("target_fabric_configfs_register() for"
 			" TCM_Loop failed!\n");
 		target_fabric_configfs_free(fabric);
 		return -1;
@@ -1512,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
 	 * Setup our local pointer to *fabric.
 	 */
 	tcm_loop_fabric_configfs = fabric;
-	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
 		" tcm_loop_fabric_configfs\n");
 	return 0;
 }
@@ -1524,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
 
 	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
 	tcm_loop_fabric_configfs = NULL;
-	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+	pr_debug("TCM_LOOP[0] - Cleared"
 		" tcm_loop_fabric_configfs\n");
 }
 
@@ -1537,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
 				__alignof__(struct tcm_loop_cmd),
 				0, NULL);
 	if (!tcm_loop_cmd_cache) {
-		printk(KERN_ERR "kmem_cache_create() for"
+		pr_debug("kmem_cache_create() for"
 			" tcm_loop_cmd_cache failed\n");
 		return -ENOMEM;
 	}
@@ -16,12 +16,6 @@
  */
 #define TL_SCSI_MAX_CMD_LEN		32
 
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
 struct tcm_loop_cmd {
 	/* State of Linux/SCSI CDB+Data descriptor */
 	u32 sc_cmd_state;
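With the custom TL_CDB_DEBUG() macro and its Kconfig switch gone (matching the Kconfig hunk at the top of this diff), the driver relies on the generic pr_debug() machinery, which CONFIG_DYNAMIC_DEBUG can toggle per call site at runtime instead of at build time. A hedged sketch of the replacement pattern:

	/* Before: compile-time switch via a custom macro */
	/* TL_CDB_DEBUG("got CDB: 0x%02x\n", cdb[0]); */

	/* After: standard helper; enable at runtime with dynamic debug, e.g.
	 *   echo 'module tcm_loop +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	pr_debug("got CDB: 0x%02x\n", cdb[0]);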
[File diff suppressed because it is too large]
@@ -23,6 +23,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/kernel.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
 static int
 target_emulate_inquiry_std(struct se_cmd *cmd)
 {
-	struct se_lun *lun = SE_LUN(cmd);
-	struct se_device *dev = SE_DEV(cmd);
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_lun *lun = cmd->se_lun;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
 
 	/*
 	 * Make sure we at least have 6 bytes of INQUIRY response
 	 * payload going back for EVPD=0
 	 */
 	if (cmd->data_length < 6) {
-		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+		pr_err("SCSI Inquiry payload length: %u"
 			" too small for EVPD=0\n", cmd->data_length);
-		return -1;
+		return -EINVAL;
 	}
 
+	buf = transport_kmap_first_data_page(cmd);
+
 	buf[0] = dev->transport->get_device_type(dev);
 	if (buf[0] == TYPE_TAPE)
 		buf[1] = 0x80;
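This is the "Make all control CDBs scatter-gather" change: instead of a preallocated kernel buffer at cmd->t_task->t_task_buf, control-CDB payloads now live in scatter-gather pages, so the emulation code maps the first data page before writing the response and unmaps it when done. A rough sketch of the pattern, built on the kmap()/kunmap() primitives the transport helpers presumably wrap (the example_* names are illustrative):

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>

	/* Illustrative stand-in for transport_kmap_first_data_page() */
	static unsigned char *example_kmap_first_page(struct scatterlist *sgl)
	{
		return kmap(sg_page(sgl)) + sgl->offset;
	}

	/* ... fill the mapped response buffer ... */

	static void example_kunmap_first_page(struct scatterlist *sgl)
	{
		kunmap(sg_page(sgl));
	}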
@@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 	/*
 	 * Enable SCCS and TPGS fields for Emulated ALUA
 	 */
-	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
 		target_fill_alua_data(lun->lun_sep, buf);
 
 	if (cmd->data_length < 8) {
 		buf[4] = 1; /* Set additional length to 1 */
-		return 0;
+		goto out;
 	}
 
 	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
@@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 	 */
 	if (cmd->data_length < 36) {
 		buf[4] = 3; /* Set additional length to 3 */
-		return 0;
+		goto out;
 	}
 
 	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
 	snprintf((unsigned char *)&buf[16], 16, "%s",
-		 &DEV_T10_WWN(dev)->model[0]);
+		 &dev->se_sub_dev->t10_wwn.model[0]);
 	snprintf((unsigned char *)&buf[32], 4, "%s",
-		 &DEV_T10_WWN(dev)->revision[0]);
+		 &dev->se_sub_dev->t10_wwn.revision[0]);
 	buf[4] = 31; /* Set additional length to 31 */
-	return 0;
-}
 
-/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
-{
-	buf[1] = 0x00;
-	if (cmd->data_length < 8)
-		return 0;
-
-	buf[4] = 0x0;
-	/*
-	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
-	 * Registered Extended LUN WWN has been set via ConfigFS
-	 * during device creation/restart.
-	 */
-	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
-			SDF_EMULATED_VPD_UNIT_SERIAL) {
-		buf[3] = 3;
-		buf[5] = 0x80;
-		buf[6] = 0x83;
-		buf[7] = 0x86;
-	}
+out:
+	transport_kunmap_first_data_page(cmd);
 	return 0;
 }
 
@@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_dev;
 	u16 len = 0;
 
 	buf[1] = 0x80;
 	if (dev->se_sub_dev->su_dev_flags &
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		u32 unit_serial_len;
 
 		unit_serial_len =
-			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
 		if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 			return 0;
 		}
 		len += sprintf((unsigned char *)&buf[4], "%s",
-			&DEV_T10_WWN(dev)->unit_serial[0]);
+			&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	struct se_lun *lun = SE_LUN(cmd);
+	struct se_device *dev = cmd->se_dev;
+	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = NULL;
 	struct se_portal_group *tpg = NULL;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char binary, binary_new;
-	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
 	u32 prod_len;
 	u32 unit_serial_len, off = 0;
-	int i;
 	u16 len = 0, id_len;
 
 	buf[1] = 0x83;
 	off = 4;
 
 	/*
@@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	/* CODE SET == Binary */
 	buf[off++] = 0x1;
 
-	/* Set ASSOICATION == addressed logical unit: 0)b */
+	/* Set ASSOCIATION == addressed logical unit: 0)b */
 	buf[off] = 0x00;
 
 	/* Identifier/Designator type == NAA identifier */
-	buf[off++] = 0x3;
+	buf[off++] |= 0x3;
 	off++;
 
 	/* Identifier/Designator length */
@@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	 * VENDOR_SPECIFIC_IDENTIFIER and
 	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
 	 */
-	binary = transport_asciihex_to_binaryhex(
-				&DEV_T10_WWN(dev)->unit_serial[0]);
-	buf[off++] |= (binary & 0xf0) >> 4;
-	for (i = 0; i < 24; i += 2) {
-		binary_new = transport_asciihex_to_binaryhex(
-				&DEV_T10_WWN(dev)->unit_serial[i+2]);
-		buf[off] = (binary & 0x0f) << 4;
-		buf[off++] |= (binary_new & 0xf0) >> 4;
-		binary = binary_new;
-	}
+	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
+	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
 
 	len = 20;
 	off = (len + 4);
 
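This is the "remove custom hex2bin() implementation" commit from the shortlog: the open-coded nibble loop built on transport_asciihex_to_binaryhex() is replaced by the generic hex_to_bin()/hex2bin() helpers from <linux/kernel.h> (which is why that include was added above). A small illustration of what the helpers do:

	#include <linux/kernel.h>

	u8 nibble = hex_to_bin('f');	/* 0x0f: one ASCII hex digit */

	u8 out[4];
	/* Packs "deadbeef" into {0xde, 0xad, 0xbe, 0xef};
	 * count is the number of output bytes. */
	hex2bin(out, "deadbeef", 4);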
@@ -263,7 +233,7 @@ check_t10_vend_desc:
 	if (dev->se_sub_dev->su_dev_flags &
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		unit_serial_len =
-			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
 		if ((len + (id_len + 4) +
@@ -274,7 +244,7 @@ check_t10_vend_desc:
 		}
 		id_len += sprintf((unsigned char *)&buf[off+12],
 				"%s:%s", prod,
-				&DEV_T10_WWN(dev)->unit_serial[0]);
+				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 	}
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -312,10 +282,10 @@ check_port:
 			goto check_tpgi;
 		}
 		buf[off] =
-			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x1; /* CODE SET == Binary */
 		buf[off] = 0x80; /* Set PIV=1 */
-		/* Set ASSOICATION == target port: 01b */
+		/* Set ASSOCIATION == target port: 01b */
 		buf[off] |= 0x10;
 		/* DESIGNATOR TYPE == Relative target port identifer */
 		buf[off++] |= 0x4;
@@ -335,7 +305,7 @@ check_port:
 		 * section 7.5.1 Table 362
 		 */
check_tpgi:
-		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+		if (dev->se_sub_dev->t10_alua.alua_type !=
 				SPC3_ALUA_EMULATED)
 			goto check_scsi_name;
 
@@ -349,7 +319,7 @@ check_tpgi:
 
 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-		if (!(tg_pt_gp)) {
+		if (!tg_pt_gp) {
 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 			goto check_lu_gp;
 		}
@@ -357,10 +327,10 @@ check_tpgi:
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
 		buf[off] =
-			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x1; /* CODE SET == Binary */
 		buf[off] = 0x80; /* Set PIV=1 */
-		/* Set ASSOICATION == target port: 01b */
+		/* Set ASSOCIATION == target port: 01b */
 		buf[off] |= 0x10;
 		/* DESIGNATOR TYPE == Target port group identifier */
 		buf[off++] |= 0x5;
@@ -380,12 +350,12 @@ check_lu_gp:
 			goto check_scsi_name;
 	}
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem))
+	if (!lu_gp_mem)
 		goto check_scsi_name;
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if (!(lu_gp)) {
+	if (!lu_gp) {
 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 		goto check_scsi_name;
 	}
@@ -409,7 +379,7 @@ check_lu_gp:
 	 * section 7.5.1 Table 362
 	 */
check_scsi_name:
-	scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+	scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
 	/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
 	scsi_name_len += 10;
 	/* Check for 4-byte padding */
@@ -424,10 +394,10 @@ check_scsi_name:
 		goto set_len;
 	}
 	buf[off] =
-		(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 	buf[off++] |= 0x3; /* CODE SET == UTF-8 */
 	buf[off] = 0x80; /* Set PIV=1 */
-	/* Set ASSOICATION == target port: 01b */
+	/* Set ASSOCIATION == target port: 01b */
 	buf[off] |= 0x10;
 	/* DESIGNATOR TYPE == SCSI name string */
 	buf[off++] |= 0x8;
@@ -438,9 +408,9 @@ check_scsi_name:
 	 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
 	 * UTF-8 encoding.
 	 */
-	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+	tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
 	scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
-			TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
 	scsi_name_len += 1 /* Include NULL terminator */;
 	/*
 	 * The null-terminated, null-padded (see 4.4.2) SCSI
@@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	if (cmd->data_length < 60)
 		return 0;
 
-	buf[1] = 0x86;
 	buf[2] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
 		buf[6] = 0x01;
 	return 0;
 }
@@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_dev;
 	int have_tp = 0;
 
 	/*
@@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
 	 * different page length for Thin Provisioning.
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
 		have_tp = 1;
 
 	if (cmd->data_length < (0x10 + 4)) {
-		printk(KERN_INFO "Received data_length: %u"
+		pr_debug("Received data_length: %u"
 			" too small for EVPD 0xb0\n",
 			cmd->data_length);
-		return -1;
+		return -EINVAL;
 	}
 
 	if (have_tp && cmd->data_length < (0x3c + 4)) {
-		printk(KERN_INFO "Received data_length: %u"
+		pr_debug("Received data_length: %u"
 			" too small for TPE=1 EVPD 0xb0\n",
 			cmd->data_length);
 		have_tp = 0;
 	}
 
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[1] = 0xb0;
 	buf[3] = have_tp ? 0x3c : 0x10;
 
+	/* Set WSNZ to 1 */
+	buf[4] = 0x01;
+
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
 	 */
@@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
 
 	/*
 	 * Exit now if we don't support TP or the initiator sent a too
@@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM UNMAP LBA COUNT
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
 
 	/*
 	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
 			&buf[24]);
 
 	/*
 	 * Set OPTIMAL UNMAP GRANULARITY
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
 
 	/*
 	 * UNMAP GRANULARITY ALIGNMENT
 	 */
-	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
 			&buf[32]);
-	if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+	if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
 		buf[32] |= 0x80; /* Set the UGAVALID bit */
 
 	return 0;
 }
 
+/* Block Device Characteristics VPD page */
+static int
+target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x3c;
+
+	if (cmd->data_length >= 5 &&
+	    dev->se_sub_dev->se_dev_attrib.is_nonrot)
+		buf[5] = 1;
+
+	return 0;
+}
+
 /* Thin Provisioning VPD */
 static int
 target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * defined in table 162.
 	 */
 	buf[0] = dev->transport->get_device_type(dev);
-	buf[1] = 0xb2;
 
 	/*
 	 * Set Hardcoded length mentioned above for DP=0
@@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
 	 * that the device server does not support the UNMAP command.
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
 		buf[5] = 0x80;
 
 	/*
@@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	 * A TPWS bit set to zero indicates that the device server does not
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
 		buf[5] |= 0x40;
 
 	return 0;
 }
 
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+	uint8_t		page;
+	int		(*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+	{ .page = 0x00, .emulate = target_emulate_evpd_00 },
+	{ .page = 0x80, .emulate = target_emulate_evpd_80 },
+	{ .page = 0x83, .emulate = target_emulate_evpd_83 },
+	{ .page = 0x86, .emulate = target_emulate_evpd_86 },
+	{ .page = 0xb0, .emulate = target_emulate_evpd_b0 },
+	{ .page = 0xb1, .emulate = target_emulate_evpd_b1 },
+	{ .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+};
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	int p;
+
+	if (cmd->data_length < 8)
+		return 0;
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (cmd->se_dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = ARRAY_SIZE(evpd_handlers);
+		for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
+				      cmd->data_length - 4); ++p)
+			buf[p + 4] = evpd_handlers[p].page;
+	}
+
+	return 0;
+}
+
 static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	unsigned char *buf = cmd->t_task->t_task_buf;
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	int p, ret;
 
 	if (!(cdb[1] & 0x1))
 		return target_emulate_inquiry_std(cmd);
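The evpd_handlers[] table replaces the old switch over VPD page codes, and the 0x00 "supported pages" response is now generated from the same array that drives dispatch, so the two can never drift apart (this is the "Fix reporting of supported VPD pages" commit). The same table-driven idiom in miniature, with hypothetical handler functions:

	struct handler {
		u8	code;
		int	(*fn)(unsigned char *buf);
	};

	static int handle_a(unsigned char *buf) { return 0; }
	static int handle_b(unsigned char *buf) { return 0; }

	static const struct handler handlers[] = {
		{ 0x00, handle_a },
		{ 0x80, handle_b },
	};

	/* Lookup loop: unknown codes get -EINVAL */
	static int dispatch(u8 code, unsigned char *buf)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(handlers); i++)
			if (handlers[i].code == code)
				return handlers[i].fn(buf);
		return -EINVAL;
	}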
@@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd)
 	 * payload length left for the next outgoing EVPD metadata
 	 */
 	if (cmd->data_length < 4) {
-		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+		pr_err("SCSI Inquiry payload length: %u"
 			" too small for EVPD=1\n", cmd->data_length);
-		return -1;
+		return -EINVAL;
 	}
 
+	buf = transport_kmap_first_data_page(cmd);
+
 	buf[0] = dev->transport->get_device_type(dev);
 
-	switch (cdb[2]) {
-	case 0x00:
-		return target_emulate_evpd_00(cmd, buf);
-	case 0x80:
-		return target_emulate_evpd_80(cmd, buf);
-	case 0x83:
-		return target_emulate_evpd_83(cmd, buf);
-	case 0x86:
-		return target_emulate_evpd_86(cmd, buf);
-	case 0xb0:
-		return target_emulate_evpd_b0(cmd, buf);
-	case 0xb2:
-		return target_emulate_evpd_b2(cmd, buf);
-	default:
-		printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
-		return -1;
-	}
+	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+		if (cdb[2] == evpd_handlers[p].page) {
+			buf[1] = cdb[2];
+			ret = evpd_handlers[p].emulate(cmd, buf);
+			transport_kunmap_first_data_page(cmd);
+			return ret;
+		}
 
-	return 0;
+	transport_kunmap_first_data_page(cmd);
+	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+	return -EINVAL;
 }
 
 static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
 
@@ -675,30 +697,36 @@ target_emulate_readcapacity(struct se_cmd *cmd)
 	else
 		blocks = (u32)blocks_long;
 
+	buf = transport_kmap_first_data_page(cmd);
+
 	buf[0] = (blocks >> 24) & 0xff;
 	buf[1] = (blocks >> 16) & 0xff;
 	buf[2] = (blocks >> 8) & 0xff;
 	buf[3] = blocks & 0xff;
-	buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
-	buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
-	buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
-	buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
 	/*
 	 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
 		put_unaligned_be32(0xFFFFFFFF, &buf[0]);
 
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
 static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
+	buf = transport_kmap_first_data_page(cmd);
+
 	buf[0] = (blocks >> 56) & 0xff;
 	buf[1] = (blocks >> 48) & 0xff;
 	buf[2] = (blocks >> 40) & 0xff;
@@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
 	buf[5] = (blocks >> 16) & 0xff;
 	buf[6] = (blocks >> 8) & 0xff;
 	buf[7] = blocks & 0xff;
-	buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
-	buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
-	buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
-	buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
 	/*
 	 * Set Thin Provisioning Enable bit following sbc3r22 in section
 	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
 		buf[14] = 0x80;
 
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
@@ -736,6 +766,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
 	p[0] = 0x0a;
 	p[1] = 0x0a;
 	p[2] = 2;
+	/*
+	 * From spc4r23, 7.4.7 Control mode page
+	 *
+	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+	 * restrictions on the algorithm used for reordering commands
+	 * having the SIMPLE task attribute (see SAM-4).
+	 *
+	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
+	 * Code      Description
+	 *  0h       Restricted reordering
+	 *  1h       Unrestricted reordering allowed
+	 *  2h to 7h Reserved
+	 *  8h to Fh Vendor specific
+	 *
+	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+	 * the device server shall order the processing sequence of commands
+	 * having the SIMPLE task attribute such that data integrity is maintained
+	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+	 * requests is halted at any time, the final value of all data observable
+	 * on the medium shall be the same as if all the commands had been processed
+	 * with the ORDERED task attribute).
+	 *
+	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+	 * device server may reorder the processing sequence of commands having the
+	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
+	 * command sequence order shall be explicitly handled by the application client
+	 * through the selection of appropriate commands and task attributes.
+	 */
+	p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
 	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
 	 * to the number of commands completed with one of those status codes.
 	 */
-	p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
-	       (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
 	/*
 	 * From spc4r17, section 7.4.6 Control mode Page
 	 *
@@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+	p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
 	p[8] = 0xff;
 	p[9] = 0xff;
 	p[11] = 30;
@@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
 {
 	p[0] = 0x08;
 	p[1] = 0x12;
-	if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
 
@@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
 static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	char *cdb = cmd->t_task->t_task_cdb;
-	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf;
 	int type = dev->transport->get_device_type(dev);
 	int offset = (ten) ? 8 : 4;
 	int length = 0;
@@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 		length += target_modesense_control(dev, &buf[offset+length]);
 		break;
 	default:
-		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+		pr_err("Got Unknown Mode Page: 0x%02x\n",
 				cdb[2] & 0x3f);
 		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
 	}
@@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 		buf[0] = (offset >> 8) & 0xff;
 		buf[1] = offset & 0xff;
 
-		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
 		    (cmd->se_deve &&
 		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 			target_modesense_write_protect(&buf[3], type);
 
-		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
-		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
 			target_modesense_dpofua(&buf[3], type);
 
 		if ((offset + 2) > cmd->data_length)
@@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 		offset -= 1;
 		buf[0] = offset & 0xff;
 
-		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
 		    (cmd->se_deve &&
 		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 			target_modesense_write_protect(&buf[2], type);
 
-		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
-		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
 			target_modesense_dpofua(&buf[2], type);
 
 		if ((offset + 1) > cmd->data_length)
 			offset = cmd->data_length;
 	}
 
+	rbuf = transport_kmap_first_data_page(cmd);
+	memcpy(rbuf, buf, offset);
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
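Unlike the INQUIRY paths, MODE SENSE keeps building the response in a local buf[] and only maps the scatter-gather page at the end to copy the finished page out. A minimal sketch of that stage-then-copy pattern (the transport_* helper names are the ones used above; build_mode_page() is hypothetical):

	unsigned char page[SE_MODE_PAGE_BUF];	/* staging buffer */
	unsigned char *rbuf;
	int len;

	len = build_mode_page(page);		/* hypothetical builder */

	rbuf = transport_kmap_first_data_page(cmd);
	memcpy(rbuf, page, len);		/* one copy into the mapped SG page */
	transport_kunmap_first_data_page(cmd);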
@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task->t_task_cdb;
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
int err = 0;

if (cdb[1] & 0x01) {
printk(KERN_ERR "REQUEST_SENSE description emulation not"
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {

buf = transport_kmap_first_data_page(cmd);

if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
err = -EINVAL;
goto end;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
err = -EINVAL;
goto end;
}
/*
* NO ADDITIONAL SENSE INFORMATION
@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}

end:
transport_kunmap_first_data_page(cmd);

return 0;
}

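target_emulate_request_sense() above fills an 18-byte fixed-format sense payload by hand: response code 0x70 for CURRENT ERROR, the sense key in byte 2, ADDITIONAL SENSE LENGTH 0x0A in byte 7, and ASC/ASCQ in bytes 12 and 13. A minimal sketch of that layout, assuming the SPC fixed-format rules rather than quoting the kernel code:

#include <string.h>
#include <stdint.h>

static void fill_fixed_sense(uint8_t *buf, uint8_t key,
			     uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;	/* CURRENT ERROR, fixed format */
	buf[2] = key;	/* e.g. 0x06 for UNIT ATTENTION */
	buf[7] = 0x0A;	/* additional sense length covers bytes 8..17 */
	buf[12] = asc;
	buf[13] = ascq;
}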
@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd)
static int
target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
int ret, offset;
int ret = 0, offset;
unsigned short dl, bd_dl;

/* First UNMAP block descriptor starts at 8 byte offset */
@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task)
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);

buf = transport_kmap_first_data_page(cmd);

ptr = &buf[offset];
printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);

ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
pr_err("blkdev_issue_discard() failed: %d\n",
ret);
return -1;
goto err;
}

ptr += 16;
@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task)

task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
err:
transport_kunmap_first_data_page(cmd);

return ret;
}

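The UNMAP emulation above walks the parameter list that follows the 8-byte header: each block descriptor is 16 bytes, a big-endian 64-bit LBA followed by a big-endian 32-bit block count, with each descriptor handed to the backend's do_discard() hook. A self-contained sketch of that walk, assuming the SBC descriptor layout (helper names invented; the callback stands in for do_discard()):

#include <stdint.h>
#include <stddef.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static int walk_unmap_descriptors(const uint8_t *param, uint32_t param_len,
				  int (*discard)(uint64_t lba, uint32_t range))
{
	const uint8_t *ptr = param + 8;	/* first descriptor at offset 8 */
	uint32_t left = (param_len > 8) ? param_len - 8 : 0;

	while (left >= 16) {
		if (discard(get_be64(&ptr[0]), get_be32(&ptr[8])) < 0)
			return -1;
		ptr += 16;
		left -= 16;
	}
	return 0;
}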
/*
@ -1008,23 +1085,36 @@ target_emulate_unmap(struct se_task *task)
* Note this is not used for TCM/pSCSI passthrough
*/
static int
target_emulate_write_same(struct se_task *task)
target_emulate_write_same(struct se_task *task, int write_same32)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
sector_t lba = cmd->t_task->t_task_lba;
unsigned int range;
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
sector_t range;
sector_t lba = cmd->t_task_lba;
unsigned int num_blocks;
int ret;
/*
* Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit
* range when non zero is supplied, otherwise calculate the remaining
* range based on ->get_blocks() - starting LBA.
*/
if (write_same32)
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
else
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);

range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
if (num_blocks != 0)
range = num_blocks;
else
range = (dev->transport->get_blocks(dev) - lba);

printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
(unsigned long long)lba, range);
pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
(unsigned long long)lba, (unsigned long long)range);

ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
return -1;
pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
return ret;
}

task->task_scsi_status = GOOD;
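The reworked target_emulate_write_same() above takes the NUMBER OF LOGICAL BLOCKS from byte 10 of a WRITE_SAME_16 CDB or byte 28 of a WRITE_SAME_32 payload, treating zero as "discard from the LBA to the end of the device". A hedged sketch of just that range selection (function names invented for the sketch):

#include <stdint.h>

static uint32_t ws_get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint64_t write_same_range(const uint8_t *cdb, int write_same32,
				 uint64_t lba, uint64_t dev_blocks)
{
	uint32_t num_blocks = write_same32 ? ws_get_be32(&cdb[28])
					   : ws_get_be32(&cdb[10]);

	/* zero means: everything from lba to the last block */
	return num_blocks ? num_blocks : dev_blocks - lba;
}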
@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task)
int
transport_emulate_control_cdb(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned short service_action;
int ret = 0;

switch (cmd->t_task->t_task_cdb[0]) {
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
printk(KERN_ERR "Unsupported SA: 0x%02x\n",
cmd->t_task->t_task_cdb[1] & 0x1f);
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case UNMAP:
if (!dev->transport->do_discard) {
printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task);
ret = target_emulate_write_same(task, 0);
break;
case VARIABLE_LENGTH_CMD:
service_action =
get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
get_unaligned_be16(&cmd->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task);
ret = target_emulate_write_same(task, 1);
break;
default:
printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
printk(KERN_ERR
"SYNCHRONIZE_CACHE emulation not supported"
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS:
break;
default:
printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task->t_task_cdb[0], dev->transport->name);
pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}

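For VARIABLE_LENGTH_CMD (opcode 0x7f) the dispatch above cannot key off the opcode alone, so it pulls the 16-bit SERVICE ACTION out of CDB bytes 8-9 and switches on that, WRITE_SAME_32 being the only service action emulated here. A one-line sketch of the extraction (helper name invented):

#include <stdint.h>

static uint16_t variable_length_service_action(const uint8_t *cdb)
{
	/* big-endian 16-bit field in bytes 8-9 of the long CDB */
	return ((uint16_t)cdb[8] << 8) | cdb[9];
}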
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
printk("Setup generic %s\n", __stringify(_name)); \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}

/* Start of tfc_tpg_mappedlun_cit */
@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
lun_access = deve->lun_flags;
else
lun_access =
(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
/*
* Determine if the underlying MappedLUN has already been released..
*/
if (!(deve->se_lun))
if (!deve->se_lun)
return 0;

lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);

printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
TPG_TFO(se_tpg)->get_fabric_name(),
se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");

return count;
@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
int ret = 0;

acl_ci = &group->cg_item;
if (!(acl_ci)) {
printk(KERN_ERR "Unable to locatel acl_ci\n");
if (!acl_ci) {
pr_err("Unable to locatel acl_ci\n");
return NULL;
}

buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!(buf)) {
printk(KERN_ERR "Unable to allocate memory for name buf\n");
if (!buf) {
pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(

lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
if (!(lacl)) {
if (!lacl) {
ret = -EINVAL;
goto out;
}
@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
pr_err("Unable to allocate lacl_cg->default_groups\n");
ret = -ENOMEM;
goto out;
}
@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;

ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
ret = -ENOMEM;
goto out;
}
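The make_mappedlun path above allocates default_groups as a NULL-terminated array of pointers: N visible groups need N + 1 slots so iteration can stop at the sentinel, which is what the drop path's "for (i = 0; grp->default_groups[i]; i++)" loop relies on. A userspace sketch of that allocation pattern (illustration only; it sizes pointer slots, and the helper name is invented):

#include <stdlib.h>

struct config_group;	/* opaque for the sketch */

static struct config_group **alloc_default_groups(unsigned int nr_groups)
{
	/* one extra, zeroed slot acts as the terminating NULL */
	return calloc(nr_groups + 1, sizeof(struct config_group *));
}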
@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
int i;

ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
for (i = 0; ml_stat_grp->default_groups[i]; i++) {
df_item = &ml_stat_grp->default_groups[i]->cg_item;
ml_stat_grp->default_groups[i] = NULL;
@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;

if (!(tf->tf_ops.fabric_make_nodeacl)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
if (!tf->tf_ops.fabric_make_nodeacl) {
pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}

@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;

if (!(tf->tf_ops.fabric_make_np)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
if (!tf->tf_ops.fabric_make_np) {
pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}

se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
if (!(se_tpg_np) || IS_ERR(se_tpg_np))
if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);

se_tpg_np->tpg_np_parent = se_tpg;
@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_show_offline_bit(lun, page);
@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_store_offline_bit(lun, page, count);
@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_show_secondary_status(lun, page);
@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_store_secondary_status(lun, page, count);
@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_show_secondary_write_metadata(lun, page);
@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;

if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;

return core_alua_store_secondary_write_metadata(lun, page, count);
@ -781,13 +757,13 @@ static int target_fabric_port_link(
tf = se_tpg->se_tpg_wwn->wwn_tf;

if (lun->lun_se_dev != NULL) {
printk(KERN_ERR "Port Symlink already exists\n");
pr_err("Port Symlink already exists\n");
return -EEXIST;
}

dev = se_dev->se_dev_ptr;
if (!(dev)) {
printk(KERN_ERR "Unable to locate struct se_device pointer from"
if (!dev) {
pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
@ -795,8 +771,8 @@ static int target_fabric_port_link(

lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
if ((IS_ERR(lun_p)) || !(lun_p)) {
printk(KERN_ERR "core_dev_add_lun() failed\n");
if (IS_ERR(lun_p) || !lun_p) {
pr_err("core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
int errno;

if (strstr(name, "lun_") != name) {
printk(KERN_ERR "Unable to locate \'_\" in"
pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);

lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
if (!(lun))
if (!lun)
return ERR_PTR(-EINVAL);

lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
pr_err("Unable to allocate lun_cg->default_groups\n");
return ERR_PTR(-ENOMEM);
}

@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;

port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
port_stat_grp = &lun->port_stat_grps.stat_group;
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
pr_err("Unable to allocate port_stat_grp->default_groups\n");
errno = -ENOMEM;
goto out;
}
@ -941,7 +917,7 @@ static void target_fabric_drop_lun(
struct config_group *lun_cg, *port_stat_grp;
int i;

port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
port_stat_grp = &lun->port_stat_grps.stat_group;
for (i = 0; port_stat_grp->default_groups[i]; i++) {
df_item = &port_stat_grp->default_groups[i]->cg_item;
port_stat_grp->default_groups[i] = NULL;
@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;

if (!(tf->tf_ops.fabric_make_tpg)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
if (!tf->tf_ops.fabric_make_tpg) {
pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}

se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
if (!(se_tpg) || IS_ERR(se_tpg))
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;

if (!(tf->tf_ops.fabric_make_wwn)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
if (!tf->tf_ops.fabric_make_wwn) {
pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}

wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
if (!(wwn) || IS_ERR(wwn))
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);

wwn->wwn_tf = tf;

@ -25,6 +25,7 @@
*
******************************************************************************/

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
unsigned char binary, *ptr;
int i;
u32 off = 4;
unsigned char *ptr;

/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */

for (i = 0; i < 16; i += 2) {
binary = transport_asciihex_to_binaryhex(&ptr[i]);
buf[off++] = binary;
}
hex2bin(&buf[4], ptr, 8);

/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
unsigned char binary, *ptr;
unsigned char *ptr;
int i;
u32 off = 8;
/*
@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0];

for (i = 0; i < 24; ) {
if (!(strncmp(&ptr[i], ":", 1))) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
binary = transport_asciihex_to_binaryhex(&ptr[i]);
buf[off++] = binary;
hex2bin(&buf[off++], &ptr[i], 1);
i += 2;
}
/*
@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding;

if ((add_len + 4) != tid_len) {
printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id(
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
if (!(p)) {
printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
if (!p) {
pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;

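These hunks belong to the "remove custom hex2bin() implementation" change in this merge: the driver-private transport_asciihex_to_binaryhex() loops are replaced by the kernel's hex2bin() helper, which packs each pair of ASCII hex digits into one output byte, so the 16-digit NAA WWN body fills 8 bytes of the Transport ID. A userspace sketch of the equivalent conversion (this is not the lib/hexdump.c source, and the names are invented):

#include <stdint.h>
#include <stddef.h>

static int hex_val(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* my_hex2bin(dst, src, n): n digit pairs -> n output bytes */
static int my_hex2bin(uint8_t *dst, const char *src, size_t count)
{
	while (count--) {
		int hi = hex_val(*src++);
		int lo = hex_val(*src++);

		if (hi < 0 || lo < 0)
			return -1;	/* non-hex input */
		*dst++ = (uint8_t)((hi << 4) | lo);
	}
	return 0;
}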
@ -42,18 +42,6 @@

#include "target_core_file.h"

#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif

static struct se_subsystem_api fileio_template;

/* fd_attach_hba(): (Part of se_subsystem_api_t template)
@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host;

fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!(fd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
return -1;
if (!fd_host) {
pr_err("Unable to allocate memory for struct fd_host\n");
return -ENOMEM;
}

fd_host->fd_host_id = host_id;

atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) fd_host;
hba->hba_ptr = fd_host;

printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" Target Core with TCQ Depth: %d MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id,
atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

return 0;
}
@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;

printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);

kfree(fd_host);
@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!(fd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
if (!fd_dev) {
pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}

fd_dev->fd_host = fd_host;

printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

return fd_dev;
}
@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs);

if (IS_ERR(dev_p)) {
printk(KERN_ERR "getname(%s) failed: %lu\n",
pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p);
goto fail;
@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice(

file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file);
goto fail;
}
if (!file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
pr_err("filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);

printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
printk(KERN_ERR "FILEIO: Missing fd_dev_size="
pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice(
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

dev = transport_add_device_to_core_hba(hba, &fileio_template,
se_dev, dev_flags, (void *)fd_dev,
se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
if (!(dev))
if (!dev)
goto fail;

fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;

printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);

@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)


static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;

fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
if (!(fd_req)) {
printk(KERN_ERR "Unable to allocate struct fd_request\n");
if (!fd_req) {
pr_err("Unable to allocate struct fd_request\n");
return NULL;
}

fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;

return &fd_req->fd_task;
}

static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
loff_t pos = (task->task_lba *
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;

iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
return -1;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}

for (i = 0; i < task->task_sg_num; i++) {
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}

old_fs = get_fs();
set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);

kfree(iov);
@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task)
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_readv() returned %d,"
pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
return -1;
return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
printk(KERN_ERR "vfs_readv() returned %d for non"
pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
return -1;
return ret;
}
}

@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
loff_t pos = (task->task_lba *
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;

iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
return -1;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_writev iov[]\n");
return -ENOMEM;
}

for (i = 0; i < task->task_sg_num; i++) {
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}

old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);

kfree(iov);

if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_writev() returned %d\n", ret);
return -1;
pr_err("vfs_writev() returned %d\n", ret);
return (ret < 0 ? ret : -EINVAL);
}

return 1;
@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task)

static void fd_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;

@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
/*
* Determine if we will be flushing the entire device.
*/
if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)

ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
int ret;

DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);

ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}

static int fd_do_task(struct se_task *task)
@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task)
ret = fd_do_writev(task);

if (ret > 0 &&
DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(cmd)->t_tasks_fua) {
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
cmd->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p);
kfree(arg_p);
printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
pr_err("strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
printk(KERN_INFO "FILEIO: Referencing Size: %llu"
pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}

printk(KERN_INFO "FILEIO: Using buffered I/O"
pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");

fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
printk(KERN_ERR "Missing fd_dev_name=\n");
return -1;
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}

return 0;
@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
DEV_ATTRIB(dev)->block_size);
dev->se_sub_dev->se_dev_attrib.block_size);

return blocks_long;
}

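fd_do_readv() and fd_do_writev() above translate the task's scatterlist into a struct iovec array and issue one vectored call at the byte position task_lba * block_size. A userspace analogue of the same pattern using preadv() (illustration only; struct seg and the function name are invented for the sketch):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdlib.h>
#include <sys/uio.h>

struct seg {
	void *buf;
	size_t len;
};

static ssize_t segmented_pread(int fd, const struct seg *segs, int nsegs,
			       uint64_t lba, uint32_t block_size)
{
	struct iovec *iov = calloc(nsegs, sizeof(*iov));
	off_t pos = (off_t)(lba * block_size);
	ssize_t ret;
	int i;

	if (!iov)
		return -1;
	/* one iovec entry per buffer segment, like the scatterlist loop */
	for (i = 0; i < nsegs; i++) {
		iov[i].iov_base = segs[i].buf;
		iov[i].iov_len = segs[i].len;
	}
	ret = preadv(fd, iov, nsegs, pos);
	free(iov);
	return ret;
}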
@ -4,8 +4,6 @@
#define FD_VERSION "4.0"

#define FD_MAX_DEV_NAME 256
/* Maximum queuedepth for the FILEIO HBA */
#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
@ -18,8 +16,6 @@ struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* FILEIO device */
struct fd_dev *fd_dev;
} ____cacheline_aligned;

#define FBDF_HAS_PATH 0x01

@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file copntains the iSCSI HBA Transport related functions.
* This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);

int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)

mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!(strcmp(s->name, sub_api->name))) {
printk(KERN_ERR "%p is already registered with"
if (!strcmp(s->name, sub_api->name)) {
pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);

printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)

hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
printk(KERN_ERR "Unable to allocate struct se_hba\n");
pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}

INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);

hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;

atomic_set(&hba->max_queue_depth, 0);
atomic_set(&hba->left_queue_depth, 0);

hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
if (ret < 0)
goto out_module_put;

spin_lock(&se_global->hba_lock);
hba->hba_id = se_global->g_hba_id_counter++;
list_add_tail(&hba->hba_list, &se_global->g_hba_list);
spin_unlock(&se_global->hba_lock);
spin_lock(&hba_lock);
hba->hba_id = hba_id_counter++;
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);

printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);

return hba;
@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba)

hba->transport->detach_hba(hba);

spin_lock(&se_global->hba_lock);
list_del(&hba->hba_list);
spin_unlock(&se_global->hba_lock);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
spin_unlock(&hba_lock);

printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);

if (hba->transport->owner)

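The hba hunks above drop the se_global singleton in favour of file-local state: a private spinlock, an id counter and a list head, each taken only around registration and unregistration. A userspace illustration of the same registry pattern with a pthread mutex (the struct and names here are invented; the exact kernel types are beside the point):

#include <pthread.h>
#include <stddef.h>

struct hba {
	unsigned int id;
	struct hba *next;
};

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hba_id_counter;
static struct hba *hba_list;

static void hba_register(struct hba *hba)
{
	/* assign the id and link in under the same critical section */
	pthread_mutex_lock(&hba_lock);
	hba->id = hba_id_counter++;
	hba->next = hba_list;
	hba_list = hba;
	pthread_mutex_unlock(&hba_lock);
}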
@ -47,12 +47,6 @@

#include "target_core_iblock.h"

#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
struct iblock_hba *ib_host;

ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
if (!(ib_host)) {
printk(KERN_ERR "Unable to allocate memory for"
if (!ib_host) {
pr_err("Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}

ib_host->iblock_host_id = host_id;

atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) ib_host;
hba->hba_ptr = ib_host;

printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
" Target Core TCQ Depth: %d\n", hba->hba_id,
ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
hba->hba_id, ib_host->iblock_host_id);

return 0;
}
@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;

printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

kfree(ib_host);
@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
struct iblock_hba *ib_host = hba->hba_ptr;

ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!(ib_dev)) {
printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
if (!ib_dev) {
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;

printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);

return ib_dev;
}
@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
u32 dev_flags = 0;
int ret = -EINVAL;

if (!(ib_dev)) {
printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
if (!ib_dev) {
pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
* These settings need to be made tunable..
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
if (!(ib_dev->ibd_bio_set)) {
printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
if (!ib_dev->ibd_bio_set) {
pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
printk(KERN_INFO "IBLOCK: Created bio_set()\n");
pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);

bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice(
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
dev_limits.hw_queue_depth = q->nr_requests;
dev_limits.queue_depth = q->nr_requests;

ib_dev->ibd_major = MAJOR(bd->bd_dev);
ib_dev->ibd_minor = MINOR(bd->bd_dev);
ib_dev->ibd_bd = bd;

dev = transport_add_device_to_core_hba(hba,
&iblock_template, se_dev, dev_flags, (void *)ib_dev,
&iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
if (!(dev))
if (!dev)
goto failed;

ib_dev->ibd_depth = dev->queue_depth;

/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
DEV_ATTRIB(dev)->max_unmap_lba_count =
dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
DEV_ATTRIB(dev)->unmap_granularity =
dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity;
DEV_ATTRIB(dev)->unmap_granularity_alignment =
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;

printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}

if (blk_queue_nonrot(q))
dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

return dev;

failed:
@ -211,8 +201,6 @@ failed:
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
ib_dev->ibd_major = 0;
ib_dev->ibd_minor = 0;
return ERR_PTR(ret);
}

@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
}

static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;

ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!(ib_req)) {
printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
if (!ib_req) {
pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}

ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);

if (block_size == DEV_ATTRIB(dev)->block_size)
if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;

switch (block_size) {
case 4096:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
int immed = (cmd->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;

@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
pr_err("IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
(DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(task->task_se_cmd)->t_tasks_fua))
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
task->task_se_cmd->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
pr_debug("Calling submit_bio() task: %p bio: %p"
" bio->bi_sector: %llu\n", task, bio,
(unsigned long long)bio->bi_sector);

submit_bio(rw, bio);
bio = nbio;
@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
int ret = 0, token;

opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
printk(KERN_ERR "Unable to set udev_path= while"
pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
match_int(args, &arg);
ib_dev->ibd_force = arg;
printk(KERN_INFO "IBLOCK: Set force=%d\n",
ib_dev->ibd_force);
break;
default:
break;
@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params(
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
return -1;
pr_err("Missing udev_path= parameters for IBLOCK\n");
return -EINVAL;
}

return 0;
@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params(
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: %d Minor: %d\n",
ibd->ibd_major, ibd->ibd_minor);
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}

return bl;
@ -585,103 +568,103 @@ static struct bio *iblock_get_bio(
struct bio *bio;

bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!(bio)) {
printk(KERN_ERR "Unable to allocate memory for bio\n");
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}

DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = (void *) task;
bio->bi_private = task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);

DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
pr_debug("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}

static int iblock_map_task_SG(struct se_task *task)
static int iblock_map_data_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_dev;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
u32 i, sg_num = task->task_sg_num;
u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
if (DEV_ATTRIB(dev)->block_size == 4096)
if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
else if (DEV_ATTRIB(dev)->block_size == 2048)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
else if (DEV_ATTRIB(dev)->block_size == 1024)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
else if (DEV_ATTRIB(dev)->block_size == 512)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", DEV_ATTRIB(dev)->block_size);
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}

bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
if (!(bio))
if (!bio)
return ret;

ib_req->ib_bio = bio;
hbio = tbio = bio;
/*
* Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
* from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
* from task->task_sg -> struct scatterlist memory.
*/
for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {

DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
bio->bi_sector);
DEBUG_IBLOCK("** task->task_size: %u\n",
pr_debug("*** Set bio->bi_sector: %llu\n",
(unsigned long long)bio->bi_sector);
|
||||
pr_debug("** task->task_size: %u\n",
|
||||
task->task_size);
|
||||
DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
|
||||
pr_debug("*** bio->bi_max_vecs: %u\n",
|
||||
bio->bi_max_vecs);
|
||||
DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
|
||||
pr_debug("*** bio->bi_vcnt: %u\n",
|
||||
bio->bi_vcnt);
|
||||
|
||||
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
|
||||
block_lba, sg_num);
|
||||
if (!(bio))
|
||||
if (!bio)
|
||||
goto fail;
|
||||
|
||||
tbio = tbio->bi_next = bio;
|
||||
DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
|
||||
pr_debug("-----------------> Added +1 bio: %p to"
|
||||
" list, Going to again\n", bio);
|
||||
goto again;
|
||||
}
|
||||
/* Always in 512 byte units for Linux/Block */
|
||||
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
|
||||
sg_num--;
|
||||
DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
|
||||
pr_debug("task: %p bio-add_page() passed!, decremented"
|
||||
" sg_num to %u\n", task, sg_num);
|
||||
DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
|
||||
" to %llu\n", task, block_lba);
|
||||
DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
|
||||
pr_debug("task: %p bio_add_page() passed!, increased lba"
|
||||
" to %llu\n", task, (unsigned long long)block_lba);
|
||||
pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
|
||||
" %u\n", task, bio->bi_vcnt);
|
||||
}
|
||||
|
||||
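The ladder above converts a SCSI LBA into Linux/Block 512-byte units with a hard-coded shift per supported block size. A minimal sketch of the same conversion as a single shift, assuming a power-of-two block_size of at least 512 (scsi_lba_to_512 is an illustrative name, not part of this patch):

#include <linux/log2.h>
#include <linux/types.h>

/* Illustrative only: collapses the 4096/2048/1024/512 ladder in
 * iblock_map_data_SG() into one shift, since ilog2(4096) - 9 == 3,
 * ilog2(2048) - 9 == 2, and so on. */
static inline sector_t scsi_lba_to_512(sector_t task_lba, u32 block_size)
{
	return task_lba << (ilog2(block_size) - 9);
}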
@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Set -EIO if !BIO_UPTODATE and the passed err is still 0
*/
if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;

if (err != 0) {
printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;

ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba, bio->bi_sector, err);
pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
/*
* Return GOOD status for task if zero ib_bio_err_cnt exists.
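iblock_bio_done() runs once per bio, but the task must complete exactly once, after the last bio finishes. Each bio took a reference on ib_bio_cnt in iblock_get_bio(), and atomic_dec_and_test() returns true for exactly one caller. A hedged sketch of the pattern (the bi_private wiring here is simplified; the driver actually stores the task pointer there):

/* Sketch of the "last completion wins" idiom used above. */
static void example_bio_done(struct bio *bio, int err)
{
	struct iblock_req *ibr = bio->bi_private;	/* assumed wiring */

	bio_put(bio);
	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;		/* other bios are still in flight */
	/* Last completion: report task status exactly once here. */
}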
@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.map_task_SG = iblock_map_task_SG,
.map_data_SG = iblock_map_data_SG,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,

@ -3,9 +3,6 @@

#define IBLOCK_VERSION "4.0"

#define IBLOCK_HBA_QUEUE_DEPTH 512
#define IBLOCK_DEVICE_QUEUE_DEPTH 32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
#define IBLOCK_LBA_SHIFT 9

@ -15,18 +12,12 @@ struct iblock_req {
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
struct iblock_dev *ib_dev;
} ____cacheline_aligned;

#define IBDF_HAS_UDEV_PATH 0x01
#define IBDF_HAS_FORCE 0x02

struct iblock_dev {
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
int ibd_force;
int ibd_major;
int ibd_minor;
u32 ibd_depth;
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
File diff suppressed because it is too large
@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation_template *, u64,
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,

@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;

static void pscsi_req_done(struct request *, int);

/* pscsi_get_sh():
*
*
*/
static struct Scsi_Host *pscsi_get_sh(u32 host_no)
{
struct Scsi_Host *sh = NULL;

sh = scsi_host_lookup(host_no);
if (IS_ERR(sh)) {
printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
" %u\n", host_no);
return NULL;
}

return sh;
}

/* pscsi_attach_hba():
*
* pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
int hba_depth;
struct pscsi_hba_virt *phv;

phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
if (!(phv)) {
printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
return -1;
if (!phv) {
pr_err("Unable to allocate struct pscsi_hba_virt\n");
return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);

hba->hba_ptr = (void *)phv;
hba->hba_ptr = phv;

printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
" Target Core with TCQ Depth: %d\n", hba->hba_id,
atomic_read(&hba->max_queue_depth));
pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);

return 0;
}
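This hunk is one instance of a conversion applied across the series: bare -1 returns become negative errno values, -ENOMEM here, -EINVAL and PTR_ERR() elsewhere, so callers can propagate a meaningful failure reason. A minimal sketch of the convention (example_attach is an invented name):

#include <linux/errno.h>
#include <linux/slab.h>

static int example_attach(struct se_hba *hba)
{
	void *priv = kzalloc(64, GFP_KERNEL);

	if (!priv)
		return -ENOMEM;		/* was "return -1" before the series */
	hba->hba_ptr = priv;
	return 0;
}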
@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
if (scsi_host) {
scsi_host_put(scsi_host);

printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);

kfree(phv);
@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
if (!(mode_flag)) {
if (!(sh))
if (!mode_flag) {
if (!sh)
return 0;

phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);

printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");

@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
sh = pscsi_get_sh(phv->phv_host_id);
if (!(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
sh = scsi_host_lookup(phv->phv_host_id);
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return -1;
return PTR_ERR(sh);
}
/*
* Usually the SCSI LLD will use the hostt->can_queue value to define
* its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
* this at all and set sh->can_queue at runtime.
*/
hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
sh->hostt->can_queue : sh->can_queue;

atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);

phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;

printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");

return 1;
@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)

buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return -1;
return -ENOMEM;

memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)

out_free:
kfree(buf);
return -1;
return -EPERM;
}

static void
@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
printk(KERN_ERR "page_83[3]: identifier"
pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
pr_debug("T10 VPD Identifer Length: %d\n", ident_len);

vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
printk(KERN_ERR "Unable to allocate memory for"
pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
pr_err("Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
q = sd->request_queue;
limits = &dev_limits.limits;
limits->logical_block_size = sd->sector_size;
limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
queue_max_hw_sectors(q) : sd->host->max_sectors;
limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
queue_max_sectors(q) : sd->host->max_sectors;
limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
dev_limits.hw_queue_depth = sd->queue_depth;
dev_limits.queue_depth = sd->queue_depth;
/*
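The limits hunk above replaces open-coded greater-than ternaries with min_t(), which is shorter and makes the comparison type explicit. A sketch of the equivalence (clamp_sectors is an invented helper):

#include <linux/kernel.h>

static int clamp_sectors(int host_max, int queue_max)
{
	/* open-coded form: (host_max > queue_max) ? queue_max : host_max */
	return min_t(int, host_max, queue_max);
}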
@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list(
pdv->pdv_sd = sd;

dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, (void *)pdv,
se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
if (!(dev)) {
if (!dev) {
pdv->pdv_sd = NULL;
return NULL;
}
@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
struct pscsi_dev_virt *pdv;

pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
if (!(pdv)) {
printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
if (!pdv) {
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;

printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
return (void *)pdv;
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
return pdv;
}

/*
@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
u32 dev_flags = 0;

if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;

dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
if (!dev) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);

return dev;
@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
u32 dev_flags = 0;

if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
spin_unlock_irq(sh->host_lock);

dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
if (!dev) {
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);

@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other(

spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev))
if (!dev)
return NULL;

printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);

@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;

if (!(pdv)) {
printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
if (!pdv) {
pr_err("Unable to locate struct pscsi_dev_virt"
" parameter\n");
return ERR_PTR(-EINVAL);
}
@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
if (!(sh)) {
if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "pSCSI: Unable to locate struct"
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return ERR_PTR(-ENODEV);
}
@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
printk(KERN_ERR "pSCSI: udev_path attribute has not"
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return ERR_PTR(-EINVAL);
}
@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
if (!(list_empty(&hba->hba_dev_list))) {
printk(KERN_ERR "pSCSI: Unable to set hba_mode"
if (!list_empty(&hba->hba_dev_list)) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return ERR_PTR(-EEXIST);
@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
sh = pscsi_get_sh(pdv->pdv_host_id);
if (!(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate"
sh = scsi_host_lookup(pdv->pdv_host_id);
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return ERR_PTR(-ENODEV);
return (struct se_device *) sh;
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return ERR_PTR(-EEXIST);
}
@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
break;
}

if (!(dev)) {
if (!dev) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
}
spin_unlock_irq(sh->host_lock);

printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (!TASK_CMD(task)->se_deve)
if (!task->task_se_cmd->se_deve)
goto after_mode_sense;

if (TASK_CMD(task)->se_deve->lun_flags &
if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = (unsigned char *)
T_TASK(task->task_se_cmd)->t_task_buf;
unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);

if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task)
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}

transport_kunmap_first_data_page(task->task_se_cmd);
}
}
after_mode_sense:
@ -766,8 +729,8 @@ after_mode_sense:
u32 blocksize;

buf = sg_virt(&sg[0]);
if (!(buf)) {
printk(KERN_ERR "Unable to get buf for scatterlist\n");
if (!buf) {
pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}

@ -797,33 +760,19 @@ after_mode_select:
}

static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
unsigned char *cdb = T_TASK(cmd)->t_task_cdb;

pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
if (!pt) {
printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}

/*
* If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
* allocate the extended CDB buffer for per struct se_task context
* pt->pscsi_cdb now.
* Dynamically alloc cdb space, since it may be larger than
* TCM_MAX_COMMAND_SIZE
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {

pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
if (!(pt->pscsi_cdb)) {
printk(KERN_ERR "pSCSI: Unable to allocate extended"
" pt->pscsi_cdb\n");
kfree(pt);
return NULL;
}
} else
pt->pscsi_cdb = &pt->__pscsi_cdb[0];
pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}

return &pt->pscsi_task;
}
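The rewritten pscsi_alloc_task() folds the CDB buffer into the task allocation itself; the header change further down declares pscsi_cdb[0] as a trailing array, the older spelling of a C99 flexible array member, so kzalloc(sizeof(*pt) + scsi_command_size(cdb), ...) reserves exactly the space the command needs and a single kfree() releases it. A generic sketch of the idiom with invented names:

#include <linux/slab.h>
#include <linux/string.h>

struct var_task {
	int		result;
	unsigned char	cdb[];	/* trailing flexible array member */
};

static struct var_task *var_task_alloc(const unsigned char *cdb, size_t len)
{
	struct var_task *t = kzalloc(sizeof(*t) + len, GFP_KERNEL);

	if (!t)
		return NULL;
	memcpy(t->cdb, cdb, len);
	return t;
}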
@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request(
* also set the end_io_data pointer to struct se_task.
*/
req->end_io = pscsi_req_done;
req->end_io_data = (void *)task;
req->end_io_data = task;
/*
* Load the referenced struct se_task's SCSI CDB into
* include/linux/blkdev.h:struct request->cmd
@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request(
/*
* Setup pointer for outgoing sense data.
*/
req->sense = (void *)&pt->pscsi_sense[0];
req->sense = &pt->pscsi_sense[0];
req->sense_len = 0;
}

@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
IS_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@ -920,14 +869,7 @@ static int pscsi_do_task(struct se_task *task)
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct se_cmd *cmd = task->task_se_cmd;

/*
* Release the extended CDB allocation from pscsi_alloc_task()
* if one exists.
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
kfree(pt->pscsi_cdb);
/*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "PSCSI[%d]: Unable to accept"
pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params(
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
return -1;
return -EINVAL;
}

return 0;
@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}

static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
static inline struct bio *pscsi_get_bio(int sg_num)
{
struct bio *bio;
/*
@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
* in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
if (!(bio)) {
printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
return bio;
}

#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif

static int __pscsi_map_task_SG(
static int __pscsi_map_SG(
struct se_task *task,
struct scatterlist *task_sg,
u32 task_sg_num,
@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG(
return 0;
/*
* For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
* the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
* the bio_vec maplist from task->task_sg ->
* struct scatterlist memory. The struct se_task->task_sg[] currently needs
* to be attached to struct bios for submission to Linux/SCSI using
* struct request to struct scsi_device->request_queue.
@ -1143,34 +1079,34 @@ static int __pscsi_map_task_SG(
* is ported to upstream SCSI passthrough functionality that accepts
* struct scatterlist->page_link or struct page as a parameter.
*/
DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);

for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;

DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);

while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);

if (!(bio)) {
if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
bio = pscsi_get_bio(pdv, nr_vecs);
if (!(bio))
bio = pscsi_get_bio(nr_vecs);
if (!bio)
goto fail;

if (rw)
bio->bi_rw |= REQ_WRITE;

DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG(
tbio = tbio->bi_next = bio;
}

DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);

@ -1194,11 +1130,11 @@ static int __pscsi_map_task_SG(
if (rc != bytes)
goto fail;

DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);

if (bio->bi_vcnt > nr_vecs) {
DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG(
* Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
* primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
if (!(bidi_read)) {
if (!bidi_read) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
if (!pt->pscsi_req) {
pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG(
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);

return task->task_sg_num;
return task->task_sg_nents;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG(
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req->next_rq)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
if (!pt->pscsi_req->next_rq) {
pr_err("pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);

return task->task_sg_num;
return task->task_sg_nents;
fail:
while (hbio) {
bio = hbio;
@ -1262,7 +1198,10 @@ fail:
return ret;
}
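__pscsi_map_SG() strings its bios together through bi_next, keeping a head pointer (hbio) to hand to blk_make_request() and a tail pointer (tbio) for constant-time append; that is what the tbio = tbio->bi_next = bio line does. A small sketch of the append step with hypothetical names:

/* Append bio to a bi_next-linked chain in O(1). */
static void chain_bio(struct bio **hbio, struct bio **tbio, struct bio *bio)
{
	bio->bi_next = NULL;
	if (!*hbio)
		*hbio = *tbio = bio;
	else
		*tbio = (*tbio)->bi_next = bio;
}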
static int pscsi_map_task_SG(struct se_task *task)
/*
* pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
*/
static int pscsi_map_SG(struct se_task *task)
{
int ret;

@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/

ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
task->task_sg_num, 1);
ret = __pscsi_map_SG(task, task->task_sg_bidi,
task->task_sg_nents, 1);
}

if (ret < 0)
@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task)
return 0;
}

/* pscsi_map_task_non_SG():
*
*
*/
static int pscsi_map_task_non_SG(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
int ret = 0;

if (pscsi_blk_get_request(task) < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;

if (!task->task_size)
return 0;

ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
pt->pscsi_req, T_TASK(cmd)->t_task_buf,
task->task_size, GFP_KERNEL);
if (ret < 0) {
printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
return 0;
}

static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status(
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
if ((task->task_scsi_status)) {
if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status(
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
TASK_CMD(task)->transport_error_status =
task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}

return;
}

static void pscsi_req_done(struct request *req, int uptodate)
@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = {
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
.map_task_non_SG = pscsi_map_task_non_SG,
.map_task_SG = pscsi_map_task_SG,
.map_control_SG = pscsi_map_SG,
.map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,

@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H

#define PSCSI_VERSION "v4.0"
#define PSCSI_VIRTUAL_HBA_DEPTH 2048

/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
@ -24,13 +23,12 @@

struct pscsi_plugin_task {
struct se_task pscsi_task;
unsigned char *pscsi_cdb;
unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;

#define PDF_HAS_CHANNEL_ID 0x01

@ -44,12 +44,8 @@

#include "target_core_rd.h"

static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
struct rd_host *rd_host;

rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
if (!(rd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
if (!rd_host) {
pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}

rd_host->rd_host_id = host_id;

atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) rd_host;
hba->hba_ptr = rd_host;

printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
RD_MAX_SECTORS);
pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, RD_MAX_SECTORS);

return 0;
}
@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;

printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

kfree(rd_host);
@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)

for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if ((pg)) {
if (pg) {
__free_page(pg);
page_count++;
}
@ -123,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
kfree(sg);
}

printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
struct scatterlist *sg;

if (rd_dev->rd_page_count <= 0) {
printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg_tables = (total_sg_needed / max_sg_per_table) + 1;

sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!(sg_table)) {
printk(KERN_ERR "Unable to allocate memory for Ramdisk"
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)

sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!(sg)) {
printk(KERN_ERR "Unable to allocate scatterlist array"
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}

sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
sg_init_table(sg, sg_per_table);

sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)

for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!(pg)) {
printk(KERN_ERR "Unable to allocate scatterlist"
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
total_sg_needed -= sg_per_table;
}

printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice(
struct rd_host *rd_host = hba->hba_ptr;

rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
if (!(rd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
if (!rd_dev) {
pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}

@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice(
return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

dev = transport_add_device_to_core_hba(hba,
(rd_dev->rd_direct) ? &rd_dr_template :
&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
&rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
if (!(dev))
if (!dev)
goto fail;

rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;

printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@ -296,14 +283,6 @@ fail:
return ERR_PTR(ret);
}

static struct se_device *rd_DIRECT_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;

rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
printk(KERN_ERR "Unable to allocate struct rd_request\n");
pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

return &rd_req->rd_task;
}
@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return sg_table;
}

printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);

return NULL;
@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
u32 rd_offset = req->rd_offset;

table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
if (!table)
return -EINVAL;

table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"

pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif

src_offset = rd_offset;

while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"

pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif

if (length > req->rd_size)
length = req->rd_size;

@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"

pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif

if (length > req->rd_size)
length = req->rd_size;

@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req)

memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif

req->rd_size -= length;
if (!(req->rd_size))
if (!req->rd_size)
return 0;

if (!page_end)
continue;

if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
pr_debug("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",

pr_debug("getting new page table for page: %u\n",
req->rd_page);
#endif

table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
if (!table)
return -EINVAL;

sg_s = &table->sg_table[j = 0];
}
@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
u32 rd_offset = req->rd_offset;

table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
if (!table)
return -EINVAL;

table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"

pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif

dst_offset = rd_offset;

while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"

pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif

if (length > req->rd_size)
length = req->rd_size;

@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"

pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif

if (length > req->rd_size)
length = req->rd_size;

@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req)

memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif

req->rd_size -= length;
if (!(req->rd_size))
if (!req->rd_size)
return 0;

if (!page_end)
continue;

if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
pr_debug("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",

pr_debug("getting new page table for page: %u\n",
req->rd_page);
#endif

table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
if (!table)
return -EINVAL;

sg_d = &table->sg_table[j = 0];
}
@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
unsigned long long lba;
int ret;

req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
DEV_ATTRIB(dev)->block_size;
(PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_size = task->task_size;

if (task->task_data_direction == DMA_FROM_DEVICE)
@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
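rd_MEMCPY_do_task() splits the task LBA into a ramdisk page index and a byte offset within that page, using do_div() so the 64-bit division also works on 32-bit builds. The arithmetic as a sketch, with invented names and assuming PAGE_SIZE is a whole multiple of block_size:

#include <linux/mm.h>
#include <asm/div64.h>

static void rd_split_lba(unsigned long long lba, u32 block_size,
			 u32 *page, u32 *offset)
{
	u32 blocks_per_page = PAGE_SIZE / block_size;
	u32 rem = do_div(lba, blocks_per_page);	/* lba becomes the quotient */

	*page = lba;			/* index into the sg page tables */
	*offset = rem * block_size;	/* byte offset within that page */
}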

/* rd_DIRECT_with_offset():
 *
 *
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!(req->rd_size))
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_without_offset():
 *
 *
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!(req->rd_size))
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk("page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_do_se_mem_map():
 *
 *
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			(PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -1;
	}
	/*
	 * Special case for if task_sg_chaining is enabled, then
	 * we setup struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!(transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				struct se_mem, se_list),
			task_offset)))
		return -1;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}

/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
@@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
	case Opt_rd_pages:
		match_int(args, &arg);
		rd_dev->rd_page_count = arg;
		printk(KERN_INFO "RAMDISK: Referencing Page"
		pr_debug("RAMDISK: Referencing Page"
			" Count: %u\n", rd_dev->rd_page_count);
		rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
		break;
@@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -1;
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
@@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			DEV_ATTRIB(dev)->block_size) - 1;
			dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}

static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1071,13 +754,8 @@ int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

@@ -1086,6 +764,5 @@ int __init rd_module_init(void)

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}
@@ -7,8 +7,6 @@

/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
/* Maximum queuedepth for the Ramdisk HBA */
#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
@@ -34,8 +32,6 @@ struct rd_request {
	u32	rd_page_count;
	/* Scatterlist count */
	u32	rd_size;
	/* Ramdisk device */
	struct rd_dev	*rd_dev;
} ____cacheline_aligned;

struct rd_dev_sg_table {
@@ -42,13 +42,13 @@
 */
void split_cdb_XX_6(
	unsigned long long lba,
	u32 *sectors,
	u32 sectors,
	unsigned char *cdb)
{
	cdb[1] = (lba >> 16) & 0x1f;
	cdb[2] = (lba >> 8) & 0xff;
	cdb[3] = lba & 0xff;
	cdb[4] = *sectors & 0xff;
	cdb[4] = sectors & 0xff;
}

/* split_cdb_XX_10():
@@ -57,11 +57,11 @@ void split_cdb_XX_6(
 */
void split_cdb_XX_10(
	unsigned long long lba,
	u32 *sectors,
	u32 sectors,
	unsigned char *cdb)
{
	put_unaligned_be32(lba, &cdb[2]);
	put_unaligned_be16(*sectors, &cdb[7]);
	put_unaligned_be16(sectors, &cdb[7]);
}

/* split_cdb_XX_12():
@@ -70,11 +70,11 @@ void split_cdb_XX_10(
 */
void split_cdb_XX_12(
	unsigned long long lba,
	u32 *sectors,
	u32 sectors,
	unsigned char *cdb)
{
	put_unaligned_be32(lba, &cdb[2]);
	put_unaligned_be32(*sectors, &cdb[6]);
	put_unaligned_be32(sectors, &cdb[6]);
}

/* split_cdb_XX_16():
@@ -83,11 +83,11 @@ void split_cdb_XX_12(
 */
void split_cdb_XX_16(
	unsigned long long lba,
	u32 *sectors,
	u32 sectors,
	unsigned char *cdb)
{
	put_unaligned_be64(lba, &cdb[2]);
	put_unaligned_be32(*sectors, &cdb[10]);
	put_unaligned_be32(sectors, &cdb[10]);
}

/*
@@ -97,9 +97,9 @@ void split_cdb_XX_16(
 */
void split_cdb_XX_32(
	unsigned long long lba,
	u32 *sectors,
	u32 sectors,
	unsigned char *cdb)
{
	put_unaligned_be64(lba, &cdb[12]);
	put_unaligned_be32(*sectors, &cdb[28]);
	put_unaligned_be32(sectors, &cdb[28]);
}
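
[Editor's note] The split_cdb_XX_*() hunks above change the sector count from pass-by-pointer (u32 *) to pass-by-value (u32): the callee only reads it, so the indirection bought nothing. The helpers pack the LBA and count into the CDB in big-endian byte order. A minimal userspace sketch of the READ(10) layout, with the kernel's put_unaligned_be32()/put_unaligned_be16() replaced by explicit shifts and the sample LBA and count made up for illustration:

	#include <stdio.h>

	static void fill_read10(unsigned char *cdb, unsigned int lba,
				unsigned short sectors)
	{
		/* bytes 2..5: big-endian LBA, as put_unaligned_be32(lba, &cdb[2]) */
		cdb[2] = (lba >> 24) & 0xff;
		cdb[3] = (lba >> 16) & 0xff;
		cdb[4] = (lba >> 8) & 0xff;
		cdb[5] = lba & 0xff;
		/* bytes 7..8: big-endian count, as put_unaligned_be16(sectors, &cdb[7]) */
		cdb[7] = (sectors >> 8) & 0xff;
		cdb[8] = sectors & 0xff;
	}

	int main(void)
	{
		unsigned char cdb[10] = { 0x28 };	/* READ(10) opcode */
		int i;

		fill_read10(cdb, 0x12345678, 0x0080);	/* hypothetical values */
		for (i = 0; i < 10; i++)
			printf("%02x ", cdb[i]);
		printf("\n");	/* prints: 28 00 12 34 56 78 00 00 80 00 */
		return 0;
	}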
@@ -1,10 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H

extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);

#endif /* TARGET_CORE_SCDB_H */
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name(
		return -ENODEV;
	/* scsiLuWwnName */
	return snprintf(page, PAGE_SIZE, "%s\n",
		(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
		(char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
		(strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
		dev->se_sub_dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);

@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend(
	struct se_subsystem_dev *se_subdev = container_of(sgrps,
			struct se_subsystem_dev, dev_stat_grps);
	struct se_device *dev = se_subdev->se_dev_ptr;
	int j;
	char str[28];
	int i;
	char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];

	if (!dev)
		return -ENODEV;

	/* scsiLuVendorId */
	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
	for (j = 0; j < 8; j++)
		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
			DEV_T10_WWN(dev)->vendor[j] : 0x20;
	str[8] = 0;
	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
			dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
	str[i] = '\0';
	return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(vend);
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
	struct se_subsystem_dev *se_subdev = container_of(sgrps,
			struct se_subsystem_dev, dev_stat_grps);
	struct se_device *dev = se_subdev->se_dev_ptr;
	int j;
	char str[28];
	int i;
	char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];

	if (!dev)
		return -ENODEV;

	/* scsiLuProductId */
	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
	for (j = 0; j < 16; j++)
		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
			DEV_T10_WWN(dev)->model[j] : 0x20;
	str[16] = 0;
	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
			dev->se_sub_dev->t10_wwn.model[i] : ' ';
	str[i] = '\0';
	return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(prod);
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev(
	struct se_subsystem_dev *se_subdev = container_of(sgrps,
			struct se_subsystem_dev, dev_stat_grps);
	struct se_device *dev = se_subdev->se_dev_ptr;
	int j;
	char str[28];
	int i;
	char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];

	if (!dev)
		return -ENODEV;

	/* scsiLuRevisionId */
	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
	for (j = 0; j < 4; j++)
		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
			DEV_T10_WWN(dev)->revision[j] : 0x20;
	str[4] = 0;
	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
			dev->se_sub_dev->t10_wwn.revision[i] : ' ';
	str[i] = '\0';
	return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(rev);
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type(

	/* scsiLuPeripheralType */
	return snprintf(page, PAGE_SIZE, "%u\n",
			TRANSPORT(dev)->get_device_type(dev));
			dev->transport->get_device_type(dev));
}
DEV_STAT_SCSI_LU_ATTR_RO(dev_type);

@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = {
 */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
{
	struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
	struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;

	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
			"scsi_dev", &target_stat_scsi_dev_cit);
	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
			"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
			"scsi_lu", &target_stat_scsi_lu_cit);

	dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
	dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
	dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
	dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
	dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
	dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
	dev_stat_grp->default_groups[3] = NULL;
}

@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
	tpg = sep->sep_tpg;

	ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
		TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
		tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
	spin_unlock(&lun->lun_sep_lock);
	return ret;
}
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
	tpg = sep->sep_tpg;

	ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
		TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
		TPG_TFO(tpg)->tpg_get_tag(tpg));
		tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
		tpg->se_tpg_tfo->tpg_get_tag(tpg));
	spin_unlock(&lun->lun_sep_lock);
	return ret;
}
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
	tpg = sep->sep_tpg;
	/* scsiTransportType */
	ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
			TPG_TFO(tpg)->get_fabric_name());
			tpg->se_tpg_tfo->get_fabric_name());
	spin_unlock(&lun->lun_sep_lock);
	return ret;
}
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
	}
	tpg = sep->sep_tpg;
	ret = snprintf(page, PAGE_SIZE, "%u\n",
			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
	spin_unlock(&lun->lun_sep_lock);
	return ret;
}
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
		return -ENODEV;
	}
	tpg = sep->sep_tpg;
	wwn = DEV_T10_WWN(dev);
	wwn = &dev->se_sub_dev->t10_wwn;
	/* scsiTransportDevName */
	ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
			TPG_TFO(tpg)->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			(strlen(wwn->unit_serial)) ? wwn->unit_serial :
			wwn->vendor);
	spin_unlock(&lun->lun_sep_lock);
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = {
 */
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
	struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
	struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;

	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
	config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
			"scsi_port", &target_stat_scsi_port_cit);
	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
	config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
			"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
	config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
			"scsi_transport", &target_stat_scsi_transport_cit);

	port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
	port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
	port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
	port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
	port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
	port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
	port_stat_grp->default_groups[3] = NULL;
}

@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
	tpg = nacl->se_tpg;
	/* scsiInstIndex */
	ret = snprintf(page, PAGE_SIZE, "%u\n",
			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
	spin_unlock_irq(&nacl->device_list_lock);
	return ret;
}
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
	}
	tpg = nacl->se_tpg;
	/* scsiAuthIntrTgtPortIndex */
	ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
	spin_unlock_irq(&nacl->device_list_lock);
	return ret;
}
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
	tpg = nacl->se_tpg;
	/* scsiInstIndex */
	ret = snprintf(page, PAGE_SIZE, "%u\n",
			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
	spin_unlock_irq(&nacl->device_list_lock);
	return ret;
}
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
	}
	tpg = nacl->se_tpg;
	/* scsiPortIndex */
	ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
	spin_unlock_irq(&nacl->device_list_lock);
	return ret;
}
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
	tpg = nacl->se_tpg;
	/* scsiAttIntrPortIndex */
	ret = snprintf(page, PAGE_SIZE, "%u\n",
			TPG_TFO(tpg)->sess_get_index(se_sess));
			tpg->se_tpg_tfo->sess_get_index(se_sess));
	spin_unlock_irq(&nacl->nacl_sess_lock);
	return ret;
}
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
	tpg = nacl->se_tpg;
	/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
	memset(buf, 0, 64);
	if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
		TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
		tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
			(unsigned char *)&buf[0], 64);

	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
 */
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
	struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
	struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;

	config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
	config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
			"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
	config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
	config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
			"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);

	ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
	ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
	ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
	ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
	ml_stat_grp->default_groups[2] = NULL;
}
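
[Editor's note] The vendor/model/revision hunks above all apply the same pattern: copy a fixed-width, space-padded T10 INQUIRY field, replace unprintable bytes, and NUL-terminate one byte past the field, with the loop bound now derived from sizeof() instead of a hand-coded length. A minimal userspace sketch of that sanitization — ISPRINT() is assumed to be the usual isprint()-style test, and the 8-byte vendor width matches the t10_wwn.vendor field used in the hunk:

	#include <ctype.h>
	#include <stdio.h>

	int main(void)
	{
		char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ' };
		char str[sizeof(vendor) + 1];
		size_t i;

		/* keep printable bytes, blank out anything else */
		for (i = 0; i < sizeof(vendor); i++)
			str[i] = isprint((unsigned char)vendor[i]) ? vendor[i] : ' ';
		str[i] = '\0';	/* terminate past the fixed-width field */

		printf("scsiLuVendorId: %s\n", str);	/* "LIO-ORG " */
		return 0;
	}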
@@ -41,13 +41,6 @@
#include "target_core_alua.h"
#include "target_core_pr.h"

#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif

struct se_tmr_req *core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(

	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
					GFP_ATOMIC : GFP_KERNEL);
	if (!(tmr)) {
		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
	if (!tmr) {
		pr_err("Unable to allocate struct se_tmr_req\n");
		return ERR_PTR(-ENOMEM);
	}
	tmr->task_cmd = se_cmd;
@@ -80,9 +73,9 @@ void core_tmr_release_req(
		return;
	}

	spin_lock(&dev->se_tmr_lock);
	spin_lock_irq(&dev->se_tmr_lock);
	list_del(&tmr->tmr_list);
	spin_unlock(&dev->se_tmr_lock);
	spin_unlock_irq(&dev->se_tmr_lock);

	kmem_cache_free(se_tmr_req_cache, tmr);
}
@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort(
	int tas,
	int fe_count)
{
	if (!(fe_count)) {
	if (!fe_count) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support
	 */
	if (((tmr_nacl != NULL) &&
	if ((tmr_nacl &&
	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
		transport_send_task_abort(cmd);

@@ -113,15 +106,14 @@ int core_tmr_lun_reset(
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr, *qr_tmp;
	struct se_cmd *cmd, *tcmd;
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count, state, tas;
	int fe_count, tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -133,7 +125,7 @@ int core_tmr_lun_reset(
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = DEV_ATTRIB(dev)->emulate_tas;
	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
@@ -142,20 +134,20 @@ int core_tmr_lun_reset(
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
			pr_debug("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				TPG_TFO(tmr_tpg)->get_fabric_name(),
				tmr_tpg->se_tpg_tfo->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name, tas);
		dev->transport->name, tas);
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET tmr..
	 */
	spin_lock(&dev->se_tmr_lock);
	spin_lock_irq(&dev->se_tmr_lock);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -164,8 +156,8 @@ int core_tmr_lun_reset(
			continue;

		cmd = tmr_p->task_cmd;
		if (!(cmd)) {
			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
		if (!cmd) {
			pr_err("Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
@@ -173,33 +165,33 @@ int core_tmr_lun_reset(
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
		 * skip non registration key matching TMRs.
		 */
		if ((preempt_and_abort_list != NULL) &&
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		spin_unlock(&dev->se_tmr_lock);
		spin_unlock_irq(&dev->se_tmr_lock);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		if (!atomic_read(&cmd->t_transport_active)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			spin_lock_irq(&dev->se_tmr_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			spin_lock_irq(&dev->se_tmr_lock);
			continue;
		}
		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_cmd_finish_abort_tmr(cmd);
		spin_lock(&dev->se_tmr_lock);
		spin_lock_irq(&dev->se_tmr_lock);
	}
	spin_unlock(&dev->se_tmr_lock);
	spin_unlock_irq(&dev->se_tmr_lock);
	/*
	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -224,23 +216,17 @@ int core_tmr_lun_reset(
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
		if (!task->task_se_cmd) {
			pr_err("task->task_se_cmd is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);
		cmd = task->task_se_cmd;

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
@@ -254,38 +240,38 @@ int core_tmr_lun_reset(
		atomic_set(&task->task_state_active, 0);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		pr_debug("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
			"def_t_state: %d/%d cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), 0,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			cmd->se_tfo->get_task_tag(cmd), 0,
			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, cmd->t_task_cdb[0]);
		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));
			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
			cmd->t_task_list_num,
			atomic_read(&cmd->t_task_cdbs_left),
			atomic_read(&cmd->t_task_cdbs_sent),
			atomic_read(&cmd->t_transport_active),
			atomic_read(&cmd->t_transport_stop),
			atomic_read(&cmd->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
				&cmd->t_state_lock, flags);

			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
			pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
				" for dev: %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
			pr_debug("LUN_RESET Completed task: %p shutdown for"
				" dev: %p\n", task, dev);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			atomic_dec(&cmd->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
@@ -295,34 +281,34 @@ int core_tmr_lun_reset(
		}
		__transport_stop_task_timer(task, &flags);

		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
				&cmd->t_state_lock, flags);
			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
				atomic_read(&cmd->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
		fe_count = atomic_read(&cmd->t_fe_count);

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
		if (atomic_read(&cmd->t_transport_active)) {
			pr_debug("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
			atomic_set(&cmd->t_transport_aborted, 1);
			spin_unlock_irqrestore(&cmd->t_state_lock,
						flags);
			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		atomic_set(&cmd->t_transport_aborted, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

		spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -337,25 +323,12 @@ int core_tmr_lun_reset(
	 * reference, otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
		cmd = (struct se_cmd *)qr->cmd;
		if (!(cmd)) {
			/*
			 * Skip these for non PREEMPT_AND_ABORT usage..
			 */
			if (preempt_and_abort_list != NULL)
				continue;

			atomic_dec(&qobj->queue_cnt);
			list_del(&qr->qr_list);
			kfree(qr);
			continue;
		}
	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
@@ -365,30 +338,22 @@ int core_tmr_lun_reset(
		if (prout_cmd == cmd)
			continue;

		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
		atomic_dec(&cmd->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		list_del(&cmd->se_queue_node);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		state = qr->state;
		kfree(qr);

		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, state,
			atomic_read(&T_TASK(cmd)->t_fe_count));
			"Preempt" : "", cmd, cmd->t_state,
			atomic_read(&cmd->t_fe_count));
		/*
		 * Signal that the command has failed via cmd->se_cmd_flags,
		 * and call TFO->new_cmd_failure() to wakeup any fabric
		 * dependent code used to wait for unsolicited data out
		 * allocation to complete. The fabric module is expected
		 * to dump any remaining unsolicited data out for the aborted
		 * command at this point.
		 */
		transport_new_cmd_failure(cmd);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
				atomic_read(&T_TASK(cmd)->t_fe_count));
				atomic_read(&cmd->t_fe_count));
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -396,21 +361,21 @@ int core_tmr_lun_reset(
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!(preempt_and_abort_list) &&
	if (!preempt_and_abort_list &&
	    (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock_irq(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock_irq(&dev->stats_lock);

	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
	pr_debug("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name);
		dev->transport->name);
	return 0;
}
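
[Editor's note] The core_tmr_handle_tas_abort() hunk near the top of this file encodes a small decision: with no frontend references left the command is finished immediately; otherwise TASK ABORTED status is sent when the TMR caller's nexus matches the command's or the device has emulate_tas enabled. A compact userspace restatement of just that decision — the two transport calls are stubbed so it can be exercised standalone; everything below is illustrative, not kernel API:

	#include <stdio.h>

	static void finish_abort(const char *why)	{ printf("finish abort: %s\n", why); }
	static void send_task_abort(void)		{ printf("send TASK ABORTED status\n"); }

	static void handle_tas_abort(int same_nacl, int tas, int fe_count)
	{
		if (!fe_count) {
			/* no frontend descriptor references remain */
			finish_abort("no fe_count");
			return;
		}
		/* TASK ABORTED status (TAS) bit support, spc4r17 section 7.4.6 */
		if (same_nacl || tas)
			send_task_abort();
	}

	int main(void)
	{
		handle_tas_abort(0, 0, 0);	/* finished immediately */
		handle_tas_abort(1, 0, 1);	/* matching nexus -> TAS */
		handle_tas_abort(0, 1, 1);	/* emulate_tas enabled -> TAS */
		return 0;
	}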
@@ -44,6 +44,12 @@
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
@@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg(
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

@@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!(strcmp(acl->initiatorname,
					nacl->initiatorname)) &&
			    (acl->mapped_lun == deve->mapped_lun))
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun))
				break;
		}

		if (!acl) {
			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
@@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!(strcmp(acl->initiatorname, initiatorname)))
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

@@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!(strcmp(acl->initiatorname, initiatorname)) &&
		   (!(acl->dynamic_node_acl))) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_bh(&tpg->acl_node_lock);
			return acl;
		}
@@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs(
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
@@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs(
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

@@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node(
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
			"defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
@@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!(nacl->device_list)) {
		printk(KERN_ERR "Unable to allocate memory for"
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -1;
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];
@@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if ((acl))
	if (acl)
		return acl;

	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
	if (!(acl))
	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
@@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	TPG_TFO(tpg)->set_default_node_attributes(acl);
	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

@@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

	printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
@@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if ((acl)) {
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
@@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
					se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!(se_nacl)) {
		printk("struct se_node_acl pointer is NULL\n");
	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
@@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);
	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

@@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
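
[Editor's note] core_tpg_add_initiator_node_acl() above promotes an existing demo-mode (dynamic) ACL in place rather than allocating a second entry for the same initiator. A stripped-down userspace sketch of that decision — the structure and return convention are simplified stand-ins, not the kernel types:

	#include <stdio.h>

	struct node_acl {
		char initiatorname[64];
		int dynamic_node_acl;
	};

	/* Returns the promoted ACL, or NULL when the caller must handle it. */
	static struct node_acl *promote_or_reject(struct node_acl *existing)
	{
		if (!existing)
			return NULL;	/* caller would allocate a new explicit ACL */
		if (existing->dynamic_node_acl) {
			existing->dynamic_node_acl = 0;	/* convert in place */
			return existing;
		}
		return NULL;		/* explicit ACL already exists: -EEXIST path */
	}

	int main(void)
	{
		struct node_acl acl = {
			.initiatorname = "iqn.2003-01.org.example:initiator",
			.dynamic_node_acl = 1,
		};

		if (promote_or_reject(&acl))
			printf("promoted dynamic ACL for %s\n", acl.initiatorname);
		return 0;
	}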
@@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
@@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl(
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
@@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl(
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
@@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth(

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(acl)) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
@@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth(
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
@@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
@@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth(
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
@@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth(
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
@@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth(
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);
		tpg->se_tpg_tfo->close_session(init_sess);

	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
@@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = se_global->g_lun0_dev;
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;
@@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return -1;
		return ret;

	return 0;
}
@@ -638,8 +643,8 @@ int core_tpg_register(

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!(se_tpg->tpg_lun_list)) {
		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
			"tpg_lun_list\n");
		return -ENOMEM;
	}
@@ -663,7 +668,7 @@ int core_tpg_register(
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
@@ -676,11 +681,11 @@ int core_tpg_register(
		}
	}

	spin_lock_bh(&se_global->se_tpg_lock);
	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);
	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&se_global->se_tpg_lock);
	list_del(&se_tpg->se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);
	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
@@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_bh(&se_tpg->acl_node_lock);
	}
@@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun(
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(),
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
TPG_TFO(tpg)->tpg_get_tag(tpg));
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg));
|
||||
return ERR_PTR(-EOVERFLOW);
|
||||
}
|
||||
|
||||
spin_lock(&tpg->tpg_lun_lock);
|
||||
lun = &tpg->tpg_lun_list[unpacked_lun];
|
||||
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
|
||||
printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
|
||||
pr_err("TPG Logical Unit Number: %u is already active"
|
||||
" on %s Target Portal Group: %u, ignoring request.\n",
|
||||
unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
|
||||
TPG_TFO(tpg)->tpg_get_tag(tpg));
|
||||
unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg));
|
||||
spin_unlock(&tpg->tpg_lun_lock);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
@ -772,8 +777,11 @@ int core_tpg_post_addlun(
|
||||
u32 lun_access,
|
||||
void *lun_ptr)
|
||||
{
|
||||
if (core_dev_export(lun_ptr, tpg, lun) < 0)
|
||||
return -1;
|
||||
int ret;
|
||||
|
||||
ret = core_dev_export(lun_ptr, tpg, lun);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
spin_lock(&tpg->tpg_lun_lock);
|
||||
lun->lun_access = lun_access;
|
||||
@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun(
|
||||
struct se_lun *lun;
|
||||
|
||||
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
|
||||
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
|
||||
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
|
||||
"-1: %u for Target Portal Group: %u\n",
|
||||
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
|
||||
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
|
||||
TRANSPORT_MAX_LUNS_PER_TPG-1,
|
||||
TPG_TFO(tpg)->tpg_get_tag(tpg));
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg));
|
||||
return ERR_PTR(-EOVERFLOW);
|
||||
}
|
||||
|
||||
spin_lock(&tpg->tpg_lun_lock);
|
||||
lun = &tpg->tpg_lun_list[unpacked_lun];
|
||||
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
|
||||
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
|
||||
pr_err("%s Logical Unit Number: %u is not active on"
|
||||
" Target Portal Group: %u, ignoring request.\n",
|
||||
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
|
||||
TPG_TFO(tpg)->tpg_get_tag(tpg));
|
||||
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
|
||||
tpg->se_tpg_tfo->tpg_get_tag(tpg));
|
||||
spin_unlock(&tpg->tpg_lun_lock);
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
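Note: the target_core_tpg.c hunks above all follow one mechanical pattern: the TPG_TFO() accessor macro is dropped in favour of dereferencing se_tpg_tfo directly, and printk(KERN_*) calls become pr_err()/pr_debug(). A minimal sketch of the accessor change is below; example_fabric_name() is an invented wrapper, only TPG_TFO() and get_fabric_name() come from the diff itself.

/* Old accessor macro, deleted by this series: */
#define TPG_TFO(se_tpg)	((se_tpg)->se_tpg_tfo)

static const char *example_fabric_name(struct se_portal_group *tpg)
{
	/* was: TPG_TFO(tpg)->get_fabric_name() */
	return tpg->se_tpg_tfo->get_fabric_name();
}

The direct form costs nothing at runtime and keeps the pointer chain visible at the call site, which is presumably why the series removes these macros wholesale.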
File diff suppressed because it is too large
@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;

if (!(sess))
if (!sess)
return 0;

nacl = sess->se_node_acl;
if (!(nacl))
if (!nacl)
return 0;

deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count)))
if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
@ -80,10 +80,10 @@ int core_scsi3_ua_check(
case REQUEST_SENSE:
return 0;
default:
return -1;
return -EINVAL;
}

return -1;
return -EINVAL;
}

int core_scsi3_ua_allocate(
@ -97,13 +97,13 @@ int core_scsi3_ua_allocate(
/*
* PASSTHROUGH OPS
*/
if (!(nacl))
return -1;
if (!nacl)
return -EINVAL;

ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!(ua)) {
printk(KERN_ERR "Unable to allocate struct se_ua\n");
return -1;
if (!ua) {
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
@ -177,9 +177,9 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);

printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);

atomic_inc(&deve->ua_count);
@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition(
u8 *asc,
u8 *ascq)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;

if (!(sess))
if (!sess)
return;

nacl = sess->se_node_acl;
if (!(nacl))
if (!nacl)
return;

spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);

printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(),
(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}

int core_scsi3_ua_clear_for_request_sense(
@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p;
int head = 1;

if (!(sess))
return -1;
if (!sess)
return -EINVAL;

nacl = sess->se_node_acl;
if (!(nacl))
return -1;
if (!nacl)
return -EINVAL;

spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return -1;
return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);

printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);

return (head) ? -1 : 0;
return (head) ? -EPERM : 0;
}

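Note: besides the pr_debug()/pr_err() conversion, the target_core_ua.c hunks above replace bare "return -1" with distinct negative errno values (-EINVAL, -ENOMEM, -EPERM). A hedged sketch of that convention, with an invented helper; <linux/errno.h> provides the constants:

#include <linux/errno.h>

/* old style: every failure collapses to -1 */
static int ua_check_old(struct se_session *sess)
{
	if (!sess)
		return -1;
	return 0;
}

/* new style: the errno tells the caller why it failed */
static int ua_check_new(struct se_session *sess)
{
	if (!sess)
		return -EINVAL;
	return 0;
}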
@ -1,15 +1,6 @@
EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
-I$(srctree)/drivers/scsi/ \
-I$(srctree)/include/scsi/ \
-I$(srctree)/drivers/target/tcm_fc/

tcm_fc-y += tfc_cmd.o \
tfc_conf.o \
tfc_io.o \
tfc_sess.o
tcm_fc-y += tfc_cmd.o \
tfc_conf.o \
tfc_io.o \
tfc_sess.o

obj-$(CONFIG_TCM_FC) += tcm_fc.o

ifdef CONFIGFS_TCM_FC_DEBUG
EXTRA_CFLAGS += -DTCM_FC_DEBUG
endif

@ -23,30 +23,6 @@
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */

/*
* Debug options.
*/
#define FT_DEBUG_CONF 0x01 /* configuration messages */
#define FT_DEBUG_SESS 0x02 /* session messages */
#define FT_DEBUG_TM 0x04 /* TM operations */
#define FT_DEBUG_IO 0x08 /* I/O commands */
#define FT_DEBUG_DATA 0x10 /* Data transfer */

extern unsigned int ft_debug_logging; /* debug options */

#define FT_DEBUG(mask, fmt, args...) \
do { \
if (ft_debug_logging & (mask)) \
printk(KERN_INFO "tcm_fc: %s: " fmt, \
__func__, ##args); \
} while (0)

#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)

struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];
@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_new_cmd_failure(struct se_cmd *);
int ft_queue_tm_resp(struct se_cmd *);
int ft_is_state_remove(struct se_cmd *);

@ -59,33 +59,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
struct se_mem *mem;
struct se_transport_task *task;

if (!(ft_debug_logging & FT_DEBUG_IO))
return;
struct scatterlist *sg;
int count;

se_cmd = &cmd->se_cmd;
printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
printk(KERN_INFO "%s: cmd %p cdb %p\n",
pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_data_nents,
se_cmd->data_length, se_cmd->se_cmd_flags);

for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
pr_debug("%s: cmd %p sg %p page %p "
"len 0x%x off 0x%x\n",
caller, cmd, sg,
sg_page(sg), sg->length, sg->offset);

task = T_TASK(se_cmd);
printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
caller, cmd, task, task->t_tasks_se_num,
task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
if (task->t_mem_list)
list_for_each_entry(mem, task->t_mem_list, se_list)
printk(KERN_INFO "%s: cmd %p mem %p page %p "
"len 0x%x off 0x%x\n",
caller, cmd, mem,
mem->se_page, mem->se_len, mem->se_off);
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
printk(KERN_INFO "%s: cmd %p sid %x did %x "
pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
@ -96,15 +93,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)

static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
struct se_queue_obj *qobj;
struct ft_tpg *tpg = sess->tport->tpg;
struct se_queue_obj *qobj = &tpg->qobj;
unsigned long flags;

qobj = &sess->tport->tpg->qobj;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

wake_up_process(tpg->thread);
}

static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@ -149,7 +148,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)

void ft_check_stop_free(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0, 1, 0);
transport_generic_free_cmd(se_cmd, 0, 0);
}

/*
@ -256,15 +255,14 @@ int ft_write_pending(struct se_cmd *se_cmd)
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
/*
* Map se_mem list to scatterlist, so that
* DDP can be setup. DDP setup function require
* scatterlist. se_mem_list is internal to
* TCM/LIO target
* cmd may have been broken up into multiple
* tasks. Link their sgs together so we can
* operate on them all at once.
*/
transport_do_task_sg_chain(se_cmd);
cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
cmd->sg = se_cmd->t_tasks_sg_chained;
cmd->sg_cnt =
T_TASK(se_cmd)->t_tasks_sg_chained_no;
se_cmd->t_tasks_sg_chained_no;
}
if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
cmd->sg, cmd->sg_cnt))
@ -294,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd)
return 0; /* XXX TBD */
}

void ft_new_cmd_failure(struct se_cmd *se_cmd)
{
/* XXX TBD */
printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
}

/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
@ -312,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
/* XXX need to find cmd if queued */
cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
return;
}

@ -326,10 +318,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
}
}
@ -351,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
struct fcp_resp_rsp_info *info;

fh = fc_frame_header_get(rx_fp);
FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
@ -421,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd)
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
return;
}

FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
pr_debug("alloc tm cmd fn %d\n", tm_func);
tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
if (!tmr) {
FT_TM_DBG("alloc failed\n");
pr_debug("alloc failed\n");
ft_send_resp_code(cmd, FCP_TMF_FAILED);
return;
}
@ -438,20 +430,20 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
/*
* Make sure to clean up newly allocated TMR request
* since "unable to handle TMR request because failed
* to get to LUN"
*/
FT_TM_DBG("Failed to get LUN for TMR func %d, "
pr_debug("Failed to get LUN for TMR func %d, "
"se_cmd %p, unpacked_lun %d\n",
tm_func, &cmd->se_cmd, cmd->lun);
ft_dump_cmd(cmd, __func__);
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
ft_sess_put(sess);
return;
}
@ -495,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_FAILED;
break;
}
FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
@ -523,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
return;

busy:
FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@ -548,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@ -637,7 +629,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
@ -647,22 +639,22 @@ static void ft_send_cmd(struct ft_cmd *cmd)

ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);

if (ret == -1) {
if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
transport_generic_free_cmd(se_cmd, 0, 1, 0);
transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
if (ret == -2) {
if (ret == -EINVAL) {
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
ft_queue_status(se_cmd);
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
transport_generic_free_cmd(se_cmd, 0, 1, 0);
transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
transport_generic_handle_cdb(se_cmd);
@ -670,7 +662,6 @@ static void ft_send_cmd(struct ft_cmd *cmd)

err:
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
return;
}

/*
@ -678,7 +669,7 @@ err:
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
FT_IO_DBG("cmd state %x\n", cmd->state);
pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);
@ -697,15 +688,12 @@ int ft_thread(void *arg)
struct ft_tpg *tpg = arg;
struct se_queue_obj *qobj = &tpg->qobj;
struct ft_cmd *cmd;
int ret;

set_user_nice(current, -20);

while (!kthread_should_stop()) {
ret = wait_event_interruptible(qobj->thread_wq,
atomic_read(&qobj->queue_cnt) || kthread_should_stop());
if (ret < 0 || kthread_should_stop())
schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
if (kthread_should_stop())
goto out;

cmd = ft_dequeue_cmd(qobj);
if (cmd)
ft_exec_req(cmd);

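Note: the ft_thread()/ft_queue_cmd() hunks above drop the waitqueue and queue_cnt counter and instead wake the kthread directly. A rough standalone sketch of that producer/consumer shape (worker and the example_* names are invented); real code must re-check its queue every iteration, since the wakeup can arrive while the thread is still running:

#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;	/* created with kthread_run() elsewhere */

static int example_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* sets TASK_INTERRUPTIBLE internally, sleeps until woken */
		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
		if (kthread_should_stop())
			break;
		/* drain the command queue here */
	}
	return 0;
}

static void example_enqueue(void)
{
	/* after adding a command to the queue under its lock */
	wake_up_process(worker);
}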
@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
}
err = 4;
fail:
FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
@ -216,14 +216,14 @@ static struct se_node_acl *ft_add_acl(
u64 wwpn;
u32 q_depth;

FT_CONF_DBG("add acl %s\n", name);
pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);

if (ft_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);

acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
if (!(acl))
if (!acl)
return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;

@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);

FT_CONF_DBG("del acl %s\n",
pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));

tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);

core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
spin_lock_bh(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
FT_CONF_DBG("acl %p port_name %llx\n",
pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
@ -280,11 +280,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
struct ft_node_acl *acl;

acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!(acl)) {
printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
if (!acl) {
pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
FT_CONF_DBG("acl %p\n", acl);
pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}

@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);

FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
pr_debug("acl %p\n", acl);
kfree(acl);
}

@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg(
unsigned long index;
int ret;

FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
pr_debug("tcm_fc: add tpg %s\n", name);

/*
* Name must be "tpgt_" followed by the index.
@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg(
transport_init_queue_obj(&tpg->qobj);

ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
(void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);

FT_CONF_DBG("del tpg %s\n",
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));

kthread_stop(tpg->thread);
@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport(
struct ft_lport_acl *old_lacl;
u64 wwpn;

FT_CONF_DBG("add lport %s\n", name);
pr_debug("add lport %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);

FT_CONF_DBG("del lport %s\n",
pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
@ -536,8 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd_to_pool = ft_release_cmd,
.release_cmd_direct = ft_release_cmd,
.release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.stop_session = ft_sess_stop,
@ -550,7 +549,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.set_default_node_attributes = ft_set_default_node_attr,
.get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
.new_cmd_failure = ft_new_cmd_failure,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
@ -582,10 +580,10 @@ int ft_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
if (!fabric) {
printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
if (IS_ERR(fabric)) {
pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
return -1;
return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;

@ -610,11 +608,8 @@ int ft_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
FT_CONF_DBG("target_fabric_configfs_register() for"
pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
printk(KERN_INFO
"%s: target_fabric_configfs_register() failed!\n",
__func__);
target_fabric_configfs_free(fabric);
return -1;
}

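Note: ft_register_configfs() above now treats target_fabric_configfs_init() as returning an ERR_PTR-encoded pointer instead of NULL on failure. The generic IS_ERR()/PTR_ERR() idiom, sketched under the assumption that the target_core configfs declarations are in scope; example_register() is invented:

#include <linux/err.h>
#include <linux/module.h>

static int example_register(void)
{
	struct target_fabric_configfs *fabric;

	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
	if (IS_ERR(fabric))
		return PTR_ERR(fabric);	/* propagate the encoded errno, not -1 */
	/* ... set up fabric->tf_ops and register ... */
	return 0;
}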
@ -39,6 +39,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@ -65,21 +66,20 @@
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct se_transport_task *task;
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
struct se_mem *mem;
struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
u32 mem_off;
u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
size_t mem_len;
size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
struct page *page;
struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
@ -90,24 +90,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);

task = T_TASK(se_cmd);
BUG_ON(!task);
remaining = se_cmd->data_length;

/*
* Setup to use first mem list entry if any.
* Setup to use first mem list entry, unless no data.
*/
if (task->t_tasks_se_num) {
mem = list_first_entry(task->t_mem_list,
struct se_mem, se_list);
mem_len = mem->se_len;
mem_off = mem->se_off;
page = mem->se_page;
} else {
mem = NULL;
mem_len = remaining;
mem_off = 0;
page = NULL;
BUG_ON(remaining && !se_cmd->t_data_sg);
if (remaining) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}

/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@ -115,12 +108,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)

while (remaining) {
if (!mem_len) {
BUG_ON(!mem);
mem = list_entry(mem->se_list.next,
struct se_mem, se_list);
mem_len = min((size_t)mem->se_len, remaining);
mem_off = mem->se_off;
page = mem->se_page;
sg = sg_next(sg);
mem_len = min((size_t)sg->length, remaining);
mem_off = sg->offset;
page = sg_page(sg);
}
if (!frame_len) {
/*
@ -148,18 +139,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
tlen = min(mem_len, frame_len);

if (use_sg) {
if (!mem) {
BUG_ON(!task->t_task_buf);
page_addr = task->t_task_buf + mem_off;
/*
* In this case, offset is 'offset_in_page' of
* (t_task_buf + mem_off) instead of 'mem_off'.
*/
off_in_page = offset_in_page(page_addr);
page = virt_to_page(page_addr);
tlen = min(tlen, PAGE_SIZE - off_in_page);
} else
off_in_page = mem_off;
off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
@ -169,7 +149,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
} else if (mem) {
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
@ -180,10 +160,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
to += tlen;
} else {
from = task->t_task_buf + mem_off;
memcpy(to, from, tlen);
to += tlen;
}

mem_off += tlen;
@ -201,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
error = lport->tt.seq_send(lport, cmd->seq, fp);
if (error) {
/* XXX For now, initiator will retry */
if (printk_ratelimit())
printk(KERN_ERR "%s: Failed to send frame %p, "
pr_err_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
@ -221,24 +196,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
struct se_transport_task *task;
struct fc_frame_header *fh;
struct se_mem *mem;
u32 mem_off;
struct scatterlist *sg = NULL;
u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
size_t mem_len;
size_t mem_len = 0;
size_t tlen;
struct page *page;
struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;

task = T_TASK(se_cmd);
BUG_ON(!task);

fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
@ -251,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
*/
buf = fc_frame_payload_get(fp, 1);
if (cmd->was_ddp_setup && buf) {
printk(KERN_INFO "%s: When DDP was setup, not expected to"
pr_debug("%s: When DDP was setup, not expected to"
"receive frame with payload, Payload shall be"
"copied directly to buffer instead of coming "
"via. legacy receive queues\n", __func__);
@ -289,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
* this point, but just in case if required in future
* for debugging or any other purpose
*/
printk(KERN_ERR "%s: Received frame with TSI bit not"
pr_err("%s: Received frame with TSI bit not"
" being SET, dropping the frame, "
"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
__func__, cmd->sg, cmd->sg_cnt);
@ -312,29 +283,22 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
frame_len = se_cmd->data_length - rel_off;

/*
* Setup to use first mem list entry if any.
* Setup to use first mem list entry, unless no data.
*/
if (task->t_tasks_se_num) {
mem = list_first_entry(task->t_mem_list,
struct se_mem, se_list);
mem_len = mem->se_len;
mem_off = mem->se_off;
page = mem->se_page;
} else {
mem = NULL;
page = NULL;
mem_off = 0;
mem_len = frame_len;
BUG_ON(frame_len && !se_cmd->t_data_sg);
if (frame_len) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}

while (frame_len) {
if (!mem_len) {
BUG_ON(!mem);
mem = list_entry(mem->se_list.next,
struct se_mem, se_list);
mem_len = mem->se_len;
mem_off = mem->se_off;
page = mem->se_page;
sg = sg_next(sg);
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
@ -347,19 +311,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)

tlen = min(mem_len, frame_len);

if (mem) {
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
} else {
to = task->t_task_buf + mem_off;
memcpy(to, from, tlen);
}
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);

from += tlen;
frame_len -= tlen;
mem_off += tlen;

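Note: the tfc_io.c hunks above are the heart of the se_mem removal: instead of walking a TCM-private struct se_mem list (and special-casing a linear t_task_buf), both the read and write paths now iterate the command's scatterlist via sg_page()/sg_next(). A minimal sketch of that traversal; example_sgl_bytes() is invented:

#include <linux/scatterlist.h>

static size_t example_sgl_bytes(struct scatterlist *sg)
{
	size_t total = 0;

	/* same walk as the converted I/O paths: consume one entry,
	 * then step to the next with sg_next() */
	while (sg) {
		total += sg->length;	/* sg_page(sg) / sg->offset likewise */
		sg = sg_next(sg);
	}
	return total;
}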
@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
FT_SESS_DBG("port_id %x not found\n", port_id);
pr_debug("port_id %x not found\n", port_id);
return NULL;
}

@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;

FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
pr_debug("port_id %x sess %p\n", port_id, sess);

transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;

FT_SESS_DBG("port_id %x\n", sess->port_id);
pr_debug("port_id %x\n", sess->port_id);
return 1;
}

@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
FT_SESS_DBG("port_id %x\n", port_id);
pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;

FT_SESS_DBG("port_id %x\n", sess->port_id);
pr_debug("port_id %x\n", sess->port_id);
}

int ft_sess_logged_in(struct se_session *se_sess)
@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
FT_SESS_DBG("port_id %x flags %x ret %x\n",
pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);

FT_SESS_DBG("sid %x\n", sid);
pr_debug("sid %x\n", sid);

sess = ft_sess_get(lport, sid);
if (!sess) {
FT_SESS_DBG("sid %x sess lookup failed\n", sid);
pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;

@ -9,12 +9,13 @@
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml"
#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))

/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
* By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
@ -99,6 +100,7 @@ enum transport_state_table {
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
TRANSPORT_COMPLETE_QF_WP = 18,
};

/* Used for struct se_cmd->se_cmd_flags */
@ -108,27 +110,22 @@ enum se_cmd_flags_table {
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020,
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200,
SCF_SE_CMD_FAILED = 0x00000400,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
SCF_SENT_DELAYED_TAS = 0x00020000,
SCF_ALUA_NON_OPTIMIZED = 0x00040000,
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000,
SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_SYNC_CACHE = 0x00800000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
SCF_EMULATE_SYNC_UNMAP = 0x02000000
SCF_EMULATE_QUEUE_FULL = 0x02000000,
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@ -205,11 +202,6 @@ typedef enum {
SCSI_INDEX_TYPE_MAX
} scsi_index_t;

struct scsi_index_table {
spinlock_t lock;
u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
} ____cacheline_aligned;

struct se_cmd;

struct t10_alua {
@ -235,7 +227,7 @@ struct t10_alua_lu_gp {
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
struct list_head lu_gp_list;
struct list_head lu_gp_node;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;

@ -291,10 +283,10 @@ struct t10_vpd {
} ____cacheline_aligned;

struct t10_wwn {
unsigned char vendor[8];
unsigned char model[16];
unsigned char revision[4];
unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
char vendor[8];
char model[16];
char revision[4];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
struct se_subsystem_dev *t10_sub_dev;
struct config_group t10_wwn_group;
@ -366,13 +358,13 @@ struct t10_reservation_ops {
int (*t10_pr_clear)(struct se_cmd *);
};

struct t10_reservation_template {
struct t10_reservation {
/* Reservation effects all target ports */
int pr_all_tg_pt;
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
/* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
u32 pr_aptpl_buf_len;
u32 pr_generation;
@ -397,7 +389,7 @@ struct t10_reservation_template {

struct se_queue_req {
int state;
void *cmd;
struct se_cmd *cmd;
struct list_head qr_list;
} ____cacheline_aligned;

@ -408,64 +400,10 @@ struct se_queue_obj {
wait_queue_head_t thread_wq;
} ____cacheline_aligned;

/*
* Used one per struct se_cmd to hold all extra struct se_task
* metadata. This structure is setup and allocated in
* drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
*/
struct se_transport_task {
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
int t_tasks_fua;
bool t_tasks_bidi;
u32 t_task_cdbs;
u32 t_tasks_check;
u32 t_tasks_no;
u32 t_tasks_sectors;
u32 t_tasks_se_num;
u32 t_tasks_se_bidi_num;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
atomic_t t_transport_complete;
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
atomic_t transport_lun_stop;
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
struct completion transport_lun_fe_stop_comp;
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
struct scatterlist t_tasks_sg_bounce;
void *t_task_buf;
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
* and other HW target mode fabric modules.
*/
struct scatterlist *t_task_pt_sgl;
struct list_head *t_mem_list;
/* Used for BIDI READ */
struct list_head *t_mem_bidi_list;
struct list_head t_task_list;
} ____cacheline_aligned;

struct se_task {
unsigned char task_sense;
struct scatterlist *task_sg;
u32 task_sg_nents;
struct scatterlist *task_sg_bidi;
u8 task_scsi_status;
u8 task_flags;
@ -476,8 +414,6 @@ struct se_task {
u32 task_no;
u32 task_sectors;
u32 task_size;
u32 task_sg_num;
u32 task_sg_offset;
enum dma_data_direction task_data_direction;
struct se_cmd *task_se_cmd;
struct se_device *se_dev;
@ -495,9 +431,6 @@ struct se_task {
struct list_head t_state_list;
} ____cacheline_aligned;

#define TASK_CMD(task) ((task)->task_se_cmd)
#define TASK_DEV(task) ((task)->se_dev)

struct se_cmd {
/* SAM response code being sent to initiator */
u8 scsi_status;
@ -531,9 +464,10 @@ struct se_cmd {
atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_list;
struct list_head se_ordered_list;
struct list_head se_lun_list;
struct list_head se_delayed_node;
struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
struct se_device *se_obj_ptr;
@ -542,18 +476,62 @@ struct se_cmd {
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
struct se_transport_task *t_task;
struct se_transport_task t_task_backstore;
struct list_head se_queue_node;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
} ____cacheline_aligned;
int (*transport_qf_callback)(struct se_cmd *);

#define T_TASK(cmd) ((cmd)->t_task)
#define CMD_TFO(cmd) ((cmd)->se_tfo)
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
int t_tasks_fua;
bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
atomic_t t_transport_complete;
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
atomic_t transport_lun_stop;
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
struct completion transport_lun_fe_stop_comp;
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;

/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
* and other HW target mode fabric modules.
*/
struct scatterlist *t_task_pt_sgl;
u32 t_task_pt_sgl_num;

struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents;

/* Used for BIDI READ */
struct list_head t_task_list;
u32 t_task_list_num;

} ____cacheline_aligned;

struct se_tmr_req {
/* Task Management function to be preformed */
@ -617,9 +595,6 @@ struct se_session {
struct list_head sess_acl_list;
} ____cacheline_aligned;

#define SE_SESS(cmd) ((cmd)->se_sess)
#define SE_NODE_ACL(sess) ((sess)->se_node_acl)

struct se_device;
struct se_transform_info;
struct scatterlist;
@ -640,8 +615,6 @@ struct se_lun_acl {
struct se_ml_stat_grps ml_stat_grps;
} ____cacheline_aligned;

#define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps)

struct se_dev_entry {
bool def_pr_registered;
/* See transport_lunflags_table */
@ -688,6 +661,8 @@ struct se_dev_attrib {
int emulate_reservations;
int emulate_alua;
int enforce_pr_isids;
int is_nonrot;
int emulate_rest_reord;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
@ -727,10 +702,10 @@ struct se_subsystem_dev {
/* T10 Inquiry and VPD WWN Information */
struct t10_wwn t10_wwn;
/* T10 SPC-2 + SPC-3 Reservations */
struct t10_reservation_template t10_reservation;
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
struct list_head g_se_dev_list;
struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@ -738,11 +713,6 @@ struct se_subsystem_dev {
struct se_dev_stat_grps dev_stat_grps;
} ____cacheline_aligned;

#define T10_ALUA(su_dev) (&(su_dev)->t10_alua)
#define T10_RES(su_dev) (&(su_dev)->t10_reservation)
#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops)
#define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps)

struct se_device {
/* Set to 1 if thread is NOT sleeping on thread_sem */
u8 thread_active;
@ -780,11 +750,11 @@ struct se_device {
atomic_t dev_status_thr_count;
atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
struct se_obj dev_access_obj;
struct se_obj dev_export_obj;
struct se_queue_obj *dev_queue_obj;
struct se_queue_obj *dev_status_queue_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
@ -796,6 +766,7 @@ struct se_device {
spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
/* Used for legacy SPC-2 reservationsa */
struct se_node_acl *dev_reserved_node_acl;
/* Used for ALUA Logical Unit Group membership */
@ -809,10 +780,12 @@ struct se_device {
struct task_struct *process_thread;
pid_t process_thread_pid;
struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
struct se_subsystem_dev *se_sub_dev;
@ -824,11 +797,6 @@ struct se_device {
struct list_head g_se_dev_list;
} ____cacheline_aligned;

#define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev)
#define SU_DEV(dev) ((dev)->se_sub_dev)
#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib)
#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn)

struct se_hba {
u16 hba_tpgt;
u32 hba_id;
@ -837,24 +805,17 @@ struct se_hba {
/* Virtual iSCSI devices attached. */
u32 dev_count;
u32 hba_index;
atomic_t load_balance_queue;
atomic_t left_queue_depth;
/* Maximum queue depth the HBA can handle. */
atomic_t max_queue_depth;
/* Pointer to transport specific host structure. */
void *hba_ptr;
/* Linked list for struct se_device */
struct list_head hba_dev_list;
struct list_head hba_list;
struct list_head hba_node;
spinlock_t device_lock;
spinlock_t hba_queue_lock;
struct config_group hba_group;
struct mutex hba_access_mutex;
struct se_subsystem_api *transport;
} ____cacheline_aligned;

#define SE_HBA(dev) ((dev)->se_hba)

struct se_port_stat_grps {
struct config_group stat_group;
struct config_group scsi_port_group;
@ -881,9 +842,6 @@ struct se_lun {
struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;

#define SE_LUN(cmd) ((cmd)->se_lun)
#define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps)

struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;
@ -930,7 +888,7 @@ struct se_portal_group {
spinlock_t tpg_lun_lock;
/* Pointer to $FABRIC_MOD portal group */
void *se_tpg_fabric_ptr;
struct list_head se_tpg_list;
struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
struct se_lun *tpg_lun_list;
@ -949,8 +907,6 @@ struct se_portal_group {
struct config_group tpg_param_group;
} ____cacheline_aligned;

#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo)

struct se_wwn {
struct target_fabric_configfs *wwn_tf;
struct config_group wwn_group;
@ -958,28 +914,4 @@ struct se_wwn {
struct config_group fabric_stat_group;
} ____cacheline_aligned;

struct se_global {
u16 alua_lu_gps_counter;
int g_sub_api_initialized;
u32 in_shutdown;
u32 alua_lu_gps_count;
u32 g_hba_id_counter;
struct config_group target_core_hbagroup;
struct config_group alua_group;
struct config_group alua_lu_gps_group;
struct list_head g_lu_gps_list;
struct list_head g_se_tpg_list;
struct list_head g_hba_list;
struct list_head g_se_dev_list;
struct se_hba *g_lun0_hba;
struct se_subsystem_dev *g_lun0_su_dev;
struct se_device *g_lun0_dev;
struct t10_alua_lu_gp *default_lu_gp;
spinlock_t g_device_lock;
spinlock_t hba_lock;
spinlock_t se_tpg_lock;
spinlock_t lu_gps_lock;
spinlock_t plugin_class_lock;
} ____cacheline_aligned;

#endif /* TARGET_CORE_BASE_H */

@ -1,8 +1,8 @@
|
||||
#ifndef TARGET_CORE_DEVICE_H
|
||||
#define TARGET_CORE_DEVICE_H
|
||||
|
||||
extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
|
||||
extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
|
||||
extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
|
||||
extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
|
||||
extern struct se_dev_entry *core_get_se_deve_from_rtpi(
|
||||
struct se_node_acl *, u16);
|
||||
extern int core_free_device_list_for_node(struct se_node_acl *,
|
||||
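The LUN lookup entry points are renamed, and the command variant drops its unused second parameter. A hedged sketch of fabric call sites updated for the new names (function and variable names assumed):

static int example_dispatch_cmd(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	/* was: transport_get_lun_for_cmd(se_cmd, NULL, unpacked_lun) */
	return transport_lookup_cmd_lun(se_cmd, unpacked_lun);
}

static int example_dispatch_tmr(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	/* was: transport_get_lun_for_tmr(se_cmd, unpacked_lun) */
	return transport_lookup_tmr_lun(se_cmd, unpacked_lun);
}
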
@ -39,6 +39,8 @@ extern int se_dev_set_emulate_tas(struct se_device *, int);
extern int se_dev_set_emulate_tpu(struct se_device *, int);
extern int se_dev_set_emulate_tpws(struct se_device *, int);
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
extern int se_dev_set_is_nonrot(struct se_device *, int);
extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
extern int se_dev_set_queue_depth(struct se_device *, u32);
extern int se_dev_set_max_sectors(struct se_device *, u32);
extern int se_dev_set_optimal_sectors(struct se_device *, u32);

@ -38,18 +38,12 @@ struct target_core_fabric_ops {
	 * initially allocated in interrupt context.
	 */
	int (*new_cmd_map)(struct se_cmd *);
	/*
	 * Optional function pointer for TCM fabric modules that use
	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
	 */
	int (*alloc_cmd_iovecs)(struct se_cmd *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor in transport_cmd_check_stop()
	 */
	void (*check_stop_free)(struct se_cmd *);
	void (*release_cmd_to_pool)(struct se_cmd *);
	void (*release_cmd_direct)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	/*
	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
	 */
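With alloc_cmd_iovecs() dropped and the pooled and direct release callbacks folded into one release_cmd() hook, a fabric module now wires a single teardown path. A minimal sketch for a hypothetical fabric that keeps its per-command descriptors in a kmem_cache (all example_* names are assumptions):

static void example_release_cmd(struct se_cmd *se_cmd)
{
	struct example_cmd *cmd = container_of(se_cmd,
			struct example_cmd, se_cmd);

	/* one hook now covers what release_cmd_to_pool and
	 * release_cmd_direct handled separately before */
	kmem_cache_free(example_cmd_cache, cmd);
}

static struct target_core_fabric_ops example_fabric_ops = {
	.release_cmd	= example_release_cmd,
};
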
@ -70,7 +64,6 @@ struct target_core_fabric_ops {
	void (*set_default_node_attributes)(struct se_node_acl *);
	u32 (*get_task_tag)(struct se_cmd *);
	int (*get_cmd_state)(struct se_cmd *);
	void (*new_cmd_failure)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	int (*queue_tm_rsp)(struct se_cmd *);

@ -101,6 +101,10 @@
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT 0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD 0

#define SE_MODE_PAGE_BUF 512

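The two new defaults seed the initial per-device attribute values; the se_dev_set_is_nonrot() and se_dev_set_emulate_rest_reord() setters added to target_core_device.h above are the runtime override path. A sketch of the relationship, with the attribute field names assumed (the dereference path follows the DEV_ATTRIB() definition shown earlier in this diff):

/* defaults applied when device attributes are initialized (illustrative) */
dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;

/* later flipped per device, e.g. from a configfs attribute store handler */
se_dev_set_is_nonrot(dev, 1);
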
@ -111,9 +115,8 @@ struct se_subsystem_api;

extern struct kmem_cache *se_mem_cache;

extern int init_se_global(void);
extern void release_se_global(void);
extern void init_scsi_index_table(void);
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
@ -160,36 +163,38 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
	struct se_subsystem_dev *, u32,
	void *, struct se_dev_limits *,
	const char *, const char *);
extern void transport_device_setup_cmd(struct se_cmd *);
extern void transport_init_se_cmd(struct se_cmd *,
	struct target_core_fabric_ops *,
	struct se_session *, u32, int, int,
	unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
	struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd_to_pool(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
extern void transport_release_cmd(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
	void *, struct se_mem *,
	struct scatterlist *, struct se_mem *,
	struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_new_cmd(struct se_cmd *);
extern int transport_generic_do_tmr(struct se_cmd *);
/* From target_core_alua.c */
extern int core_alua_check_nonop_delay(struct se_cmd *);
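Two caller-visible updates land in this hunk: the new kmap pair for touching the first page of a command's data buffer, and transport_generic_free_cmd() losing one of its flag arguments alongside the transport_release_cmd() rename. A hedged usage sketch (the function name and zero flag values are assumptions):

static void example_complete_cmd(struct se_cmd *se_cmd)
{
	unsigned char *buf;

	/* map the first data page to inspect the payload in place */
	buf = transport_kmap_first_data_page(se_cmd);
	/* ... examine or fill buf here ... */
	transport_kunmap_first_data_page(se_cmd);

	/* new three-argument form of the generic free path */
	transport_generic_free_cmd(se_cmd, 0, 0);
}
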
@ -235,13 +240,13 @@ struct se_subsystem_api {
	 */
	int (*cdb_none)(struct se_task *);
	/*
	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
	 * For SCF_SCSI_DATA_SG_IO_CDB
	 */
	int (*map_task_non_SG)(struct se_task *);
	int (*map_data_SG)(struct se_task *);
	/*
	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
	 * For SCF_SCSI_CONTROL_SG_IO_CDB
	 */
	int (*map_task_SG)(struct se_task *);
	int (*map_control_SG)(struct se_task *);
	/*
	 * attach_hba():
	 */
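The renamed hooks split the mapping callbacks by CDB class rather than by whether a task uses scatter-gather. A sketch of how a backend template might fill them in (the handler names are assumed):

static struct se_subsystem_api example_backend_template = {
	/* SCF_SCSI_DATA_SG_IO_CDB: data-path scatter-gather mapping */
	.map_data_SG	= example_map_data_SG,
	/* SCF_SCSI_CONTROL_SG_IO_CDB: control CDBs, now carried in SG lists */
	.map_control_SG	= example_map_control_SG,
};
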
@ -292,7 +297,7 @@ struct se_subsystem_api {
	 * drivers. Provided out of convenience.
	 */
	int (*transport_complete)(struct se_task *task);
	struct se_task *(*alloc_task)(struct se_cmd *);
	struct se_task *(*alloc_task)(unsigned char *cdb);
	/*
	 * do_task():
	 */
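alloc_task() now receives only the CDB instead of the full struct se_cmd, so a backend can size and shape a task without dereferencing command state. A sketch of an allocator conforming to the new signature (the example_task type is an assumption):

struct example_task {
	struct se_task se_task;
	unsigned char opcode;
};

static struct se_task *example_alloc_task(unsigned char *cdb)
{
	struct example_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;
	/* only the CDB is available here; no struct se_cmd dereference */
	task->opcode = cdb[0];
	return &task->se_task;
}
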
@ -341,20 +346,10 @@ struct se_subsystem_api {
	 * Get the sector_t from a subsystem backstore.
	 */
	sector_t (*get_blocks)(struct se_device *);
	/*
	 * do_se_mem_map():
	 */
	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
		struct se_mem *, struct se_mem **, u32 *, u32 *);
	/*
	 * get_sense_buffer():
	 */
	unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;

#define TRANSPORT(dev) ((dev)->transport)
#define HBA_TRANSPORT(hba) ((hba)->transport)

extern struct se_global *se_global;

#endif /* TARGET_CORE_TRANSPORT_H */