Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-28 16:56:26 +00:00)
Merge patch series "target: TMF and recovery fixes"
Mike Christie <michael.christie@oracle.com> says:

The following patches apply over Martin's 6.4 branches and Linus's tree. They fix a couple of regressions in iscsit that occur when TMRs are executing while a connection is closed. The series also includes Dmitry's fixes in related code paths for cmd cleanup when ERL2 is used, and for the write-pending hang during conn cleanup.

This version of the patchset brings it back to just the regressions and the fixes for bugs a lot of users are hitting. I'm going to fix isert and hook it into iscsit properly in a second patchset, because this one was getting so large. I've also moved my cleanup-type patches to a third patchset.

Link: https://lore.kernel.org/r/20230319015620.96006-1-michael.christie@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
commit ae2fb3cb0f
@@ -2506,8 +2506,8 @@ isert_wait4cmds(struct iscsit_conn *conn)
 	isert_info("iscsit_conn %p\n", conn);
 
 	if (conn->sess) {
-		target_stop_session(conn->sess->se_sess);
-		target_wait_for_sess_cmds(conn->sess->se_sess);
+		target_stop_cmd_counter(conn->cmd_cnt);
+		target_wait_for_cmds(conn->cmd_cnt);
 	}
 }
@@ -26,6 +26,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/target_core_backend.h>
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
@@ -1190,9 +1191,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
 	 */
 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
-			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
-			  cmd->data_direction, sam_task_attr,
-			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
+			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+			  cmd->data_direction, sam_task_attr,
+			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
+			  conn->cmd_cnt);
 
 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -2055,7 +2057,8 @@ iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
 			  conn->sess->se_sess, 0, DMA_NONE,
 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
-			  scsilun_to_int(&hdr->lun));
+			  scsilun_to_int(&hdr->lun),
+			  conn->cmd_cnt);
 
 	target_get_sess_cmd(&cmd->se_cmd, true);
 
@@ -4218,9 +4221,12 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
 		struct se_cmd *se_cmd = &cmd->se_cmd;
 
-		if (se_cmd->se_tfo != NULL) {
-			spin_lock_irq(&se_cmd->t_state_lock);
-			if (se_cmd->transport_state & CMD_T_ABORTED) {
+		if (!se_cmd->se_tfo)
+			continue;
+
+		spin_lock_irq(&se_cmd->t_state_lock);
+		if (se_cmd->transport_state & CMD_T_ABORTED) {
 			if (!(se_cmd->transport_state & CMD_T_TAS))
 				/*
 				 * LIO's abort path owns the cleanup for this,
 				 * so put it back on the list and let
@@ -4228,11 +4234,20 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
 				 */
 				list_move_tail(&cmd->i_conn_node,
 					       &conn->conn_cmd_list);
-			} else {
-				se_cmd->transport_state |= CMD_T_FABRIC_STOP;
-			}
-			spin_unlock_irq(&se_cmd->t_state_lock);
+		} else {
+			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+		}
+
+		if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+			/*
+			 * We never submitted the cmd to LIO core, so we have
+			 * to tell LIO to perform the completion process.
+			 */
+			spin_unlock_irq(&se_cmd->t_state_lock);
+			target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
+			continue;
+		}
+		spin_unlock_irq(&se_cmd->t_state_lock);
 	}
 	spin_unlock_bh(&conn->cmd_lock);
@@ -4243,6 +4258,16 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
 		iscsit_free_cmd(cmd, true);
 
 	}
 
+	/*
+	 * Wait on commands that were cleaned up via the aborted_task path.
+	 * LLDs that implement iscsit_wait_conn will already have waited for
+	 * commands.
+	 */
+	if (!conn->conn_transport->iscsit_wait_conn) {
+		target_stop_cmd_counter(conn->cmd_cnt);
+		target_wait_for_cmds(conn->cmd_cnt);
+	}
 }
 
 static void iscsit_stop_timers_for_cmds(
@@ -4517,6 +4542,9 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
 	iscsit_stop_time2retain_timer(sess);
 	spin_unlock_bh(&se_tpg->session_lock);
 
+	if (sess->sess_ops->ErrorRecoveryLevel == 2)
+		iscsit_free_connection_recovery_entries(sess);
+
 	/*
 	 * transport_deregister_session_configfs() will clear the
 	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
@@ -4540,9 +4568,6 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
 
 	transport_deregister_session(sess->se_sess);
 
-	if (sess->sess_ops->ErrorRecoveryLevel == 2)
-		iscsit_free_connection_recovery_entries(sess);
-
 	iscsit_free_all_ooo_cmdsns(sess);
 
 	spin_lock_bh(&se_tpg->session_lock);
@@ -1147,8 +1147,14 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
 			goto free_conn_cpumask;
 	}
 
+	conn->cmd_cnt = target_alloc_cmd_counter();
+	if (!conn->cmd_cnt)
+		goto free_conn_allowed_cpumask;
+
 	return conn;
 
+free_conn_allowed_cpumask:
+	free_cpumask_var(conn->allowed_cpumask);
 free_conn_cpumask:
 	free_cpumask_var(conn->conn_cpumask);
 free_conn_ops:
@@ -1162,6 +1168,7 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
 
 void iscsit_free_conn(struct iscsit_conn *conn)
 {
+	target_free_cmd_counter(conn->cmd_cnt);
 	free_cpumask_var(conn->allowed_cpumask);
 	free_cpumask_var(conn->conn_cpumask);
 	kfree(conn->conn_ops);
@@ -741,6 +741,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	spin_lock_init(&dev->t10_alua.lba_map_lock);
 
 	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+	mutex_init(&dev->lun_reset_mutex);
 
 	dev->t10_wwn.t10_dev = dev;
 	/*
@@ -139,7 +139,6 @@ int init_se_kmem_caches(void);
 void release_se_kmem_caches(void);
 u32 scsi_get_new_index(scsi_index_t);
 void transport_subsystem_check_init(void);
-void transport_uninit_session(struct se_session *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void transport_dump_dev_state(struct se_device *, char *, int *);
 void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -188,14 +188,23 @@ static void core_tmr_drain_tmr_list(
 	 * LUN_RESET tmr..
 	 */
 	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+	if (tmr)
+		list_del_init(&tmr->tmr_list);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
 		if (tmr_p == tmr)
 			continue;
 
 		cmd = tmr_p->task_cmd;
 		if (!cmd) {
 			pr_err("Unable to locate struct se_cmd for TMR\n");
 			continue;
 		}
 
+		/*
+		 * We only execute one LUN_RESET at a time so we can't wait
+		 * on them below.
+		 */
+		if (tmr_p->function == TMR_LUN_RESET)
+			continue;
+
 		/*
 		 * If this function was called with a valid pr_res_key
 		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
@@ -379,14 +388,25 @@ int core_tmr_lun_reset(
 				tmr_nacl->initiatorname);
 		}
 	}
 
+	/*
+	 * We only allow one reset or preempt and abort to execute at a time
+	 * to prevent one call from claiming all the cmds causing a second
+	 * call from returning while cmds it should have waited on are still
+	 * running.
+	 */
+	mutex_lock(&dev->lun_reset_mutex);
+
 	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
 		 (preempt_and_abort_list) ? "Preempt" : "TMR",
 		 dev->transport->name, tas);
 
 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
 	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
 				  preempt_and_abort_list);
 
+	mutex_unlock(&dev->lun_reset_mutex);
+
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
@@ -329,7 +329,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl)
 restart:
 	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
 	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
-		if (atomic_read(&sess->stopped))
+		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
 			continue;
 
 		list_del_init(&sess->sess_acl_list);
@@ -220,45 +220,67 @@ void transport_subsystem_check_init(void)
 	sub_api_initialized = 1;
 }
 
-static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+static void target_release_cmd_refcnt(struct percpu_ref *ref)
 {
-	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
-
-	wake_up(&sess->cmd_count_wq);
+	struct target_cmd_counter *cmd_cnt = container_of(ref,
+							  typeof(*cmd_cnt),
+							  refcnt);
+	wake_up(&cmd_cnt->refcnt_wq);
 }
 
+struct target_cmd_counter *target_alloc_cmd_counter(void)
+{
+	struct target_cmd_counter *cmd_cnt;
+	int rc;
+
+	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
+	if (!cmd_cnt)
+		return NULL;
+
+	init_completion(&cmd_cnt->stop_done);
+	init_waitqueue_head(&cmd_cnt->refcnt_wq);
+	atomic_set(&cmd_cnt->stopped, 0);
+
+	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
+			     GFP_KERNEL);
+	if (rc)
+		goto free_cmd_cnt;
+
+	return cmd_cnt;
+
+free_cmd_cnt:
+	kfree(cmd_cnt);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
+
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+{
+	/*
+	 * Drivers like loop do not call target_stop_session during session
+	 * shutdown so we have to drop the ref taken at init time here.
+	 */
+	if (!atomic_read(&cmd_cnt->stopped))
+		percpu_ref_put(&cmd_cnt->refcnt);
+
+	percpu_ref_exit(&cmd_cnt->refcnt);
+}
+EXPORT_SYMBOL_GPL(target_free_cmd_counter);
+
 /**
  * transport_init_session - initialize a session object
  * @se_sess: Session object pointer.
  *
  * The caller must have zero-initialized @se_sess before calling this function.
  */
-int transport_init_session(struct se_session *se_sess)
+void transport_init_session(struct se_session *se_sess)
 {
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
-	init_waitqueue_head(&se_sess->cmd_count_wq);
-	init_completion(&se_sess->stop_done);
-	atomic_set(&se_sess->stopped, 0);
-	return percpu_ref_init(&se_sess->cmd_count,
-			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);
 
-void transport_uninit_session(struct se_session *se_sess)
-{
-	/*
-	 * Drivers like iscsi and loop do not call target_stop_session
-	 * during session shutdown so we have to drop the ref taken at init
-	 * time here.
-	 */
-	if (!atomic_read(&se_sess->stopped))
-		percpu_ref_put(&se_sess->cmd_count);
-
-	percpu_ref_exit(&se_sess->cmd_count);
-}
-
 /**
  * transport_alloc_session - allocate a session object and initialize it
  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
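The hunk above adds the standalone command counter that the rest of the series builds on. As a reading aid, here is a minimal sketch (not part of the patch) of the intended lifecycle of a counter, using only the helpers whose definitions appear in this diff; the function and variable names are illustrative:

static int example_counter_lifecycle(void)
{
	struct target_cmd_counter *cnt;

	cnt = target_alloc_cmd_counter();	/* percpu_ref starts holding one reference */
	if (!cnt)
		return -ENOMEM;

	/*
	 * While the counter is live, each command takes a reference on
	 * cnt->refcnt (in target_get_sess_cmd()) and drops it on release
	 * (in target_release_cmd_kref()), as the later hunks show.
	 */

	target_stop_cmd_counter(cnt);	/* kill the percpu_ref; percpu_ref_tryget_live() now fails */
	target_wait_for_cmds(cnt);	/* sleep until outstanding commands drop their references */
	target_free_cmd_counter(cnt);	/* drop the initial ref if never stopped, then exit the ref */
	return 0;
}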
@@ -266,7 +288,6 @@ void transport_uninit_session(struct se_session *se_sess)
 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
-	int ret;
 
 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 	if (!se_sess) {
@@ -274,11 +295,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 		       " se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	ret = transport_init_session(se_sess);
-	if (ret < 0) {
-		kmem_cache_free(se_sess_cache, se_sess);
-		return ERR_PTR(ret);
-	}
+	transport_init_session(se_sess);
 	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
@@ -444,8 +461,13 @@ target_setup_session(struct se_portal_group *tpg,
 		     int (*callback)(struct se_portal_group *,
 				     struct se_session *, void *))
 {
+	struct target_cmd_counter *cmd_cnt;
 	struct se_session *sess;
+	int rc;
 
+	cmd_cnt = target_alloc_cmd_counter();
+	if (!cmd_cnt)
+		return ERR_PTR(-ENOMEM);
 	/*
 	 * If the fabric driver is using percpu-ida based pre allocation
 	 * of I/O descriptor tags, go ahead and perform that setup now..
@@ -455,29 +477,36 @@ target_setup_session(
 	else
 		sess = transport_alloc_session(prot_op);
 
-	if (IS_ERR(sess))
-		return sess;
+	if (IS_ERR(sess)) {
+		rc = PTR_ERR(sess);
+		goto free_cnt;
+	}
+	sess->cmd_cnt = cmd_cnt;
 
 	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
 					(unsigned char *)initiatorname);
 	if (!sess->se_node_acl) {
-		transport_free_session(sess);
-		return ERR_PTR(-EACCES);
+		rc = -EACCES;
+		goto free_sess;
 	}
 	/*
 	 * Go ahead and perform any remaining fabric setup that is
 	 * required before transport_register_session().
 	 */
 	if (callback != NULL) {
-		int rc = callback(tpg, sess, private);
-		if (rc) {
-			transport_free_session(sess);
-			return ERR_PTR(rc);
-		}
+		rc = callback(tpg, sess, private);
+		if (rc)
+			goto free_sess;
 	}
 
 	transport_register_session(tpg, sess->se_node_acl, sess, private);
 	return sess;
+
+free_sess:
+	transport_free_session(sess);
+free_cnt:
+	target_free_cmd_counter(cmd_cnt);
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(target_setup_session);
@@ -602,7 +631,8 @@ void transport_free_session(struct se_session *se_sess)
 		sbitmap_queue_free(&se_sess->sess_tag_pool);
 		kvfree(se_sess->sess_cmd_map);
 	}
-	transport_uninit_session(se_sess);
+	if (se_sess->cmd_cnt)
+		target_free_cmd_counter(se_sess->cmd_cnt);
 	kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
@@ -1412,14 +1442,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
  *
  * Preserves the value of @cmd->tag.
  */
-void __target_init_cmd(
-	struct se_cmd *cmd,
-	const struct target_core_fabric_ops *tfo,
-	struct se_session *se_sess,
-	u32 data_length,
-	int data_direction,
-	int task_attr,
-	unsigned char *sense_buffer, u64 unpacked_lun)
+void __target_init_cmd(struct se_cmd *cmd,
+		       const struct target_core_fabric_ops *tfo,
+		       struct se_session *se_sess, u32 data_length,
+		       int data_direction, int task_attr,
+		       unsigned char *sense_buffer, u64 unpacked_lun,
+		       struct target_cmd_counter *cmd_cnt)
 {
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
@@ -1439,6 +1467,7 @@ void __target_init_cmd(
 	cmd->sam_task_attr = task_attr;
 	cmd->sense_buffer = sense_buffer;
 	cmd->orig_fe_lun = unpacked_lun;
+	cmd->cmd_cnt = cmd_cnt;
 
 	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
 		cmd->cpuid = raw_smp_processor_id();
@@ -1658,7 +1687,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 	 * target_core_fabric_ops->queue_status() callback
 	 */
 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
-			  data_dir, task_attr, sense, unpacked_lun);
+			  data_dir, task_attr, sense, unpacked_lun,
+			  se_sess->cmd_cnt);
 
 	/*
 	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
@@ -1953,7 +1983,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 	BUG_ON(!se_tpg);
 
 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
+			  se_sess->cmd_cnt);
 	/*
 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
 	 * allocation failure.
@@ -2957,7 +2988,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
  */
 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 {
-	struct se_session *se_sess = se_cmd->se_sess;
 	int ret = 0;
 
 	/*
@@ -2970,9 +3000,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
 	}
 
-	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
-		ret = -ESHUTDOWN;
-
+	/*
+	 * Users like xcopy do not use counters since they never do a stop
+	 * and wait.
+	 */
+	if (se_cmd->cmd_cnt) {
+		if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
+			ret = -ESHUTDOWN;
+	}
 	if (ret && ack_kref)
 		target_put_sess_cmd(se_cmd);
 
@@ -2993,7 +3028,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd)
 static void target_release_cmd_kref(struct kref *kref)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
-	struct se_session *se_sess = se_cmd->se_sess;
+	struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
 	struct completion *free_compl = se_cmd->free_compl;
 	struct completion *abrt_compl = se_cmd->abrt_compl;
@@ -3004,7 +3039,8 @@ static void target_release_cmd_kref(struct kref *kref)
 	if (abrt_compl)
 		complete(abrt_compl);
 
-	percpu_ref_put(&se_sess->cmd_count);
+	if (cmd_cnt)
+		percpu_ref_put(&cmd_cnt->refcnt);
 }
 
 /**
@@ -3123,46 +3159,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(target_show_cmd);
 
-static void target_stop_session_confirm(struct percpu_ref *ref)
+static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
 {
-	struct se_session *se_sess = container_of(ref, struct se_session,
-						  cmd_count);
-	complete_all(&se_sess->stop_done);
+	struct target_cmd_counter *cmd_cnt = container_of(ref,
+							  struct target_cmd_counter,
+							  refcnt);
+	complete_all(&cmd_cnt->stop_done);
 }
 
+/**
+ * target_stop_cmd_counter - Stop new IO from being added to the counter.
+ * @cmd_cnt: counter to stop
+ */
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
+{
+	pr_debug("Stopping command counter.\n");
+	if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
+		percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
+					    target_stop_cmd_counter_confirm);
+}
+EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
+
 /**
  * target_stop_session - Stop new IO from being queued on the session.
  * @se_sess: session to stop
 */
 void target_stop_session(struct se_session *se_sess)
 {
-	pr_debug("Stopping session queue.\n");
-	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
-		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
-					    target_stop_session_confirm);
+	target_stop_cmd_counter(se_sess->cmd_cnt);
 }
 EXPORT_SYMBOL(target_stop_session);
 
 /**
- * target_wait_for_sess_cmds - Wait for outstanding commands
- * @se_sess: session to wait for active I/O
+ * target_wait_for_cmds - Wait for outstanding cmds.
+ * @cmd_cnt: counter to wait for active I/O for.
 */
-void target_wait_for_sess_cmds(struct se_session *se_sess)
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
 {
 	int ret;
 
-	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
+	WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
 
 	do {
 		pr_debug("Waiting for running cmds to complete.\n");
-		ret = wait_event_timeout(se_sess->cmd_count_wq,
-					 percpu_ref_is_zero(&se_sess->cmd_count),
-					 180 * HZ);
+		ret = wait_event_timeout(cmd_cnt->refcnt_wq,
					 percpu_ref_is_zero(&cmd_cnt->refcnt),
+					 180 * HZ);
 	} while (ret <= 0);
 
-	wait_for_completion(&se_sess->stop_done);
+	wait_for_completion(&cmd_cnt->stop_done);
 	pr_debug("Waiting for cmds done.\n");
 }
+EXPORT_SYMBOL_GPL(target_wait_for_cmds);
+
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding commands
+ * @se_sess: session to wait for active I/O
+ */
+void target_wait_for_sess_cmds(struct se_session *se_sess)
+{
+	target_wait_for_cmds(se_sess->cmd_cnt);
+}
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
 /*
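After the hunk above, the session-level helpers are thin wrappers around the counter primitives. A hedged sketch of the resulting shutdown ordering for a driver that still operates per session (the surrounding function name is illustrative, not from this patch set):

static void example_session_shutdown(struct se_session *se_sess)
{
	/*
	 * target_stop_session() now just stops se_sess->cmd_cnt, so new
	 * commands fail percpu_ref_tryget_live() in target_get_sess_cmd().
	 */
	target_stop_session(se_sess);

	/*
	 * target_wait_for_sess_cmds() forwards to target_wait_for_cmds() and
	 * blocks until every outstanding command has dropped its reference
	 * and the stop confirmation has completed.
	 */
	target_wait_for_sess_cmds(se_sess);
}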
@@ -461,8 +461,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
 
 int target_xcopy_setup_pt(void)
 {
-	int ret;
-
 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
 	if (!xcopy_wq) {
 		pr_err("Unable to allocate xcopy_wq\n");
@@ -479,9 +477,7 @@ int target_xcopy_setup_pt(void)
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
-	ret = transport_init_session(&xcopy_pt_sess);
-	if (ret < 0)
-		goto destroy_wq;
+	transport_init_session(&xcopy_pt_sess);
 
 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
@@ -490,19 +486,12 @@ int target_xcopy_setup_pt(void)
 	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
 
 	return 0;
-
-destroy_wq:
-	destroy_workqueue(xcopy_wq);
-	xcopy_wq = NULL;
-	return ret;
 }
 
 void target_xcopy_release_pt(void)
 {
-	if (xcopy_wq) {
+	if (xcopy_wq)
 		destroy_workqueue(xcopy_wq);
-		transport_uninit_session(&xcopy_pt_sess);
-	}
 }
 
 /*
@@ -602,8 +591,8 @@ static int target_xcopy_read_source(
 		(unsigned long long)src_lba, transfer_length_block, src_bytes);
 
 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
-			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
-
+			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+			  NULL);
 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
 				       remote_port);
 	if (rc < 0) {
@@ -647,8 +636,8 @@ static int target_xcopy_write_destination(
 		(unsigned long long)dst_lba, transfer_length_block, dst_bytes);
 
 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
-			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
-
+			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+			  NULL);
 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
 				       remote_port);
 	if (rc < 0) {
@@ -1054,7 +1054,7 @@ static void usbg_cmd_work(struct work_struct *work)
 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
 				  cmd->prio_attr, cmd->sense_iu.sense,
-				  cmd->unpacked_lun);
+				  cmd->unpacked_lun, NULL);
 		goto out;
 	}
 
@@ -1183,7 +1183,7 @@ static void bot_cmd_work(struct work_struct *work)
 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
 				  cmd->prio_attr, cmd->sense_iu.sense,
-				  cmd->unpacked_lun);
+				  cmd->unpacked_lun, NULL);
 		goto out;
 	}
 
@@ -600,6 +600,7 @@ struct iscsit_conn {
 	struct iscsi_tpg_np *tpg_np;
 	/* Pointer to parent session */
 	struct iscsit_session *sess;
+	struct target_cmd_counter *cmd_cnt;
 	int bitmap_id;
 	int rx_thread_active;
 	struct task_struct *rx_thread;
@@ -494,6 +494,7 @@ struct se_cmd {
 	struct se_lun *se_lun;
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session *se_sess;
+	struct target_cmd_counter *cmd_cnt;
 	struct se_tmr_req *se_tmr_req;
 	struct llist_node se_cmd_list;
 	struct completion *free_compl;
@@ -619,22 +620,26 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
 			acl_fabric_stat_group);
 }
 
-struct se_session {
+struct target_cmd_counter {
+	struct percpu_ref refcnt;
+	wait_queue_head_t refcnt_wq;
+	struct completion stop_done;
+	atomic_t stopped;
+};
+
+struct se_session {
 	u64 sess_bin_isid;
 	enum target_prot_op sup_prot_ops;
 	enum target_prot_type sess_prot_type;
 	struct se_node_acl *se_node_acl;
 	struct se_portal_group *se_tpg;
 	void *fabric_sess_ptr;
-	struct percpu_ref cmd_count;
 	struct list_head sess_list;
 	struct list_head sess_acl_list;
 	spinlock_t sess_cmd_lock;
-	wait_queue_head_t cmd_count_wq;
-	struct completion stop_done;
 	void *sess_cmd_map;
 	struct sbitmap_queue sess_tag_pool;
+	struct target_cmd_counter *cmd_cnt;
 };
 
 struct se_device;
@@ -863,6 +868,7 @@ struct se_device {
 	struct rcu_head rcu_head;
 	int queue_cnt;
 	struct se_device_queue *queues;
+	struct mutex lun_reset_mutex;
 };
 
 struct target_opcode_descriptor {
@@ -133,7 +133,12 @@ struct se_session *target_setup_session(struct se_portal_group *,
 				struct se_session *, void *));
 void target_remove_session(struct se_session *);
 
-int transport_init_session(struct se_session *se_sess);
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
+struct target_cmd_counter *target_alloc_cmd_counter(void);
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);
+
+void transport_init_session(struct se_session *se_sess);
 struct se_session *transport_alloc_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
@@ -149,9 +154,11 @@ void transport_deregister_session_configfs(struct se_session *);
 void transport_deregister_session(struct se_session *);
 
 
-void __target_init_cmd(struct se_cmd *,
-		const struct target_core_fabric_ops *,
-		struct se_session *, u32, int, int, unsigned char *, u64);
+void __target_init_cmd(struct se_cmd *cmd,
+		       const struct target_core_fabric_ops *tfo,
+		       struct se_session *sess, u32 data_length, int data_direction,
+		       int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
+		       struct target_cmd_counter *cmd_cnt);
 int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 		unsigned char *sense, u64 unpacked_lun, u32 data_length,
 		int task_attr, int data_dir, int flags);
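Taken together, the header changes above let a fabric driver tie command lifetimes to a per-connection counter instead of the session-wide one, which is what the iscsit hunks earlier in this diff do. A small sketch with hypothetical driver types (my_conn, my_cmd and my_fabric_ops are illustrative, not part of this series):

static int my_conn_alloc(struct my_conn *conn)
{
	conn->cmd_cnt = target_alloc_cmd_counter();
	if (!conn->cmd_cnt)
		return -ENOMEM;
	return 0;
}

static void my_init_cmd(struct my_conn *conn, struct my_cmd *cmd, u64 lun, u32 len)
{
	/* The new trailing argument selects the counter the command pins. */
	__target_init_cmd(&cmd->se_cmd, &my_fabric_ops, conn->se_sess, len,
			  cmd->data_direction, TCM_SIMPLE_TAG,
			  cmd->sense_buffer, lun, conn->cmd_cnt);
}

static void my_conn_close(struct my_conn *conn)
{
	target_stop_cmd_counter(conn->cmd_cnt);	/* stop new IO on this connection */
	target_wait_for_cmds(conn->cmd_cnt);	/* wait for commands still in flight */
	target_free_cmd_counter(conn->cmd_cnt);
}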