Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "This series is predominantly bug-fixes, with a few small improvements
  that have been outstanding over the last release cycle. As usual, the
  associated bug-fixes have CC' tags for stable.

  Also, things have been particularly quiet wrt new developments the
  last months, with most folks continuing to focus on stability atop
  4.x stable kernels for their respective production configurations.

  Also at this point, the stable trees have been synced up with
  mainline. This will continue to be a priority, as production users
  tend to run exclusively atop stable kernels, a few releases behind
  mainline.

  The highlights include:

   - Fix PR PREEMPT_AND_ABORT null pointer dereference regression in
     v4.11+ (tangwenji)

   - Fix OOPs during removing TCMU device (Xiubo Li + Zhang Zhuoyu)

   - Add netlink command reply supported option for each device
     (Kenjiro Nakayama)

   - cxgbit: Abort the TCP connection in case of data out timeout
     (Varun Prakash)

   - Fix PR/ALUA file path truncation (David Disseldorp)

   - Fix double se_cmd completion during ->cmd_time_out (Mike Christie)

   - Fix QUEUE_FULL + SCSI task attribute handling in 4.1+
     (Bryant Ly + nab)

   - Fix quiese during transport_write_pending_qf endless loop (nab)

   - Avoid early CMD_T_PRE_EXECUTE failures during ABORT_TASK in 3.14+
     (Don White + nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (35 commits)
  tcmu: Add a missing unlock on an error path
  tcmu: Fix some memory corruption
  iscsi-target: Fix non-immediate TMR reference leak
  iscsi-target: Make TASK_REASSIGN use proper se_cmd->cmd_kref
  target: Avoid early CMD_T_PRE_EXECUTE failures during ABORT_TASK
  target: Fix quiese during transport_write_pending_qf endless loop
  target: Fix caw_sem leak in transport_generic_request_failure
  target: Fix QUEUE_FULL + SCSI task attribute handling
  iSCSI-target: Use common error handling code in iscsi_decode_text_input()
  target/iscsi: Detect conn_cmd_list corruption early
  target/iscsi: Fix a race condition in iscsit_add_reject_from_cmd()
  target/iscsi: Modify iscsit_do_crypto_hash_buf() prototype
  target/iscsi: Fix endianness in an error message
  target/iscsi: Use min() in iscsit_dump_data_payload() instead of open-coding it
  target/iscsi: Define OFFLOAD_BUF_SIZE once
  target: Inline transport_put_cmd()
  target: Suppress gcc 7 fallthrough warnings
  target: Move a declaration of a global variable into a header file
  tcmu: fix double se_cmd completion
  target: return SAM_STAT_TASK_SET_FULL for TCM_OUT_OF_RESOURCES
  ...
commit eda5d47134
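One recurring pattern in this pull is the PR/ALUA path-truncation fix: the core_alua_update_tpg_*_metadata() and __core_scsi3_write_aptpl_to_file() hunks below drop fixed-size on-stack path buffers in favour of kasprintf()-allocated strings. A minimal user-space sketch of the same idea — using asprintf(3) in place of the kernel's kasprintf(), with invented db_root and unit-serial values — might look like:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch only: build the metadata path on the heap instead of a fixed
 * char path[512], so long WWN/configfs names cannot be truncated.
 * db_root and unit_serial are made-up placeholder values.
 */
int main(void)
{
	const char *db_root = "/var/target";
	const char *unit_serial = "600140511e9a3c5d000000000000000";
	char *path = NULL;

	/* asprintf() allocates exactly as much as the formatted string needs */
	if (asprintf(&path, "%s/alua/tpgs_%s/%s", db_root, unit_serial,
		     "default_tg_pt_gp") < 0)
		return 1;	/* allocation failed, nothing to free */

	printf("writing ALUA metadata to %s\n", path);
	free(path);
	return 0;
}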
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,
	CSK_ABORT_RPL_WAIT,
};

struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	__kfree_skb(skb);

	if (csk->com.state != CSK_STATE_ESTABLISHED)
		goto no_abort;

	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
	csk->com.state = CSK_STATE_ABORTING;

	cxgbit_send_abort_req(csk);

	return;

no_abort:
	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
	cxgbit_put_csk(csk);
}

void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
		__skb_queue_tail(&csk->backlogq, skb);
	} else {
		__cxgbit_abort_conn(csk, skb);
	}
	spin_unlock_bh(&csk->lock);

	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
			      csk->tid, 600, __func__);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)

static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
			cxgbit_wake_up(&csk->com.wr_wait, __func__,
				       rpl->status);
		cxgbit_put_csk(csk);
		break;
	default:
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
		struct cxgbit_device *cdev = csk->com.cdev;
		struct cxgbi_ppm *ppm = cdev2ppm(cdev);

		/* Abort the TCP conn if DDP is not complete to
		 * avoid any possibility of DDP after freeing
		 * the cmd.
		 */
		if (unlikely(cmd->write_data_done !=
			     cmd->se_cmd.data_length))
			cxgbit_abort_conn(csk);

		cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
EXPORT_SYMBOL(iscsit_aborted_task);

static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
				      u32, u32, u8 *, u8 *);
				      u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);

static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
					  ISCSI_HDR_LEN, 0, NULL,
					  (u8 *)header_digest);
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
						  data_buf, data_buf_len,
						  padding,
						  (u8 *)&cmd->pad_bytes,
						  (u8 *)&cmd->data_crc);
						  padding, &cmd->pad_bytes,
						  &cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
					  ISCSI_HDR_LEN, 0, NULL,
					  (u8 *)header_digest);
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
	unsigned char *buf)
{
	struct iscsi_conn *conn;
	const bool do_put = cmd->se_cmd.se_tfo != NULL;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
	 * Perform the kref_put now if se_cmd has already been setup by
	 * scsit_setup_scsi_cmd()
	 */
	if (cmd->se_cmd.se_tfo != NULL) {
	if (do_put) {
		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
	return data_crc;
}

static void iscsit_do_crypto_hash_buf(
	struct ahash_request *hash,
	const void *buf,
	u32 payload_length,
	u32 padding,
	u8 *pad_bytes,
	u8 *data_crc)
static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
	const void *buf, u32 payload_length, u32 padding,
	const void *pad_bytes, void *data_crc)
{
	struct scatterlist sg[2];

@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
	iscsit_mod_dataout_timer(cmd);

	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than"
			" iSCSI Command EDTL %u, protocol error.\n",
			hdr->offset, payload_length, cmd->se_cmd.data_length);
		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
			be32_to_cpu(hdr->offset), payload_length,
			cmd->se_cmd.data_length);
		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	}

	if (conn->conn_ops->DataDigest) {
		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
					  ping_data, payload_length,
					  padding, cmd->pad_bytes,
					  (u8 *)&data_crc);
		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
					  payload_length, padding,
					  cmd->pad_bytes, &data_crc);

		if (checksum != data_crc) {
			pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	struct iscsi_tmr_req *tmr_req;
	struct iscsi_tm *hdr;
	int out_of_order_cmdsn = 0, ret;
	bool sess_ref = false;
	u8 function, tcm_function = TMR_UNKNOWN;

	hdr = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

	cmd->data_direction = DMA_NONE;
	cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
	if (!cmd->tmr_req)
	if (!cmd->tmr_req) {
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					     buf);
	}

	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
			      conn->sess->se_sess, 0, DMA_NONE,
			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);

	target_get_sess_cmd(&cmd->se_cmd, true);

	/*
	 * TASK_REASSIGN for ERL=2 / connection stays inside of
	 * LIO-Target $FABRIC_MOD
	 */
	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
				      conn->sess->se_sess, 0, DMA_NONE,
				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);

		target_get_sess_cmd(&cmd->se_cmd, true);
		sess_ref = true;
		tcm_function = iscsit_convert_tmf(function);
		if (tcm_function == TMR_UNKNOWN) {
			pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
			out_of_order_cmdsn = 1;
		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
			return -1;
		}
	}
	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

@@ -2126,12 +2123,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	 * For connection recovery, this is also the default action for
	 * TMR TASK_REASSIGN.
	 */
	if (sess_ref) {
		pr_debug("Handle TMR, using sess_ref=true check\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}

	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	target_put_sess_cmd(&cmd->se_cmd);
	return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		goto reject;

	if (conn->conn_ops->DataDigest) {
		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
					  text_in, payload_length,
					  padding, (u8 *)&pad_bytes,
					  (u8 *)&data_crc);
		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
					  payload_length, padding,
					  &pad_bytes, &data_crc);

		if (checksum != data_crc) {
			pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
			return;
		}

		iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
					  buffer, ISCSI_HDR_LEN,
					  0, NULL, (u8 *)&checksum);
		iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
					  ISCSI_HDR_LEN, 0, NULL,
					  &checksum);

		if (digest != checksum) {
			pr_err("HeaderDigest CRC32C failed,"
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(

	ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
	if (ret < 0)
		return NULL;
		goto free_out;

	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
	if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
	return &tpg->tpg_se_tpg;
out:
	core_tpg_deregister(&tpg->tpg_se_tpg);
free_out:
	kfree(tpg);
	return NULL;
}
@@ -34,7 +34,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"

#define OFFLOAD_BUF_SIZE 32768
#define OFFLOAD_BUF_SIZE 32768U

/*
 * Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
	if (conn->sess->sess_ops->RDMAExtensions)
		return 0;

	length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
	length = min(buf_len, OFFLOAD_BUF_SIZE);

	buf = kzalloc(length, GFP_ATOMIC);
	if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
	memset(&iov, 0, sizeof(struct kvec));

	while (offset < buf_len) {
		size = ((offset + length) > buf_len) ?
			(buf_len - offset) : length;
		size = min(buf_len - offset, length);

		iov.iov_len = size;
		iov.iov_base = buf;
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
		char *key, *value;
		struct iscsi_param *param;

		if (iscsi_extract_key_value(start, &key, &value) < 0) {
			kfree(tmpbuf);
			return -1;
		}
		if (iscsi_extract_key_value(start, &key, &value) < 0)
			goto free_buffer;

		pr_debug("Got key: %s=%s\n", key, value);

@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(

		param = iscsi_check_key(key, phase, sender, param_list);
		if (!param) {
			if (iscsi_add_notunderstood_response(key,
					value, param_list) < 0) {
				kfree(tmpbuf);
				return -1;
			}
			if (iscsi_add_notunderstood_response(key, value,
							     param_list) < 0)
				goto free_buffer;

			start += strlen(key) + strlen(value) + 2;
			continue;
		}
		if (iscsi_check_value(param, value) < 0) {
			kfree(tmpbuf);
			return -1;
		}
		if (iscsi_check_value(param, value) < 0)
			goto free_buffer;

		start += strlen(key) + strlen(value) + 2;

		if (IS_PSTATE_PROPOSER(param)) {
			if (iscsi_check_proposer_state(param, value) < 0) {
				kfree(tmpbuf);
				return -1;
			}
			if (iscsi_check_proposer_state(param, value) < 0)
				goto free_buffer;

			SET_PSTATE_RESPONSE_GOT(param);
		} else {
			if (iscsi_check_acceptor_state(param, value, conn) < 0) {
				kfree(tmpbuf);
				return -1;
			}
			if (iscsi_check_acceptor_state(param, value, conn) < 0)
				goto free_buffer;

			SET_PSTATE_ACCEPTOR(param);
		}
	}

	kfree(tmpbuf);
	return 0;

free_buffer:
	kfree(tmpbuf);
	return -1;
}

int iscsi_encode_text_output(
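The iscsi_decode_text_input() hunks above collapse several duplicated "kfree(tmpbuf); return -1;" exits into a single free_buffer label. A small stand-alone sketch of that centralized-cleanup style — parse_pair() and the key/value handling are invented for illustration — could read:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for iscsi_extract_key_value() and friends. */
static int parse_pair(const char *kv, char **key, char **value)
{
	const char *eq = strchr(kv, '=');

	if (!eq)
		return -1;
	*key = strndup(kv, (size_t)(eq - kv));
	*value = strdup(eq + 1);
	return (*key && *value) ? 0 : -1;
}

int decode_text_input(const char *input)
{
	char *tmpbuf, *key = NULL, *value = NULL;

	tmpbuf = strdup(input);
	if (!tmpbuf)
		return -1;

	/* Every failure path funnels through one cleanup label. */
	if (parse_pair(tmpbuf, &key, &value) < 0)
		goto free_buffer;
	if (!*key || !*value)
		goto free_buffer;

	printf("Got key: %s=%s\n", key, value);
	free(key);
	free(value);
	free(tmpbuf);
	return 0;

free_buffer:
	free(key);
	free(value);
	free(tmpbuf);
	return -1;
}

int main(void)
{
	return decode_text_input("HeaderDigest=CRC32C") ? 1 : 0;
}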
@@ -25,8 +25,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"

#define OFFLOAD_BUF_SIZE 32768

#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
	 */
	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
	if (!param)
		goto out;
		goto free_pl_out;

	if (iscsi_update_param_value(param, "CHAP,None") < 0)
		goto out;
		goto free_pl_out;

	tpg->tpg_attrib.authentication = 0;

@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
	pr_debug("CORE[0] - Allocated Discovery TPG\n");

	return 0;
free_pl_out:
	iscsi_release_param_list(tpg->param_list);
out:
	if (tpg->sid == 1)
		core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
	if (!tpg)
		return;

	iscsi_release_param_list(tpg->param_list);
	core_tpg_deregister(&tpg->tpg_se_tpg);

	kfree(tpg);
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->conn)
		sess = cmd->conn->sess;
	else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->data_direction == DMA_TO_DEVICE) {
		iscsit_stop_dataout_timer(cmd);
		iscsit_free_r2ts_from_list(cmd);
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	char *path;
	int len, rc;

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
		return -ENOMEM;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
			tg_pt_gp->tg_pt_gp_alua_access_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	rc = -ENOMEM;
	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
			&wwn->unit_serial[0],
			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
	if (path) {
		rc = core_alua_write_tpg_metadata(path, md_buf, len);
		kfree(path);
	}
	kfree(md_buf);
	return rc;
}
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
	struct se_portal_group *se_tpg = lun->lun_tpg;
	unsigned char *md_buf;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	char *path;
	int len, rc;

	mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
		goto out_unlock;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&lun->lun_tg_pt_secondary_offline),
			lun->lun_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
			db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			lun->unpacked_lun);
	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				lun->unpacked_lun);
	} else {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				lun->unpacked_lun);
	}
	if (!path) {
		rc = -ENOMEM;
		goto out_free;
	}

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(path);
out_free:
	kfree(md_buf);

out_unlock:
	mutex_unlock(&lun->lun_tg_pt_md_mutex);
	return rc;
@@ -72,15 +72,6 @@
 */
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
/*
 * Used by core_alua_update_tpg_primary_metadata() and
 * core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_METADATA_PATH_LEN 512
/*
 * Used by core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_SECONDARY_METADATA_WWN_LEN 256

/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%lld"},
	{Opt_mapped_lun, "mapped_lun=%u"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%lld"},
	{Opt_target_lun, "target_lun=%u"},
	{Opt_err, NULL}
};

@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
			}
			break;
		case Opt_sa_res_key:
			ret = kstrtoull(args->from, 0, &tmp_ll);
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_int(args, &arg);
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)arg;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
				goto out;
			break;
		case Opt_target_lun:
			ret = match_int(args, &arg);
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)arg;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
	NULL,
};

extern struct configfs_item_operations target_core_dev_item_ops;

static int target_fabric_port_link(
	struct config_item *lun_ci,
	struct config_item *se_dev_ci)
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (!nolb) {
		return 0;
	}

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
@@ -89,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data);

/* target_core_configfs.c */
extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);

/* target_core_fabric_configfs.c */
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
	char *buf,
	u32 size)
{
	if (!pr_reg->isid_present_at_reg)
	if (!pr_reg->isid_present_at_reg) {
		buf[0] = '\0';
		return;
	}

	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
		break;
	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
		we = 1;
		/* fall through */
	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
		/*
		 * Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
		break;
	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
		we = 1;
		/* fall through */
	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
		/*
		 * Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
	if (!tidh_new) {
		pr_err("Unable to allocate tidh_new\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
	}
	INIT_LIST_HEAD(&tidh_new->dest_list);
	tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
				sa_res_key, all_tg_pt, aptpl);
	if (!local_pr_reg) {
		kfree(tidh_new);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
	}
	tidh_new->dest_pr_reg = local_pr_reg;
	/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(

	buf = transport_kmap_data_sg(cmd);
	if (!buf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
		goto out;
	}

@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
			core_scsi3_nodeacl_undepend_item(dest_node_acl);
			core_scsi3_tpg_undepend_item(dest_tpg);
			kfree(tidh_new);
			ret = TCM_INVALID_PARAMETER_LIST;
			ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
			goto out_unmap;
		}
		tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
	struct t10_wwn *wwn = &dev->t10_wwn;
	struct file *file;
	int flags = O_RDWR | O_CREAT | O_TRUNC;
	char path[512];
	char *path;
	u32 pr_aptpl_buf_len;
	int ret;
	loff_t pos = 0;

	memset(path, 0, 512);
	path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
			&wwn->unit_serial[0]);
	if (!path)
		return -ENOMEM;

	if (strlen(&wwn->unit_serial[0]) >= 512) {
		pr_err("WWN value for struct se_device does not fit"
			" into path buffer\n");
		return -EMSGSIZE;
	}

	snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
	file = filp_open(path, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for APTPL metadata"
			" failed\n", path);
		kfree(path);
		return PTR_ERR(file);
	}

@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
	if (ret < 0)
		pr_debug("Error writing APTPL metadata file: %s\n", path);
	fput(file);
	kfree(path);

	return (ret < 0) ? -EIO : 0;
}
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
				register_type, 0)) {
			pr_err("Unable to allocate"
				" struct t10_pr_registration\n");
			return TCM_INVALID_PARAMETER_LIST;
			return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
		}
	} else {
		/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
	 */
	buf = transport_kmap_data_sg(cmd);
	if (!buf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
		goto out_put_pr_reg;
	}

@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,

	buf = transport_kmap_data_sg(cmd);
	if (!buf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
		goto out_put_pr_reg;
	}
	proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
	if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
					dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
					iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
		ret = TCM_INVALID_PARAMETER_LIST;
		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
		goto out;
	}
	spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,

	core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);

	transport_kunmap_data_sg(cmd);

	core_scsi3_put_pr_reg(dest_pr_reg);
	return 0;
out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
		 * Set the ADDITIONAL DESCRIPTOR LENGTH
		 */
		put_unaligned_be32(desc_len, &buf[off]);
		off += 4;
		/*
		 * Size of full desctipor header minus TransportID
		 * containing $FABRIC_MOD specific) initiator device/port
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
		spin_unlock(&se_cmd->t_state_lock);
		return false;
	}
	if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
		if (se_cmd->scsi_status) {
			pr_debug("Attempted to abort io tag: %llu early failure"
				 " status: 0x%02x\n", se_cmd->tag,
				 se_cmd->scsi_status);
			spin_unlock(&se_cmd->t_state_lock);
			return false;
		}
	}
	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
		pr_debug("Attempted to abort io tag: %llu already shutdown,"
			 " skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
	 * LUN_RESET tmr..
	 */
	spin_lock_irqsave(&dev->se_tmr_lock, flags);
	list_del_init(&tmr->tmr_list);
	if (tmr)
		list_del_init(&tmr->tmr_list);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		cmd = tmr_p->task_cmd;
		if (!cmd) {
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
	if (transport_cmd_check_stop_to_fabric(cmd))
		return 1;
	if (remove && ack_kref)
		ret = transport_put_cmd(cmd);
		ret = target_put_sess_cmd(cmd);

	return ret;
}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
	int ret = 0, post_ret = 0;

	if (transport_check_aborted_status(cmd, 1))
		return;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
		 sense_reason);
	target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
	 * callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
	    cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	if (transport_check_aborted_status(cmd, 1))
		return;

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		goto queue_status;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
					       cmd->orig_fe_lun, 0x2C,
					       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto queue_full;
		goto check_stop;

		goto queue_status;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_status:
	trace_target_cmd_complete(cmd);
	ret = cmd->se_tfo->queue_status(cmd);
	if (!ret)
		goto check_stop;
queue_full:
	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		cmd->transport_state |= CMD_T_SENT;

		__target_execute_cmd(cmd, true);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}
	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;

restart:
	target_restart_delayed_cmds(dev);
}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
		/* fall through */
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ static void target_complete_ok_work(struct work_struct *work)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
		/* fall through */
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;
	bool stop;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (stop) {
		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
			target_wait_free_cmd(cmd, &aborted, &tas);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
			ret = target_put_sess_cmd(cmd);
	} else {
		if (wait_for_tasks)
			target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
			transport_lun_remove_cmd(cmd);

		if (!aborted || tas)
			ret = transport_put_cmd(cmd);
			ret = target_put_sess_cmd(cmd);
	}
	/*
	 * If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
		ret = -ESHUTDOWN;
		goto out;
	}
	se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
		/*
		 * From spc4r22 section5.7.7,5.7.8
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
		 * or a REGISTER AND IGNORE EXISTING KEY service action or
		 * REGISTER AND MOVE service actionis attempted,
		 * but there are insufficient device server resources to complete the
		 * operation, then the command shall be terminated with CHECK CONDITION
		 * status, with the sense key set to ILLEGAL REQUEST,and the additonal
		 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
		 */
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
	},
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
@@ -150,6 +150,8 @@ struct tcmu_dev {
	wait_queue_head_t nl_cmd_wq;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	if (udev->cmd_time_out)
		tcmu_cmd->deadline = jiffies +
					msecs_to_jiffies(udev->cmd_time_out);

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
		return NULL;
	}

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
		USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		tcmu_free_cmd(tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
	return command_size;
}

static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned long tmo = udev->cmd_time_out;
	int cmd_id;

	if (tcmu_cmd->cmd_id)
		return 0;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	if (!tmo)
		return 0;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(&udev->timeout, tcmu_cmd->deadline);
	return 0;
}

static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_OUT_OF_RESOURCES;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalaulate the command's base size and size according
	 * to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t ret;

@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		tcmu_free_cmd(tcmu_cmd);
	}
@@ -1112,6 +1123,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
	init_waitqueue_head(&udev->nl_cmd_wq);
	spin_lock_init(&udev->nl_cmd_lock);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

@@ -1280,10 +1293,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct tcmu_dev *udev)
{
	int i;
	struct page *page;

	/* Try to release all block pages */
	mutex_lock(&udev->cmdr_lock);
	for (i = 0; i <= udev->dbi_max; i++) {
		page = radix_tree_delete(&udev->data_blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(!all_expired);

	tcmu_blocks_release(udev);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1306,6 +1363,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

relock:
	spin_lock(&udev->nl_cmd_lock);

@@ -1332,6 +1393,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

@@ -1476,8 +1540,6 @@ static int tcmu_configure_device(struct se_device *dev)
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1568,12 @@ static int tcmu_configure_device(struct se_device *dev)
	dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref incase userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
@@ -1527,6 +1595,7 @@ static int tcmu_configure_device(struct se_device *dev)
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	kfree(info->name);
	info->name = NULL;
@@ -1534,37 +1603,11 @@ static int tcmu_configure_device(struct se_device *dev)
	return ret;
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
	return udev->uio_info.uio_dev ? true : false;
}

static void tcmu_blocks_release(struct tcmu_dev *udev)
{
	int i;
	struct page *page;

	/* Try to release all block pages */
	mutex_lock(&udev->cmdr_lock);
	for (i = 0; i <= udev->dbi_max; i++) {
		page = radix_tree_delete(&udev->data_blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1619,6 @@ static void tcmu_free_device(struct se_device *dev)
static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	del_timer_sync(&udev->timeout);

@@ -1586,20 +1626,6 @@ static void tcmu_destroy_device(struct se_device *dev)
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(!all_expired);

	tcmu_blocks_release(udev);

	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);

	uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1636,7 @@ static void tcmu_destroy_device(struct se_device *dev)

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_err,
	Opt_nl_reply_supported, Opt_err,
};

static match_table_t tokens = {
@@ -1618,6 +1644,7 @@ static match_table_t tokens = {
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_err, NULL}
};

@@ -1692,6 +1719,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		ret = tcmu_set_dev_attrib(&args[0],
				&(dev->dev_attrib.hw_max_sectors));
		break;
	case Opt_nl_reply_supported:
		arg_p = match_strdup(&args[0]);
		if (!arg_p) {
			ret = -ENOMEM;
			break;
		}
		ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
		kfree(arg_p);
		if (ret < 0)
			pr_err("kstrtoint() failed for nl_reply_supported=\n");
		break;
	default:
		break;
	}
@@ -1734,8 +1772,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
@@ -1842,6 +1879,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
		char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
		char *page)
{
@@ -1884,6 +1949,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};
@@ -182,6 +182,7 @@ enum tcm_sense_reason_table {
	TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a),
	TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
	TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
	TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
#undef R
};

@@ -490,6 +491,7 @@ struct se_cmd {
#define CMD_T_STOP (1 << 5)
#define CMD_T_TAS (1 << 10)
#define CMD_T_FABRIC_STOP (1 << 11)
#define CMD_T_PRE_EXECUTE (1 << 12)
	spinlock_t t_state_lock;
	struct kref cmd_kref;
	struct completion t_transport_stop_comp;