block-6.9-20240503
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmY09JYQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpvYID/wNyZxtEc4VP7u6fqwzawdf6Cawi+rJ0MN+
7fJATnV9Az7Jh7vy7XTDJ01yUe7u/uMvep4XrXuR38Pq5Kfu8jMx1BJ2M2tBJzr3
s2UWgiWSlCQuhKJsAstEVoimMBYggreS7fVYoTzs4b4ZixJF9mVyjUMxSIKj7N19
S3iyzb4CwJw5EhB7vZqIoi0wzaHfRvyXP0ssYlBC2yY7r04N+RjY1q7IrwsLey8f
n6j6bHENbfKd/fY3clb0MilXAsfYMAdMhl4vyK/8Z0Lfe+sjeRz7z++1nRASd/cE
Sc6GW/Dp4xtgJU63VT/bYEPxwo4ZLDoutgrq3SI0kxOVks+JDPnLBVoDdPyJhEct
dZWm4ZThuD5DJ1kEIAsiDaHsy/konhvcPnR1tDZYgs4K4uOvP5XJKnl/qlAd+Hud
4MXahuoIrV8XONuUjKXWEQ5mdekPV00JpYGgc4BTKR6DiNkFx4OPR/gnYYR1aDLS
uASXNBwaqYTu+iMiqYYW2m1ryagtCChVh510gjUZBxFTLGR95RwTZDTgVUqeVbTK
ZkQh9uhmYGOzVWEKIlGc903TcmA2EByq+rPIewc7WJfFp/Ob1YamgUVUfWUVnAG6
KBKfWRGzeQHkf2QHOeThWVlGXr7gDHtS5fib27aPXaW/gTc3qveBl3GxfT1Nxo+z
y1TbxWfnEA==
=of/D
-----END PGP SIGNATURE-----

Merge tag 'block-6.9-20240503' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Nothing major in here - an nvme pull request with mostly auth/tcp
  fixes, and a single fix for ublk not setting segment count and size
  limits"

* tag 'block-6.9-20240503' of git://git.kernel.dk/linux:
  nvme-tcp: strict pdu pacing to avoid send stalls on TLS
  nvmet: fix nvme status code when namespace is disabled
  nvmet-tcp: fix possible memory leak when tearing down a controller
  nvme: cancel pending I/O if nvme controller is in terminal state
  nvmet-auth: replace pr_debug() with pr_err() to report an error.
  nvmet-auth: return the error code to the nvmet_auth_host_hash() callers
  nvme: find numa distance only if controller has valid numa id
  ublk: remove segment count and size limits
  nvme: fix warn output about shared namespaces without CONFIG_NVME_MULTIPATH
commit 3d25a941ea
drivers/block/ublk_drv.c
@@ -2177,7 +2177,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 		.max_hw_sectors = p->max_sectors,
 		.chunk_sectors = p->chunk_sectors,
 		.virt_boundary_mask = p->virt_boundary_mask,
-
+		.max_segments = USHRT_MAX,
+		.max_segment_size = UINT_MAX,
 	};
 	struct gendisk *disk;
 	int ret = -EINVAL;
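For context on the two added fields: with the queue_limits API, fields a driver leaves unset get conservative block-layer defaults (roughly 128 segments and 64 KiB per segment), so "no restriction" is spelled as the maximum value the field's type can hold. A minimal userspace sketch of that idea, assuming an illustrative struct and checker (not kernel code):

/* Why "unlimited" is written as USHRT_MAX/UINT_MAX: a clamp against
 * the type's maximum is a no-op, which is how this fix removes the
 * segment restrictions that the defaults would otherwise impose. */
#include <stdio.h>
#include <limits.h>

struct queue_limits {
	unsigned short max_segments;
	unsigned int   max_segment_size;
};

static int fits(const struct queue_limits *l, unsigned short segs,
		unsigned int seg_size)
{
	return segs <= l->max_segments && seg_size <= l->max_segment_size;
}

int main(void)
{
	struct queue_limits lim = {
		.max_segments     = USHRT_MAX, /* effectively unlimited */
		.max_segment_size = UINT_MAX,
	};
	printf("128 segs of 1 MiB allowed: %d\n", fits(&lim, 128, 1 << 20));
	return 0;
}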
drivers/nvme/host/core.c
@@ -628,27 +628,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
 
-/*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
-	switch (nvme_ctrl_state(ctrl)) {
-	case NVME_CTRL_NEW:
-	case NVME_CTRL_LIVE:
-	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_CONNECTING:
-		return false;
-	case NVME_CTRL_DELETING:
-	case NVME_CTRL_DELETING_NOIO:
-	case NVME_CTRL_DEAD:
-		return true;
-	default:
-		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
-		return true;
-	}
-}
-
 /*
  * Waits for the controller state to be resetting, or returns false if it is
  * not possible to ever transition to that state.
@@ -3681,7 +3660,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 			"Found shared namespace %d, but multipathing not supported.\n",
 			info->nsid);
 		dev_warn_once(ctrl->device,
-			"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+			"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
 	}
 }
drivers/nvme/host/multipath.c
@@ -247,7 +247,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 		if (nvme_path_is_disabled(ns))
 			continue;
 
-		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
 			distance = node_distance(node, ns->ctrl->numa_node);
 		else
 			distance = LOCAL_DISTANCE;
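The guard matters because NUMA_NO_NODE (-1) is not a valid node index, so passing it to node_distance() yields a meaningless value that can skew path selection. A hedged userspace sketch of the decision, where the toy node_distance() merely stands in for the kernel helper:

/* Only consult the NUMA topology when the controller actually
 * reports a node; otherwise treat the path as local. */
#include <stdio.h>

#define NUMA_NO_NODE   (-1)
#define LOCAL_DISTANCE 10

static int node_distance(int a, int b)
{
	return a == b ? LOCAL_DISTANCE : 20; /* toy two-node topology */
}

static int path_distance(int local_node, int ctrl_node, int numa_policy)
{
	if (ctrl_node != NUMA_NO_NODE && numa_policy)
		return node_distance(local_node, ctrl_node);
	return LOCAL_DISTANCE; /* unplaced controllers count as local */
}

int main(void)
{
	printf("ctrl on node 1, seen from node 0: %d\n",
	       path_distance(0, 1, 1));
	printf("ctrl with no node id: %d\n",
	       path_distance(0, NUMA_NO_NODE, 1));
	return 0;
}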
drivers/nvme/host/nvme.h
@@ -741,6 +741,27 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
 }
 
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+	switch (nvme_ctrl_state(ctrl)) {
+	case NVME_CTRL_NEW:
+	case NVME_CTRL_LIVE:
+	case NVME_CTRL_RESETTING:
+	case NVME_CTRL_CONNECTING:
+		return false;
+	case NVME_CTRL_DELETING:
+	case NVME_CTRL_DELETING_NOIO:
+	case NVME_CTRL_DEAD:
+		return true;
+	default:
+		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+		return true;
+	}
+}
+
 void nvme_complete_rq(struct request *req);
 void nvme_complete_batch_req(struct request *req);
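Moving the helper into nvme.h makes it available to the PCI timeout handler below. As a standalone illustration of the classification it performs, here is a userspace sketch; the enum mirrors the state names in the diff, while everything else is illustrative scaffolding:

#include <stdbool.h>
#include <stdio.h>

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

/* Terminal ("sink") states can never transition back to LIVE, so
 * I/O waiting on such a controller can only be cancelled. */
static bool state_is_terminal(enum nvme_ctrl_state state)
{
	switch (state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default: /* DELETING, DELETING_NOIO, DEAD */
		return true;
	}
}

int main(void)
{
	printf("LIVE terminal? %d\n", state_is_terminal(NVME_CTRL_LIVE));
	printf("DEAD terminal? %d\n", state_is_terminal(NVME_CTRL_DEAD));
	return 0;
}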
drivers/nvme/host/pci.c
@@ -1286,6 +1286,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 	u8 opcode;
 
+	if (nvme_state_terminal(&dev->ctrl))
+		goto disable;
+
 	/* If PCI error recovery process is happening, we cannot reset or
 	 * the recovery mechanism will surely fail.
 	 */
@@ -1390,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	return BLK_EH_RESET_TIMER;
 
 disable:
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+		if (nvme_state_terminal(&dev->ctrl))
+			nvme_dev_disable(dev, true);
 		return BLK_EH_DONE;
+	}
 
 	nvme_dev_disable(dev, false);
 	if (nvme_try_sched_reset(&dev->ctrl))
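Taken together, the two pci.c hunks change the timeout handler so a controller that can never return to live is torn down, cancelling its pending I/O, instead of having its timer re-armed indefinitely. A heavily simplified sketch of that control flow, with booleans standing in for the state checks and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

enum eh_ret { EH_DONE, EH_RESET_TIMER };

static bool dev_disabled; /* records the teardown for the demo */

static enum eh_ret timeout(bool terminal, bool marked_resetting)
{
	if (terminal)
		goto disable; /* skip abort/reset escalation entirely */
	/* ... normal abort and reset escalation ... */
	return EH_RESET_TIMER;

disable:
	if (!marked_resetting) {
		/* Couldn't enter RESETTING; if that is because the state
		 * is terminal, still disable so queued I/O is cancelled. */
		if (terminal)
			dev_disabled = true;
		return EH_DONE;
	}
	dev_disabled = true; /* disable, then schedule the reset */
	return EH_DONE;
}

int main(void)
{
	enum eh_ret r = timeout(true, false);
	printf("terminal: ret=%s, disabled=%d\n",
	       r == EH_DONE ? "EH_DONE" : "RESET_TIMER", dev_disabled);
	return 0;
}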
drivers/nvme/host/tcp.c
@@ -360,12 +360,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 	} while (ret > 0);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
 {
 	return !list_empty(&queue->send_list) ||
 		!llist_empty(&queue->req_list);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+	return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+		nvme_tcp_queue_has_pending(queue);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -386,7 +392,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		mutex_unlock(&queue->send_mutex);
 	}
 
-	if (last && nvme_tcp_queue_more(queue))
+	if (last && nvme_tcp_queue_has_pending(queue))
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
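The net effect of these two hunks: the "is more work queued?" test keeps its old meaning under the new name nvme_tcp_queue_has_pending(), while nvme_tcp_queue_more() additionally requires a non-TLS queue, so TLS queues always hand sends to io_work one PDU at a time rather than pushing a whole batch inline (the strict pacing named in the commit title). A toy model of the decision; the struct and fields are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool tls;
	int pending; /* stand-in for send_list/req_list occupancy */
};

static bool queue_has_pending(const struct queue *q)
{
	return q->pending > 0;
}

/* "Keep sending inline" is only allowed without TLS. */
static bool queue_more(const struct queue *q)
{
	return !q->tls && queue_has_pending(q);
}

int main(void)
{
	struct queue plain = { .tls = false, .pending = 3 };
	struct queue tls   = { .tls = true,  .pending = 3 };
	printf("plain: inline send? %d\n", queue_more(&plain)); /* 1 */
	printf("tls:   inline send? %d (deferred to io_work)\n",
	       queue_more(&tls));                               /* 0 */
	return 0;
}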
drivers/nvme/target/auth.c
@@ -285,9 +285,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 	}
 
 	if (shash_len != crypto_shash_digestsize(shash_tfm)) {
-		pr_debug("%s: hash len mismatch (len %d digest %d)\n",
-			__func__, shash_len,
-			crypto_shash_digestsize(shash_tfm));
+		pr_err("%s: hash len mismatch (len %d digest %d)\n",
+			__func__, shash_len,
+			crypto_shash_digestsize(shash_tfm));
 		ret = -EINVAL;
 		goto out_free_tfm;
 	}
@@ -370,7 +370,7 @@ out_free_response:
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
 	crypto_free_shash(shash_tfm);
-	return 0;
+	return ret;
 }
 
 int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
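The one-line change fixes a classic shared-cleanup bug: the function computed an error code, but the fall-through cleanup path returned 0 regardless, so callers of nvmet_auth_host_hash() saw success on failure. A minimal sketch of the pattern, with an illustrative function body:

#include <errno.h>
#include <stdio.h>

static int hash_with_cleanup(int len, int digestsize)
{
	int ret = 0;

	if (len != digestsize) {
		ret = -EINVAL;
		goto out_free;  /* error and success paths converge here */
	}
	/* ... do the actual work ... */
out_free:
	/* ... free resources either way ... */
	return ret;         /* was "return 0", which masked -EINVAL */
}

int main(void)
{
	printf("mismatch -> %d\n", hash_with_cleanup(32, 48)); /* -22 */
	return 0;
}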
drivers/nvme/target/configfs.c
@@ -754,6 +754,19 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 	NULL,
 };
 
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+	struct config_item *ns_item;
+	char name[4] = {};
+
+	if (sprintf(name, "%u", nsid) <= 0)
+		return false;
+	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+	ns_item = config_group_find_item(&subsys->namespaces_group, name);
+	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+	return ns_item != NULL;
+}
+
 static void nvmet_ns_release(struct config_item *item)
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
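The new helper answers "is this nsid configured at all?" by formatting the nsid the way namespace directories are named under configfs and probing the group by name. A userspace sketch of that lookup-by-name idea, with a string table standing in for config_group_find_item(); note the sketch sizes its buffer to hold any u32 in decimal, whereas the patch's name[4] fits at most three digits plus the terminator:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *items[] = { "1", "2", "17" }; /* existing ns dirs */

static bool nsid_exists(unsigned int nsid)
{
	char name[12]; /* "4294967295" plus NUL fits */

	if (snprintf(name, sizeof(name), "%u", nsid) <= 0)
		return false;
	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		if (strcmp(items[i], name) == 0)
			return true;
	return false;
}

int main(void)
{
	printf("nsid 17 exists: %d\n", nsid_exists(17));
	printf("nsid 3 exists:  %d\n", nsid_exists(3));
	return 0;
}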
drivers/nvme/target/core.c
@@ -437,10 +437,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 u16 nvmet_req_find_ns(struct nvmet_req *req)
 {
 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
 
-	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+	req->ns = xa_load(&subsys->namespaces, nsid);
 	if (unlikely(!req->ns)) {
 		req->error_loc = offsetof(struct nvme_common_command, nsid);
+		if (nvmet_subsys_nsid_exists(subsys, nsid))
+			return NVME_SC_INTERNAL_PATH_ERROR;
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
 	}
 
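The behavioral point of this hunk: a miss in the active-namespace table no longer always means "invalid namespace". If the nsid is configured but currently disabled, the target now returns a path error the host may retry, reserving INVALID_NS with Do Not Retry for namespaces that truly don't exist. A sketch of the decision; the status encodings and the two stubbed lookups are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NVME_SC_INTERNAL_PATH_ERROR 0x300  /* illustrative encoding */
#define NVME_SC_INVALID_NS          0x00b
#define NVME_SC_DNR                 0x4000

static bool ns_enabled(unsigned int nsid)    { (void)nsid; return false; }
static bool ns_configured(unsigned int nsid) { return nsid == 1; }

static unsigned int find_ns_status(unsigned int nsid)
{
	if (ns_enabled(nsid))
		return 0;                           /* success */
	if (ns_configured(nsid))
		return NVME_SC_INTERNAL_PATH_ERROR; /* disabled: retryable */
	return NVME_SC_INVALID_NS | NVME_SC_DNR;    /* absent: don't retry */
}

int main(void)
{
	printf("nsid 1 (disabled): 0x%x\n", find_ns_status(1));
	printf("nsid 9 (absent):   0x%x\n", find_ns_status(9));
	return 0;
}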
drivers/nvme/target/nvmet.h
@@ -543,6 +543,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 		struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE 16
 #define NVMET_MAX_QUEUE_SIZE 1024
drivers/nvme/target/tcp.c
@@ -348,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
 	return 0;
 }
 
+/* If cmd buffers are NULL, no operation is performed */
 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
 {
 	kfree(cmd->iov);
@@ -1581,13 +1582,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 	struct nvmet_tcp_cmd *cmd = queue->cmds;
 	int i;
 
-	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
-		if (nvmet_tcp_need_data_in(cmd))
-			nvmet_tcp_free_cmd_buffers(cmd);
-	}
-
-	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
-		nvmet_tcp_free_cmd_buffers(&queue->connect);
+	for (i = 0; i < queue->nr_cmds; i++, cmd++)
+		nvmet_tcp_free_cmd_buffers(cmd);
+	nvmet_tcp_free_cmd_buffers(&queue->connect);
 }
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
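Why the simpler loop is safe: nvmet_tcp_free_cmd_buffers() tolerates never-allocated or already-freed buffers (the comment added in the earlier hunk documents this), so teardown can free every command's buffers unconditionally instead of filtering on nvmet_tcp_need_data_in(), which is what previously leaked buffers for commands in other states. A userspace sketch of the idempotent-free pattern, with illustrative types:

#include <stdlib.h>

struct cmd {
	void *iov;
	void *data;
};

/* If cmd buffers are NULL, no operation is performed. */
static void free_cmd_buffers(struct cmd *c)
{
	free(c->iov);  /* free(NULL) is a no-op, like kfree(NULL) */
	free(c->data);
	c->iov = NULL; /* make repeated calls harmless */
	c->data = NULL;
}

int main(void)
{
	struct cmd cmds[4] = { { .iov = malloc(16) }, { 0 } };

	/* Teardown: no per-command state checks needed. */
	for (size_t i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
		free_cmd_buffers(&cmds[i]);
	return 0;
}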