nvme fixes for Linux 6.2

- fix doorbell buffer value endianness (Klaus Jensen)
  - fix Linux vs NVMe page size mismatch (Keith Busch)
  - fix a potential memory access beyond the allocation limit
    (Keith Busch)
  - fix a multipath vs blktrace NULL pointer dereference
    (Yanjun Zhang)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmOkeqkLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYNqvBAAleIay/9mavb1iXTteEFKBN3ml/3Dslc1nETP5FWS
 7j8oXaYT4TsXTN4D5lGUPNzeDIVaPvbVeduJLpGbA7Z/g4XSEdfnorc+AmLdje4q
 LzPAd9u99+P92U5Colj2el4eyPTPzZFbP8IHBZxsR6fTU1i2WyiVYDw+V+MCIQE0
 yrg8oU4JHTq3/4B21guADIOK46hYlUMKUhNNsmW1DNsMs/i320ENbZ5gPY4+WiQq
 t9LK8QDY/NS519KCwtHsZOVwicTpXZoRG19Kx9duiLU+cRUwG5ApdRe0vBXBVjMH
 R65ekFUu7BUXcRHFoNOZeHzjLnDekYkdfBEHTol9+5fdLMZM3Dbv0CAZindYWA38
 VNr63nUkkMh4kShBQjk6VR/TYMsVJ8ZmmrC9Q8kkV9JnvG0ajohQspVhVDwQDKgO
 +RJSZ0yE6uvw9Vzjha0lpUs/DxMEBzXyCe1kGhecb830lLDB0T9KH5EnBMcnpH9w
 E5QGqLHfgbqaAqOXq8aBrZRHc0gcb7ubh47LJI4G+d52XrbeHBmRIbpQ4HAq9A7s
 AeCNtTZ1ksByZsvX/Wwy/Osxs52U9+piRvdBBL39WuM7R0DFQuRykJNqxofhkf6g
 OG/8i1xd0jQusnyyGNY7jRra9FLcvHNKZTx8HNOFXP7RVeWWdVUrajwaRiGZufQ3
 mwg=
 =1hmt
 -----END PGP SIGNATURE-----

Merge tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme into block-6.2

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.2

 - fix doorbell buffer value endianness (Klaus Jensen)
 - fix Linux vs NVMe page size mismatch (Keith Busch)
 - fix a potential memory access beyond the allocation limit
   (Keith Busch)
 - fix a multipath vs blktrace NULL pointer dereference
   (Yanjun Zhang)"

* tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme:
  nvme: fix multipath crash caused by flush request when blktrace is enabled
  nvme-pci: fix page size checks
  nvme-pci: fix mempool alloc size
  nvme-pci: fix doorbell buffer value endianness
This commit is contained in:
Jens Axboe 2022-12-22 09:22:35 -07:00
commit fb857b0bb2
2 changed files with 20 additions and 19 deletions

View File

@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
if (req->cmd_flags & REQ_NVME_MPATH)
if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

View File

@ -36,7 +36,7 @@
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
/*
* These can be higher, but we need to ensure that any command doesn't
@ -144,9 +144,9 @@ struct nvme_dev {
mempool_t *iod_mempool;
/* shadow doorbell buffer support: */
u32 *dbbuf_dbs;
__le32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
u32 *dbbuf_eis;
__le32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
/* host memory buffer support: */
@ -208,10 +208,10 @@ struct nvme_queue {
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
#define NVMEQ_POLLED 3
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
u32 *dbbuf_cq_ei;
__le32 *dbbuf_sq_db;
__le32 *dbbuf_cq_db;
__le32 *dbbuf_sq_ei;
__le32 *dbbuf_cq_ei;
struct completion delete_done;
};
@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
}
/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
volatile u32 *dbbuf_ei)
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
volatile __le32 *dbbuf_ei)
{
if (dbbuf_db) {
u16 old_value;
u16 old_value, event_idx;
/*
* Ensure that the queue is written before updating
@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
wmb();
old_value = *dbbuf_db;
*dbbuf_db = value;
old_value = le32_to_cpu(*dbbuf_db);
*dbbuf_db = cpu_to_le32(value);
/*
* Ensure that the doorbell is updated before reading the event
@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
mb();
if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
event_idx = le32_to_cpu(*dbbuf_ei);
if (!nvme_dbbuf_need_event(event_idx, value, old_value))
return false;
}
@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
static int nvme_pci_npages_prp(void)
{
unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
/*
@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
static int nvme_pci_npages_sgl(void)
{
return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
PAGE_SIZE);
NVME_CTRL_PAGE_SIZE);
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
sge->length = cpu_to_le32(entries * sizeof(*sge));
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
} else {
sge->length = cpu_to_le32(PAGE_SIZE);
sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
sge->type = NVME_SGL_FMT_SEG_DESC << 4;
}
}