block-6.4-2023-05-26

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmRw6C8QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpm0aD/9h4AAwmFAhVYxcr2Lk4AXwTz1sMDnQ58P3
 /OhwvogCsAV9K4venjuDYPL0IdHOq+Txh2S51DnhqGlJlU9xRxPMbTjCToUMqfCw
 VjUo0aF9XfQdqPWR2/zh9Zi1YwZd0rC+KBRRvlEdWszoze0H3L8rbxBNllSzdeKl
 hECLNpq8z45nzPAYateISs7Cv9OvGuqkaPbjCdHqGQ60Tdt4QIcnnPUUPfntcZHI
 tjHnAc3uN4WyrEmMPlTd6MtVV6VCQKAKz65L9bvvNlESverXNv923p8TPzEdb8tR
 utSD995Wxh8rBOC3+WA5+d1pYI+C1cUADmgE6QN/8jqu04No4l5Ob3+8ImPmrnGW
 a0VSX/BKt63QnXgFFYfn5yBMVMkjtoFtD87WAlLK50fYJTDn+ouVzqhcTsBJ9f/5
 xzyFvof8vVp83/Xlkwv/NumDTcyauBobiN6mI1mTBk0hakmxqNgb5xEDvbf5Q/M0
 jpp/9nBqmnFJTZRhRVu/G1RdeqDMDN1+/ws3uc3v+K+aeDMGx44l6g3N7f7kAPor
 Xk/Cky4x0KirxvpgJzhvS2mOSXbpnAKqYGK2+nIFS00dtbvUX9bERJoxTZbyihC+
 AkcPpO92UvxR7nsjgQY7R/euzqD4HKEYExzImT9c3Iug9z3Mtot0FncY6Jb8CEHW
 Vkh6wxsg6A==
 =MvS9
 -----END PGP SIGNATURE-----

Merge tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A few fixes for the storage side of things:

   - Fix the bio caching condition for passthrough IO (Anuj)

   - Fix the end-of-device check for zero sized devices (Christoph)

   - Update Paolo's email address in the BFQ maintainer entry

   - NVMe pull request via Keith with a single quirk addition

   - Fix a regression in how wbt enablement is done (Yu)

   - Fix a race in active queue accounting (Tian)"

* tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux:
  NVMe: Add MAXIO 1602 to bogus nid list.
  block: make bio_check_eod work for zero sized devices
  block: fix bio-cache for passthru IO
  block, bfq: update Paolo's address in maintainer list
  blk-mq: fix race condition in active queue accounting
  blk-wbt: fix that wbt can't be disabled by default
Merged by: Linus Torvalds
Date:      2023-05-26 15:04:54 -07:00
commit a92c9ab69f
6 changed files with 20 additions and 12 deletions

diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3536,7 +3536,7 @@ F: Documentation/filesystems/befs.rst
 F:	fs/befs/

 BFQ I/O SCHEDULER
-M:	Paolo Valente <paolo.valente@linaro.org>
+M:	Paolo Valente <paolo.valente@unimore.it>
 M:	Jens Axboe <axboe@kernel.dk>
 L:	linux-block@vger.kernel.org
 S:	Maintained

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -520,7 +520,7 @@ static inline int bio_check_eod(struct bio *bio)
 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 	unsigned int nr_sectors = bio_sectors(bio);

-	if (nr_sectors && maxsector &&
+	if (nr_sectors &&
 	    (nr_sectors > maxsector ||
 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
 		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
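
With concrete numbers the fix is easy to verify. Below is a standalone
userspace sketch of the fixed predicate (my own C model, not the kernel
function): on a zero sized device maxsector is 0, so the old leading
"maxsector &&" term short-circuited the whole check and let every bio
through, while the new condition rejects even a single-sector access via
"nr_sectors > maxsector".

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Standalone model of the fixed end-of-device predicate. */
static bool beyond_eod(sector_t start, unsigned int nr_sectors,
		       sector_t maxsector)
{
	return nr_sectors &&
	       (nr_sectors > maxsector ||
		start > maxsector - nr_sectors);
}

int main(void)
{
	assert(beyond_eod(0, 1, 0));	/* zero sized device: now rejected */
	assert(!beyond_eod(0, 8, 8));	/* exactly fills the device: fine  */
	assert(beyond_eod(1, 8, 8));	/* runs one sector past the end    */
	return 0;
}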

diff --git a/block/blk-map.c b/block/blk-map.c
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -248,7 +248,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
 {
 	struct bio *bio;

-	if (rq->cmd_flags & REQ_ALLOC_CACHE) {
+	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
 		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
 				&fs_bio_set);
 		if (!bio)
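
Some background on the condition, as a sketch under my own assumptions
with placeholder constants rather than the kernel's headers: a bio embeds
a small number of inline bio_vecs, and the per-cpu bio cache only hands
out bios that rely on that inline storage. The fixed check therefore
bypasses the cache whenever the mapping needs more segments than fit
inline, falling back to the regular allocator, which can attach an
external bio_vec array.

#include <assert.h>
#include <stdbool.h>

/* Values modeled after the kernel's, for illustration only. */
#define BIO_INLINE_VECS	4
#define REQ_ALLOC_CACHE	(1u << 0)	/* placeholder bit position */

/*
 * Only serve the allocation from the per-cpu bio cache when the caller
 * opted in AND the vector count fits the bio's inline storage.
 */
static bool use_bio_cache(unsigned int cmd_flags, unsigned int nr_vecs)
{
	return (cmd_flags & REQ_ALLOC_CACHE) && nr_vecs <= BIO_INLINE_VECS;
}

int main(void)
{
	/* 3 segments fit the inline vectors: cached path is safe */
	assert(use_bio_cache(REQ_ALLOC_CACHE, 3));
	/* 8 segments need external bio_vecs: skip the cache */
	assert(!use_bio_cache(REQ_ALLOC_CACHE, 8));
	return 0;
}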

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -39,16 +39,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int users;

+	/*
+	 * calling test_bit() prior to test_and_set_bit() is intentional,
+	 * it avoids dirtying the cacheline if the queue is already active.
+	 */
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;

-		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return;
-		set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
 	} else {
-		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
-		set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
 	}

 	users = atomic_inc_return(&hctx->tags->active_queues);
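
The race being closed: with a separate test_bit()/set_bit() pair, two
CPUs could both observe the bit clear, both set it, and both fall through
to atomic_inc_return(), leaving active_queues over-counted for good,
since the idle path clears the bit and decrements only once. Below is a
userspace analogue of the fixed pattern, written with C11 atomics instead
of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tag_active;		/* BLK_MQ_S_TAG_ACTIVE analogue */
static atomic_int active_queues;	/* hctx->tags->active_queues    */

void mark_queue_busy(void)
{
	/* cheap read first: avoids dirtying the cacheline when already set */
	if (atomic_load_explicit(&tag_active, memory_order_relaxed))
		return;
	/* atomic read-modify-write: exactly one racing thread sees "false" */
	if (atomic_exchange(&tag_active, true))
		return;
	atomic_fetch_add(&active_queues, 1);	/* incremented exactly once */
}

int main(void)
{
	mark_queue_busy();
	mark_queue_busy();	/* second call returns early: counter stays 1 */
	return atomic_load(&active_queues) == 1 ? 0 : 1;
}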

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -730,14 +730,16 @@ void wbt_enable_default(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 	struct rq_qos *rqos;
-	bool disable_flag = q->elevator &&
-		    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
+	bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
+
+	if (q->elevator &&
+	    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
+		enable = false;

 	/* Throttling already enabled? */
 	rqos = wbt_rq_qos(q);
 	if (rqos) {
-		if (!disable_flag &&
-		    RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+		if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
 			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
 		return;
 	}
@@ -746,7 +748,7 @@ void wbt_enable_default(struct gendisk *disk)
 	if (!blk_queue_registered(q))
 		return;

-	if (queue_is_mq(q) && !disable_flag)
+	if (queue_is_mq(q) && enable)
 		wbt_init(disk);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
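
The regression in short: the old disable_flag captured only the
elevator's opt-out, so a kernel built with CONFIG_BLK_WBT_MQ=n would
still flip an existing wbt instance back to WBT_STATE_ON_DEFAULT. After
the fix, the Kconfig setting seeds the decision and the elevator flag can
only turn it off. A condensed model of that decision, with the config
option and the elevator flag reduced to plain booleans:

#include <assert.h>
#include <stdbool.h>

/*
 * Mirrors the fixed logic: start from the Kconfig default
 * (IS_ENABLED(CONFIG_BLK_WBT_MQ)) and let an elevator that sets
 * ELEVATOR_FLAG_DISABLE_WBT (e.g. BFQ) force writeback throttling off.
 */
static bool wbt_should_enable(bool config_blk_wbt_mq,
			      bool elevator_disables_wbt)
{
	bool enable = config_blk_wbt_mq;

	if (elevator_disables_wbt)
		enable = false;
	return enable;
}

int main(void)
{
	assert(wbt_should_enable(true, false));		/* default: wbt on */
	assert(!wbt_should_enable(true, true));		/* elevator opts out */
	assert(!wbt_should_enable(false, false));	/* CONFIG off: stays off */
	return 0;
}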

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3424,6 +3424,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1202),	/* MAXIO MAP1202 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e4B, 0x1602),	/* MAXIO MAP1602 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1cc1, 0x5350),	/* ADATA XPG GAMMIX S50 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1dbe, 0x5236),	/* ADATA XPG GAMMIX S70 */
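
How an entry like this takes effect, as a simplified model rather than
the driver's real probe path (which matches IDs through the PCI core):
the vendor/device pair selects the quirk bits, and NVME_QUIRK_BOGUS_NID
tells the NVMe core to distrust the namespace identifiers this controller
reports. The quirk bit value below is a placeholder.

#include <stddef.h>
#include <stdint.h>

#define NVME_QUIRK_BOGUS_NID	(1ul << 0)	/* placeholder bit value */

struct quirk_entry {
	uint16_t vendor, device;
	unsigned long quirks;
};

static const struct quirk_entry quirk_table[] = {
	{ 0x1e4B, 0x1202, NVME_QUIRK_BOGUS_NID },	/* MAXIO MAP1202 */
	{ 0x1e4B, 0x1602, NVME_QUIRK_BOGUS_NID },	/* MAXIO MAP1602 */
};

/* Scan the table for a vendor/device match and return its quirk bits. */
static unsigned long lookup_quirks(uint16_t vendor, uint16_t device)
{
	for (size_t i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
		if (quirk_table[i].vendor == vendor &&
		    quirk_table[i].device == device)
			return quirk_table[i].quirks;
	return 0;
}

int main(void)
{
	return lookup_quirks(0x1e4B, 0x1602) == NVME_QUIRK_BOGUS_NID ? 0 : 1;
}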