mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-01 02:33:57 +00:00)
block-6.1-2022-11-18
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmN38ZUQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpgXxD/9tUSFUKIVGIn4pmNILfY3XV45HOi1w44yR
zCxCELupcBeT+YixmaJcT8sunrrg2fLPOXMrDJk1cG/izXHzkjAQsHZvERfqC7hC
f5onH+2MyGm3qBwxV0iGqITJgTwQGInVJijT4f9UZd/8ultymyZR2nOdIdIydHCF
qzlOjq6hgIuGKHhFgOqRUg/OAkx510ZEEilUDcZ6XVV+zL7ccN6J9+eNTI3c58wT
7jvxZC4u6QGKteGvVniE3WXgk3QdFiQRORvV09g+PkbG/vPjAIZ5tJFb9PdIOebD
3guDiNUasgz2vnDetMK+yk4LcedcRfWnqgn+Vm8C26j5Fxs13eDx5kMDteVy7CYh
3bokOATHohoZZ9qTApgQUswTfGJfBdoy0nUTPuffxPdKDyUPteIxFCADcnyDHnDG
d/+PjU3FKF31o2HcUfvYp7OMO0VZP0hJSWps8znoVXKxb+LH9qKkYzHVlfni5kkS
k9XqqD1Ki98Erb346YqgvQjCkz+CUd5DxtGyh9Oh2+oS2qHP6WjdKo1QPFmWD5dp
EyXGSqGoZrIPtnKohLUN9EiVXanRQWJr3L0gw2CYXpmwfSKfMC3CQraEC1jOc01l
TfsLJGbl3L5XpLzxoBwDu44cqp+VvbalergdcmsDTLDFHhONY2g5LJh6C9/EDdnQ
Cde1uHikGw==
=sOGG
-----END PGP SIGNATURE-----

Merge tag 'block-6.1-2022-11-18' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
      - Two more bogus nid quirks (Bean Huo, Tiago Dias Ferreira)
      - Memory leak fix in nvmet (Sagi Grimberg)

 - Regression fix for block cgroups pinning the wrong blkcg, causing
   leaks of cgroups and blkcgs (Chris)

 - UAF fix for drbd setup error handling (Dan)

 - Fix DMA alignment propagation in DM (Keith)

* tag 'block-6.1-2022-11-18' of git://git.kernel.dk/linux:
  dm-log-writes: set dma_alignment limit in io_hints
  dm-integrity: set dma_alignment limit in io_hints
  block: make blk_set_default_limits() private
  dm-crypt: provide dma_alignment limit in io_hints
  block: make dma_alignment a stacking queue_limit
  nvmet: fix a memory leak in nvmet_auth_set_key
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Netac NV7000
  drbd: use after free in drbd_create_device()
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Micron Nitro
  blk-cgroup: properly pin the parent in blkcg_css_online
This commit is contained in commit f4408c3dfc.
block/blk-cgroup.c
@@ -1213,7 +1213,7 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
 	 * parent so that offline always happens towards the root.
 	 */
 	if (parent)
-		blkcg_pin_online(css);
+		blkcg_pin_online(&parent->css);
 	return 0;
 }
 
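The regression this fixes: blkcg_css_online() is supposed to pin the parent css so that offline always proceeds towards the root, but the pin was mistakenly taken on the child's own css, leaking cgroups and blkcgs. A toy model of the pin/unpin pairing (plain C with invented types, not kernel code):

#include <stdio.h>

struct node { int online_pin; struct node *parent; };

static void pin(struct node *n)   { n->online_pin++; }
static void unpin(struct node *n) { n->online_pin--; }

static void online(struct node *child)
{
	/* fixed behaviour: take the reference on the PARENT */
	if (child->parent)
		pin(child->parent);
	/* the buggy version did pin(child), a self-reference the
	 * offline path below never drops, so the child leaked */
}

static void offline(struct node *child)
{
	/* teardown drops the parent reference taken at online time */
	if (child->parent)
		unpin(child->parent);
}

int main(void)
{
	struct node root = { 0, NULL }, child = { 0, &root };

	online(&child);
	offline(&child);
	printf("pins after offline: root=%d child=%d (0/0 == balanced)\n",
	       root.online_pin, child.online_pin);
	return 0;
}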
block/blk-core.c
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_stats;
 
-	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
block/blk-settings.c
@@ -57,8 +57,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
 	lim->zone_write_granularity = 0;
+	lim->dma_alignment = 511;
 }
-EXPORT_SYMBOL(blk_set_default_limits);
 
 /**
  * blk_set_stacking_limits - set default limits for stacking devices
block/blk-settings.c
@@ -600,6 +600,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
 	/* Set non-power-of-2 compatible chunk_sectors boundary */
 	if (b->chunk_sectors)
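dma_alignment is kept as a mask (alignment minus one), so valid values look like 511 or 4095 and a larger mask is the stricter constraint; taking max() when stacking therefore keeps the strongest requirement of the top and bottom devices. A quick stand-alone sketch of that reasoning (stack_dma_alignment() is a hypothetical helper mirroring the max() line above):

#include <stdio.h>

/* Combine two alignment masks the way blk_stack_limits() now does:
 * for masks of the form 2^k - 1, the larger mask is the stricter one. */
static unsigned int stack_dma_alignment(unsigned int t, unsigned int b)
{
	return t > b ? t : b;
}

int main(void)
{
	unsigned int top = 511;      /* 512-byte alignment */
	unsigned int bottom = 4095;  /* 4096-byte alignment */
	unsigned int stacked = stack_dma_alignment(top, bottom);

	printf("stacked mask %u => %u-byte alignment\n", stacked, stacked + 1);
	return 0;
}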
block/blk-settings.c
@@ -773,7 +774,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
 **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-	q->dma_alignment = mask;
+	q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
block/blk-settings.c
@@ -795,8 +796,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
 	BUG_ON(mask > PAGE_SIZE);
 
-	if (mask > q->dma_alignment)
-		q->dma_alignment = mask;
+	if (mask > q->limits.dma_alignment)
+		q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
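For context on how the mask is consumed: submitters AND a buffer's address (and length) with the queue's dma_alignment, and any nonzero result means the request cannot be mapped directly and has to be bounced. A hedged sketch of that check (dma_aligned() is a made-up helper, not a kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True iff buf and len both satisfy the alignment mask, e.g. a mask of
 * 511 demands a 512-byte aligned address and a 512-byte multiple length. */
static bool dma_aligned(const void *buf, size_t len, unsigned int mask)
{
	return (((uintptr_t)buf | len) & mask) == 0;
}

int main(void)
{
	static char buf[1024] __attribute__((aligned(512)));

	printf("aligned: %d\n", dma_aligned(buf, 512, 511));     /* 1 */
	printf("aligned: %d\n", dma_aligned(buf + 1, 512, 511)); /* 0 */
	return 0;
}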
block/blk.h
@@ -331,6 +331,7 @@ void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
+void blk_set_default_limits(struct queue_limits *lim);
 int blk_dev_init(void);
 
 /*
drivers/block/drbd/drbd_main.c
@@ -2672,7 +2672,7 @@ static int init_submitter(struct drbd_device *device)
 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
 {
 	struct drbd_resource *resource = adm_ctx->resource;
-	struct drbd_connection *connection;
+	struct drbd_connection *connection, *n;
 	struct drbd_device *device;
 	struct drbd_peer_device *peer_device, *tmp_peer_device;
 	struct gendisk *disk;
drivers/block/drbd/drbd_main.c
@@ -2789,7 +2789,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
 	return NO_ERROR;
 
 out_idr_remove_from_resource:
-	for_each_connection(connection, resource) {
+	for_each_connection_safe(connection, n, resource) {
 		peer_device = idr_remove(&connection->peer_devices, vnr);
 		if (peer_device)
 			kref_put(&connection->kref, drbd_destroy_connection);
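The use-after-free: the error path iterated the connection list while kref_put() could free the current node, so the iterator then read the next pointer out of freed memory; the _safe variant caches the next pointer before the body runs. A stand-alone illustration with toy list macros (simplified stand-ins, not the drbd ones):

#include <stdlib.h>

struct conn { struct conn *next; };

/* unsafe: evaluates c->next AFTER the body, which may have freed c */
#define for_each_conn(c, head) \
	for ((c) = (head); (c); (c) = (c)->next)
/* safe: n caches c->next BEFORE the body runs */
#define for_each_conn_safe(c, n, head) \
	for ((c) = (head); (c) && ((n) = (c)->next, 1); (c) = (n))

static void put_conn(struct conn *c) { free(c); } /* drops the last ref */

int main(void)
{
	struct conn *head = malloc(sizeof(*head));
	struct conn *c, *n;

	head->next = malloc(sizeof(*head));
	head->next->next = NULL;

	/* with plain for_each_conn() this loop would touch freed memory */
	for_each_conn_safe(c, n, head)
		put_conn(c);
	return 0;
}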
drivers/md/dm-crypt.c
@@ -3630,6 +3630,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->physical_block_size =
 		max_t(unsigned, limits->physical_block_size, cc->sector_size);
 	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+	limits->dma_alignment = limits->logical_block_size - 1;
 }
 
 static struct target_type crypt_target = {
drivers/md/dm-integrity.c
@@ -3378,6 +3378,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
+		limits->dma_alignment = limits->logical_block_size - 1;
 	}
 }
 
drivers/md/dm-log-writes.c
@@ -875,6 +875,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
 	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
 	limits->io_min = limits->physical_block_size;
+	limits->dma_alignment = limits->logical_block_size - 1;
 }
 
 #if IS_ENABLED(CONFIG_FS_DAX)
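All three DM targets above derive the new limit the same way: these targets split I/O on logical-block boundaries, so buffers must be aligned to the logical block size, and the mask is that size minus one. The arithmetic, spelled out (plain C, illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int lbs[] = { 512, 4096 };

	for (int i = 0; i < 2; i++)
		printf("logical_block_size %4u -> dma_alignment mask 0x%x\n",
		       lbs[i], lbs[i] - 1);	/* 512 -> 0x1ff, 4096 -> 0xfff */
	return 0;
}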
drivers/nvme/host/pci.c
@@ -3489,6 +3489,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+	{ PCI_DEVICE(0x1344, 0x6001),	/* Micron Nitro NVMe */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1c5c, 0x1504),	/* SK Hynix PC400 */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1c5c, 0x174a),	/* SK Hynix P31 SSD */
drivers/nvme/host/pci.c
@@ -3519,6 +3521,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x2646, 0x501E),	/* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x1f40, 0x5236),	/* Netac Technologies Co. NV7000 NVMe SSD */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1001),	/* MAXIO MAP1001 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1002),	/* MAXIO MAP1002 */
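Roughly how these table entries take effect: at probe time the PCI core matches the device's vendor/device pair against the table and hands the first hit's driver_data to the driver as a quirk bitmask; NVME_QUIRK_BOGUS_NID then tells the driver to ignore the bogus namespace IDs such devices report. A simplified lookup sketch, with toy types standing in for struct pci_device_id:

#include <stdio.h>

#define QUIRK_BOGUS_NID (1UL << 0)	/* stand-in for NVME_QUIRK_BOGUS_NID */

struct id_entry {
	unsigned short vendor, device;
	unsigned long driver_data;	/* quirk bitmask */
};

static const struct id_entry table[] = {
	{ 0x1344, 0x6001, QUIRK_BOGUS_NID },	/* Micron Nitro NVMe */
	{ 0x1f40, 0x5236, QUIRK_BOGUS_NID },	/* Netac NV7000 NVMe SSD */
	{ 0, 0, 0 },				/* terminator */
};

/* first matching entry wins, as with the kernel's pci_match_id() */
static unsigned long lookup_quirks(unsigned short vendor, unsigned short device)
{
	for (const struct id_entry *e = table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->driver_data;
	return 0;
}

int main(void)
{
	printf("quirks for 1f40:5236 = 0x%lx\n", lookup_quirks(0x1f40, 0x5236));
	return 0;
}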
drivers/nvme/target/auth.c
@@ -45,9 +45,11 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 	if (!dhchap_secret)
 		return -ENOMEM;
 	if (set_ctrl) {
+		kfree(host->dhchap_ctrl_secret);
 		host->dhchap_ctrl_secret = strim(dhchap_secret);
 		host->dhchap_ctrl_key_hash = key_hash;
 	} else {
+		kfree(host->dhchap_secret);
 		host->dhchap_secret = strim(dhchap_secret);
 		host->dhchap_key_hash = key_hash;
 	}
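The leak: nvmet_auth_set_key() can be called again to re-key a host, and the old dhchap_secret / dhchap_ctrl_secret pointer was simply overwritten. Since kfree(NULL) is a no-op, freeing before the assignment is also safe on the first set. A minimal user-space model of the pattern (set_secret() is a stand-in, not the nvmet function):

#include <stdlib.h>
#include <string.h>

/* Replace the string held in *slot, releasing the previous one.
 * free(NULL) is a no-op, so this also works for the initial set. */
static int set_secret(char **slot, const char *secret)
{
	char *dup = strdup(secret);

	if (!dup)
		return -1;
	free(*slot);	/* the fix: drop the old key before overwriting */
	*slot = dup;
	return 0;
}

int main(void)
{
	char *key = NULL;

	set_secret(&key, "DHHC-1:00:first");
	set_secret(&key, "DHHC-1:00:second");	/* no longer leaks "first" */
	free(key);
	return 0;
}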
include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
 	unsigned char		discard_misaligned;
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
+
+	/*
+	 * Drivers that set dma_alignment to less than 511 must be prepared to
+	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+	 * due to possible offsets.
+	 */
+	unsigned int		dma_alignment;
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
include/linux/blkdev.h
@@ -456,12 +463,6 @@ struct request_queue {
 	unsigned long		nr_requests;	/* Max # of requests */
 
 	unsigned int		dma_pad_mask;
-	/*
-	 * Drivers that set dma_alignment to less than 511 must be prepared to
-	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
-	 * due to possible offsets.
-	 */
-	unsigned int		dma_alignment;
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
include/linux/blkdev.h
@@ -944,7 +945,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
include/linux/blkdev.h
@@ -1324,7 +1324,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
 
 static inline int queue_dma_alignment(const struct request_queue *q)
 {
-	return q ? q->dma_alignment : 511;
+	return q ? q->limits.dma_alignment : 511;
 }
 
 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)