Merge patch series "convert SCSI to atomic queue limits, part 1 (v3)"
Christoph Hellwig <hch@lst.de> says:

Hi all,

this series converts the SCSI midlayer and LLDDs to use the atomic queue
limits API. It is pretty straightforward, except for the mpi3mr driver,
which does really weird and probably already broken things by setting
limits from unlocked device iteration callbacks.

I will probably defer the (more complicated) ULD changes to the next merge
window as they would heavily conflict with Damien's zone write plugging
series.

With that, the series could go in through the SCSI tree if Jens ACKs the
core block layer bits.

Link: https://lore.kernel.org/r/20240409143748.980206-1-hch@lst.de
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit f92141e18c
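The pattern applied throughout the diff below is the same in every driver: the old ->slave_configure hook, which poked limits into the already-allocated request queue with blk_queue_*() helpers, becomes a ->device_configure hook that writes into the queue_limits structure the midlayer passes in and commits before I/O starts. A minimal sketch of that conversion, with an invented "foo" driver and arbitrary values that are not taken from the patch:

    /* Before: limits applied to the live queue from ->slave_configure. */
    static int foo_slave_configure(struct scsi_device *sdev)
    {
            blk_queue_max_hw_sectors(sdev->request_queue, 1024);
            blk_queue_virt_boundary(sdev->request_queue, 4096 - 1);
            return 0;
    }

    /* After: limits written into the queue_limits supplied by the midlayer. */
    static int foo_device_configure(struct scsi_device *sdev,
                    struct queue_limits *lim)
    {
            lim->max_hw_sectors = 1024;
            lim->virt_boundary_mask = 4096 - 1;
            return 0;
    }

    /* In the scsi_host_template, .slave_configure = foo_slave_configure
     * becomes .device_configure = foo_device_configure. */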
@@ -284,72 +284,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);

/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
* @bounce: bounce limit to enforce
*
* Description:
* Force bouncing for ISA DMA ranges or highmem.
*
* DEPRECATED, don't use in new code.
**/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
* the device driver based upon the capabilities of the I/O
* controller.
*
* max_dev_sectors is a hard limit imposed by the storage device for
* READ/WRITE requests. It is set by the disk driver.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;

if ((max_hw_sectors << 9) < PAGE_SIZE) {
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
}

max_hw_sectors = round_down(max_hw_sectors,
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_hw_sectors = max_hw_sectors;

max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

if (limits->max_user_sectors)
max_sectors = min(max_sectors, limits->max_user_sectors);
else
max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

max_sectors = round_down(max_sectors,
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;

if (!q->disk)
return;
q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
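With helpers such as blk_queue_bounce_limit() and blk_queue_max_hw_sectors() deleted above, code that still needs to adjust limits on an already-running queue goes through the atomic update API (queue_limits_start_update()/queue_limits_commit_update()), as the mpi3mr hunk later in this commit does. A minimal sketch of that pattern; the 1024-sector cap is an arbitrary illustrative value, not from the patch:

    static int example_cap_hw_sectors(struct request_queue *q)
    {
            struct queue_limits lim;

            /* Take the limits lock and get a snapshot of the current limits. */
            lim = queue_limits_start_update(q);
            lim.max_hw_sectors = 1024;
            /* Validate and apply the new limits atomically. */
            return queue_limits_commit_update(q, &lim);
    }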
@@ -436,65 +370,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* hw data segments in a request.
**/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
pr_info("%s: set to minimum %u\n", __func__, max_segments);
}

q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
* blk_queue_max_discard_segments - set max segments for discard requests
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* segments in a discard request.
**/
void blk_queue_max_discard_segments(struct request_queue *q,
unsigned short max_segments)
{
q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
* @q: the request queue for the device
* @max_size: max size of segment in bytes
*
* Description:
* Enables a low level driver to set an upper limit on the size of a
* coalesced segment
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_SIZE) {
max_size = PAGE_SIZE;
pr_info("%s: set to minimum %u\n", __func__, max_size);
}

/* see blk_queue_virt_boundary() for the explanation */
WARN_ON_ONCE(q->limits.virt_boundary_mask);

q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
@@ -661,29 +536,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
* blk_queue_io_opt - set optimal request size for the queue
* @q: the request queue for the device
* @opt: optimal request size in bytes
*
* Description:
* Storage devices may report an optimal I/O size, which is the
* device's preferred unit for sustained I/O. This is rarely reported
* for disk drives. For RAID arrays it is usually the stripe width or
* the internal track size. A properly aligned multiple of
* optimal_io_size is the preferred request size for workloads where
* sustained throughput is desired.
*/
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
if (!q->disk)
return;
q->disk->bdi->ra_pages =
max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
@@ -933,81 +785,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
* blk_queue_segment_boundary - set boundary rules for segment merging
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_SIZE - 1) {
mask = PAGE_SIZE - 1;
pr_info("%s: set to minimum %lx\n", __func__, mask);
}

q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
* blk_queue_virt_boundary - set boundary rules for bio merging
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
q->limits.virt_boundary_mask = mask;

/*
* Devices that require a virtual boundary do not support scatter/gather
* I/O natively, but instead require a descriptor list entry for each
* page (which might not be idential to the Linux PAGE_SIZE). Because
* of that they are not limited by our notion of "segment size".
*/
if (mask)
q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
* blk_queue_dma_alignment - set dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
*
* description:
* set required memory and length alignment for direct dma transactions.
* this is used when building direct io requests for the queue.
*
**/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
* blk_queue_update_dma_alignment - update dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
*
* description:
* update required memory and length alignment for direct dma transactions.
* If the requested alignment is larger than the current alignment, then
* the current queue alignment is updated to the new value, otherwise it
* is left alone. The design of this is to allow multiple objects
* (driver, device, transport etc) to set their respective
* alignments without having them interfere.
*
**/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
BUG_ON(mask > PAGE_SIZE);

if (mask > q->limits.dma_alignment)
q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
@@ -1061,28 +838,6 @@ void blk_queue_required_elevator_features(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
* blk_queue_can_use_dma_map_merging - configure queue for merging segments.
* @q: the request queue for the device
* @dev: the device pointer for dma
*
* Tell the block layer about merging the segments by dma map of @q.
*/
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
struct device *dev)
{
unsigned long boundary = dma_get_merge_boundary(dev);

if (!boundary)
return false;

/* No need to update max_segment_size. see blk_queue_virt_boundary() */
blk_queue_virt_boundary(q, boundary);

return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
* disk_set_zoned - inidicate a zoned device
* @disk: gendisk to configure
@@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = {
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
* @name: device to give bsg device
* @lim: queue limits for the bsg queue
* @job_fn: bsg job handler
* @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
struct queue_limits *lim, bsg_job_fn *job_fn,
bsg_timeout_fn *timeout, int dd_job_size)
{
struct bsg_set *bset;
struct blk_mq_tag_set *set;
@@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;

q = blk_mq_alloc_queue(set, NULL, NULL);
q = blk_mq_alloc_queue(set, lim, NULL);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
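bsg_setup_queue() now takes an optional queue_limits pointer so transport code can apply its limits when the queue is allocated instead of patching them in afterwards. A hedged caller sketch, modeled on the mpi3mr_bsg_init() hunk later in this commit; the limit values and my_bsg_request_fn are invented for illustration:

    struct queue_limits lim = {
            .max_hw_sectors = 256,
            .max_segments   = 32,
    };
    struct request_queue *q;

    /* Limits are applied at allocation time; NULL would keep the defaults. */
    q = bsg_setup_queue(dev, dev_name(dev), &lim, my_bsg_request_fn, NULL, 0);
    if (IS_ERR(q))
            return PTR_ERR(q);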
@@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config
.device_configure = ata_scsi_device_configure

extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
@@ -1254,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);

/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* ata_sas_device_configure - Default device_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/

int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);

return ata_scsi_dev_config(sdev, ap->link.device);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
EXPORT_SYMBOL_GPL(ata_sas_device_configure);

/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
@@ -1021,7 +1021,8 @@ bool ata_scsi_dma_need_drain(struct request *rq)
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);

int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
@@ -1031,7 +1032,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)

/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
lim->max_hw_sectors = dev->max_sectors;

if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
@@ -1040,7 +1041,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

/* make room for appending the drain */
blk_queue_max_segments(q, queue_max_segments(q) - 1);
lim->max_segments--;

sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
@@ -1077,7 +1078,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);

blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
lim->dma_alignment = sdev->sector_size - 1;

if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1131,8 +1132,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);

/**
* ata_scsi_slave_config - Set SCSI device attributes
* ata_scsi_device_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
* This is called before we actually start reading
* and writing to the device, to configure certain
@@ -1142,17 +1144,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/

int ata_scsi_slave_config(struct scsi_device *sdev)
int ata_scsi_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);

if (dev)
return ata_scsi_dev_config(sdev, dev);
return ata_scsi_dev_config(sdev, lim, dev);

return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_device_configure);

/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);

/* libata-eh.c */
@@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
static int pata_macio_slave_config(struct scsi_device *sdev)
static int pata_macio_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
int rc;

/* First call original */
rc = ata_scsi_slave_config(sdev);
rc = ata_scsi_device_configure(sdev, lim);
if (rc)
return rc;

@@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)

/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
blk_queue_update_dma_alignment(sdev->request_queue, 31);
lim->dma_alignment = 31;
blk_queue_update_dma_pad(sdev->request_queue, 31);

/* Tell the world about it */
@@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
blk_queue_update_dma_alignment(sdev->request_queue, 15);
lim->dma_alignment = 15;
blk_queue_update_dma_pad(sdev->request_queue, 15);

/* We enable MWI and hack cache line size directly here, this
@@ -918,7 +919,7 @@ static const struct scsi_host_template pata_macio_sht = {
* use 64K minus 256
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.slave_configure = ata_scsi_slave_config
.device_configure = ata_scsi_device_configure
};

static struct ata_port_operations mv5_ops = {
@@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_device_configure(struct scsi_device *sdev,
struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_device_configure(struct scsi_device *sdev,
struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
.device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
.device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
static int nv_adma_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
int adma_enable;
u32 current_reg, new_reg, config_mask;

rc = ata_scsi_slave_config(sdev);
rc = ata_scsi_device_configure(sdev, lim);

if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}

blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_segments(sdev->request_queue, sg_tablesize);
lim->seg_boundary_mask = segment_boundary;
lim->max_segments = sg_tablesize;
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
@@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
static int nv_swncq_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];

rc = ata_scsi_slave_config(sdev);
rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
@@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.slave_configure = ata_scsi_slave_config
.device_configure = ata_scsi_device_configure
};

static struct ata_port_operations sil24_ops = {
@@ -1500,19 +1500,14 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)

sdev->allow_restart = 1;

/*
* SBP-2 does not require any alignment, but we set it anyway
* for compatibility with earlier versions of this driver.
*/
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);

if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;

return 0;
}

static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
static int sbp2_scsi_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;

@@ -1538,7 +1533,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->start_stop_pwr_cond = 1;

if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
lim->max_hw_sectors = 128 * 1024 / 512;

return 0;
}
@@ -1596,7 +1591,7 @@ static const struct scsi_host_template scsi_driver_template = {
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
.slave_configure = sbp2_scsi_slave_configure,
.device_configure = sbp2_scsi_device_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
@@ -129,6 +129,7 @@ static const struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};

@@ -2020,6 +2020,7 @@ static const struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};

@@ -2438,8 +2438,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
"tagged %d, simple %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags));

blk_queue_dma_alignment (sdev->request_queue, 512 - 1);

return 0;
}

@@ -843,6 +843,7 @@ static const struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.dma_alignment = 511,
.shost_groups = mptscsih_host_attr_groups,
};
@@ -4561,9 +4561,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries
* We can let the block layer handle this by setting
* blk_queue_segment_boundary to page boundaries and
* blk_max_segment_size to page size when setting up the request queue.
* We can let the block layer handle this by setting seg_boundary_mask
* to page boundaries and max_segment_size to page size when setting up
* the request queue.
* For write requests, a TIDAW must not cross track boundaries, because
* we have to set the CBC flag on the last tidaw for each track.
*/
@@ -746,6 +746,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
/* need to have host registered before triggering any interrupt */
list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);

shpnt->no_highmem = true;
shpnt->io_port = setup->io_port;
shpnt->n_io_port = IO_RANGE;
shpnt->irq = setup->irq;
@@ -2940,12 +2941,6 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
return 0;
}

static int aha152x_adjust_queue(struct scsi_device *device)
{
blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
return 0;
}

static const struct scsi_host_template aha152x_driver_template = {
.module = THIS_MODULE,
.name = AHA152X_REVID,
@@ -2961,7 +2956,6 @@ static const struct scsi_host_template aha152x_driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
.slave_alloc = aha152x_adjust_queue,
.cmd_size = sizeof(struct aha152x_cmd_priv),
};
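Drivers that cannot DMA to highmem (aha152x above, imm and ppa further down) used to force bouncing by calling blk_queue_bounce_limit(..., BLK_BOUNCE_HIGH) from a slave_alloc-style hook. They now just flag the host, and the midlayer turns that flag into the bounce limit when it builds the initial queue_limits (see the scsi_init_limits() hunk near the end of this commit). The two sides of that pattern, condensed from the hunks in this commit:

    /* Driver side, at host setup time: */
    shpnt->no_highmem = true;

    /* Midlayer side, in scsi_init_limits(): */
    if (shost->no_highmem)
            lim->bounce = BLK_BOUNCE_HIGH;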
@@ -643,7 +643,8 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern void hisi_sas_remove(struct platform_device *pdev);

extern int hisi_sas_slave_configure(struct scsi_device *sdev);
int hisi_sas_device_configure(struct scsi_device *sdev,
struct queue_limits *lim);
extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
@@ -868,10 +868,11 @@ static int hisi_sas_dev_found(struct domain_device *device)
return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
int hisi_sas_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
int ret = sas_slave_configure(sdev);
int ret = sas_device_configure(sdev, lim);

if (ret)
return ret;
@@ -880,7 +881,7 @@ int hisi_sas_slave_configure(struct scsi_device *sdev)

return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
EXPORT_SYMBOL_GPL(hisi_sas_device_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
@@ -1736,7 +1736,7 @@ ATTRIBUTE_GROUPS(host_v1_hw);

static const struct scsi_host_template sht_v1_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
.slave_configure = hisi_sas_slave_configure,
.device_configure = hisi_sas_device_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
@@ -3568,7 +3568,7 @@ static void map_queues_v2_hw(struct Scsi_Host *shost)

static const struct scsi_host_template sht_v2_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
.slave_configure = hisi_sas_slave_configure,
.device_configure = hisi_sas_device_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
@@ -2894,11 +2894,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
}
static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);

static int slave_configure_v3_hw(struct scsi_device *sdev)
static int device_configure_v3_hw(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
struct hisi_hba *hisi_hba = shost_priv(shost);
int ret = hisi_sas_slave_configure(sdev);
int ret = hisi_sas_device_configure(sdev, lim);
struct device *dev = hisi_hba->dev;

if (ret)
@@ -3321,7 +3322,7 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)

static const struct scsi_host_template sht_v3_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
.slave_configure = slave_configure_v3_hw,
.device_configure = device_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
@@ -478,6 +478,12 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
else
shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

/* 32-byte (dword) is a common minimum for HBAs. */
if (sht->dma_alignment)
shost->dma_alignment = sht->dma_alignment;
else
shost->dma_alignment = 3;

/*
* assume a 4GB boundary, if not set
*/
@@ -1151,11 +1151,11 @@ static struct attribute *hptiop_host_attrs[] = {

ATTRIBUTE_GROUPS(hptiop_host);

static int hptiop_slave_config(struct scsi_device *sdev)
static int hptiop_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
if (sdev->type == TYPE_TAPE)
blk_queue_max_hw_sectors(sdev->request_queue, 8192);

lim->max_hw_sectors = 8192;
return 0;
}

@@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = driver_name,
.shost_groups = hptiop_host_groups,
.slave_configure = hptiop_slave_config,
.device_configure = hptiop_device_configure,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
.cmd_size = sizeof(struct hpt_cmd_priv),
@@ -5541,8 +5541,6 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
rport->supported_classes |= FC_COS_CLASS2;
if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
if (rport->rqst_q)
blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -6391,8 +6389,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)

ibmvfc_init_sub_crqs(vhost);

if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -6547,6 +6543,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,

.max_bsg_segments = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
};
@@ -1100,16 +1100,6 @@ static int device_check(imm_struct *dev, bool autodetect)
return -ENODEV;
}

/*
* imm cannot deal with highmem, so this causes all IO pages for this host
* to reside in low memory (hence mapped)
*/
static int imm_adjust_queue(struct scsi_device *device)
{
blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
return 0;
}

static const struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
@@ -1123,7 +1113,6 @@ static const struct scsi_host_template imm_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
.cmd_size = sizeof(struct scsi_pointer),
};

@@ -1235,6 +1224,7 @@ static int __imm_attach(struct parport *pb)
host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
if (!host)
goto out1;
host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
@@ -4769,15 +4769,17 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
}

/**
* ipr_slave_configure - Configure a SCSI device
* ipr_device_configure - Configure a SCSI device
* @sdev: scsi device struct
* @lim: queue limits
*
* This function configures the specified scsi device.
*
* Return value:
* 0 on success
**/
static int ipr_slave_configure(struct scsi_device *sdev)
static int ipr_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -4798,7 +4800,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_report_opcodes = 1;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
lim->max_hw_sectors = IPR_VSET_MAX_SECTORS;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

@@ -6397,7 +6399,7 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
.slave_alloc = ipr_slave_alloc,
.slave_configure = ipr_slave_configure,
.device_configure = ipr_device_configure,
.slave_destroy = ipr_slave_destroy,
.scan_finished = ipr_scan_finished,
.target_destroy = ipr_target_destroy,
@@ -943,6 +943,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
shost->max_id = 0;
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
shost->dma_alignment = 0;

rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
if (rc < 0)
@@ -1065,7 +1066,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
if (conn->datadgst_en)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
sdev->request_queue);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
@@ -804,14 +804,15 @@ EXPORT_SYMBOL_GPL(sas_target_alloc);

#define SAS_DEF_QD 256

int sas_slave_configure(struct scsi_device *scsi_dev)
int sas_device_configure(struct scsi_device *scsi_dev,
struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

if (dev_is_sata(dev)) {
ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
return 0;
}

@@ -829,7 +830,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)

return 0;
}
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_device_configure);

int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -2701,7 +2701,7 @@ int megasas_get_ctrl_info(struct megasas_instance *instance);
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
bool is_target_prop);
struct queue_limits *lim, bool is_target_prop);
int megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev);
void megasas_get_snapdump_properties(struct megasas_instance *instance);
@@ -1888,7 +1888,7 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
* Returns void
*/
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
bool is_target_prop)
struct queue_limits *lim, bool is_target_prop)
{
u16 pd_index = 0, ld;
u32 device_id;
@@ -1915,8 +1915,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
return;
raid = MR_LdRaidGet(ld, local_map_ptr);

if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
if (lim)
lim->dma_alignment = 0x7;
}

mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
@@ -1967,7 +1969,8 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
*
*/
static inline void
megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
megasas_set_nvme_device_properties(struct scsi_device *sdev,
struct queue_limits *lim, u32 max_io_size)
{
struct megasas_instance *instance;
u32 mr_nvme_pg_size;
@@ -1976,10 +1979,10 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);

blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
lim->max_hw_sectors = max_io_size / 512;
lim->virt_boundary_mask = mr_nvme_pg_size - 1;

blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
}

/*
@@ -2041,7 +2044,7 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
* @is_target_prop  true, if fw provided target properties.
*/
static void megasas_set_static_target_properties(struct scsi_device *sdev,
bool is_target_prop)
struct queue_limits *lim, bool is_target_prop)
{
u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
struct megasas_instance *instance;
@@ -2060,13 +2063,15 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);

if (instance->nvme_page_size && max_io_size_kb)
megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
megasas_set_nvme_device_properties(sdev, lim,
max_io_size_kb << 10);

megasas_set_fw_assisted_qd(sdev, is_target_prop);
}

static int megasas_slave_configure(struct scsi_device *sdev)
static int megasas_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
u16 pd_index = 0;
struct megasas_instance *instance;
@@ -2096,10 +2101,10 @@ static int megasas_slave_configure(struct scsi_device *sdev)
ret_target_prop = megasas_get_target_prop(instance, sdev);

is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
megasas_set_static_target_properties(sdev, lim, is_target_prop);

/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev, is_target_prop);
megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);

mutex_unlock(&instance->reset_mutex);

@@ -3507,7 +3512,7 @@ static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
.slave_configure = megasas_slave_configure,
.device_configure = megasas_device_configure,
.slave_alloc = megasas_slave_alloc,
.slave_destroy = megasas_slave_destroy,
.queuecommand = megasas_queue_command,
@@ -5119,7 +5119,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
ret_target_prop = megasas_get_target_prop(instance, sdev);

is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_dynamic_target_properties(sdev, is_target_prop);
megasas_set_dynamic_target_properties(sdev, NULL,
is_target_prop);
}

status_reg = instance->instancet->read_fw_status_reg
@@ -1352,7 +1352,6 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
@@ -1845,6 +1845,10 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
struct device *bsg_dev = &mrioc->bsg_dev;
struct device *parent = &mrioc->shost->shost_gendev;
struct queue_limits lim = {
.max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS,
.max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS,
};

device_initialize(bsg_dev);

@@ -1860,20 +1864,14 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
return;
}

mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
mpi3mr_bsg_request, NULL, 0);
if (IS_ERR(mrioc->bsg_queue)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
dev_name(bsg_dev));
device_del(bsg_dev);
put_device(bsg_dev);
return;
}

blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);

return;
}

/**
|
@ -986,6 +986,25 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
|
||||
return retval;
|
||||
}
|
||||
|
||||
static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
|
||||
struct queue_limits *lim)
|
||||
{
|
||||
u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;
|
||||
|
||||
lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
|
||||
lim->virt_boundary_mask = (1 << pgsz) - 1;
|
||||
}
|
||||
|
||||
static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
|
||||
struct queue_limits *lim)
|
||||
{
|
||||
if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
|
||||
(tgt_dev->dev_spec.pcie_inf.dev_info &
|
||||
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
|
||||
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
|
||||
mpi3mr_configure_nvme_dev(tgt_dev, lim);
|
||||
}
|
||||
|
||||
/**
|
||||
* mpi3mr_update_sdev - Update SCSI device information
|
||||
* @sdev: SCSI device reference
|
||||
@ -1001,31 +1020,17 @@ static void
|
||||
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
|
||||
{
|
||||
struct mpi3mr_tgt_dev *tgtdev;
|
||||
struct queue_limits lim;
|
||||
|
||||
tgtdev = (struct mpi3mr_tgt_dev *)data;
|
||||
if (!tgtdev)
|
||||
return;
|
||||
|
||||
mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
|
||||
switch (tgtdev->dev_type) {
|
||||
case MPI3_DEVICE_DEVFORM_PCIE:
|
||||
/*The block layer hw sector size = 512*/
|
||||
if ((tgtdev->dev_spec.pcie_inf.dev_info &
|
||||
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
|
||||
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
|
||||
blk_queue_max_hw_sectors(sdev->request_queue,
|
||||
tgtdev->dev_spec.pcie_inf.mdts / 512);
|
||||
if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
|
||||
blk_queue_virt_boundary(sdev->request_queue,
|
||||
((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
|
||||
else
|
||||
blk_queue_virt_boundary(sdev->request_queue,
|
||||
((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
lim = queue_limits_start_update(sdev->request_queue);
|
||||
mpi3mr_configure_tgt_dev(tgtdev, &lim);
|
||||
WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1038,8 +1043,7 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
*
* Return: Nothing.
*/

void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -4393,15 +4397,17 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
}

/**
* mpi3mr_slave_configure - Slave configure callback handler
* mpi3mr_device_configure - Slave configure callback handler
* @sdev: SCSI device reference
* @lim: queue limits
*
* Configure queue depth, max hardware sectors and virt boundary
* as required
*
* Return: 0 always.
*/
static int mpi3mr_slave_configure(struct scsi_device *sdev)
static int mpi3mr_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -4432,28 +4438,8 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);

switch (tgt_dev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
if ((tgt_dev->dev_spec.pcie_inf.dev_info &
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
blk_queue_max_hw_sectors(sdev->request_queue,
tgt_dev->dev_spec.pcie_inf.mdts / 512);
if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
blk_queue_virt_boundary(sdev->request_queue,
((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
else
blk_queue_virt_boundary(sdev->request_queue,
((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
}
break;
default:
break;
}

mpi3mr_configure_tgt_dev(tgt_dev, lim);
mpi3mr_tgtdev_put(tgt_dev);

return retval;
}

@@ -4921,7 +4907,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.queuecommand = mpi3mr_qcmd,
.target_alloc = mpi3mr_target_alloc,
.slave_alloc = mpi3mr_slave_alloc,
.slave_configure = mpi3mr_slave_configure,
.device_configure = mpi3mr_device_configure,
.target_destroy = mpi3mr_target_destroy,
.slave_destroy = mpi3mr_slave_destroy,
.scan_finished = mpi3mr_scan_finished,
@@ -2497,14 +2497,15 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}

/**
* scsih_slave_configure - device configure routine.
* scsih_device_configure - device configure routine.
* @sdev: scsi device struct
* @lim: queue limits
*
* Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
scsih_slave_configure(struct scsi_device *sdev)
scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -2609,8 +2610,7 @@ scsih_slave_configure(struct scsi_device *sdev)
raid_device->num_pds, ds);

if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
blk_queue_max_hw_sectors(sdev->request_queue,
MPT3SAS_RAID_MAX_SECTORS);
lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
sdev_printk(KERN_INFO, sdev,
"Set queue's max_sector to: %u\n",
MPT3SAS_RAID_MAX_SECTORS);
@@ -2675,8 +2675,7 @@ scsih_slave_configure(struct scsi_device *sdev)
pcie_device->connector_name);

if (pcie_device->nvme_mdts)
blk_queue_max_hw_sectors(sdev->request_queue,
pcie_device->nvme_mdts/512);
lim->max_hw_sectors = pcie_device->nvme_mdts / 512;

pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
@@ -2687,8 +2686,7 @@ scsih_slave_configure(struct scsi_device *sdev)
**/
blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
sdev->request_queue);
blk_queue_virt_boundary(sdev->request_queue,
ioc->page_size - 1);
lim->virt_boundary_mask = ioc->page_size - 1;
return 0;
}
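The NVMe-attached cases above (mpt3sas, and the mpi3mr and megaraid_sas hunks earlier) all follow the same rule: virt_boundary_mask is set to the NVMe page size minus one so that no segment crosses a page and every segment maps to a single PRP entry, and max_hw_sectors is derived from the device's MDTS. A worked example with illustrative values that are not taken from the patch, assuming a 4 KiB NVMe page size and a 128 KiB MDTS:

    static void example_nvme_limits(struct queue_limits *lim)
    {
            lim->virt_boundary_mask = 4096 - 1;       /* 0xfff: segments stay within one 4 KiB page */
            lim->max_hw_sectors = (128 * 1024) / 512; /* 256 sectors of 512 bytes per request */
    }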
@@ -11914,7 +11912,7 @@ static const struct scsi_host_template mpt2sas_driver_template = {
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
.slave_alloc = scsih_slave_alloc,
.slave_configure = scsih_slave_configure,
.device_configure = scsih_device_configure,
.target_destroy = scsih_target_destroy,
.slave_destroy = scsih_slave_destroy,
.scan_finished = scsih_scan_finished,
@@ -11952,7 +11950,7 @@ static const struct scsi_host_template mpt3sas_driver_template = {
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
.slave_alloc = scsih_slave_alloc,
.slave_configure = scsih_slave_configure,
.device_configure = scsih_device_configure,
.target_destroy = scsih_target_destroy,
.slave_destroy = scsih_slave_destroy,
.scan_finished = scsih_scan_finished,
@@ -197,8 +197,9 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
}

/**
* pmcraid_slave_configure - Configures a SCSI device
* pmcraid_device_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
* @lim: queue limits
*
* This function is executed by SCSI mid layer just after a device is first
* scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
@@ -209,7 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
* Return value:
* 0 on success
*/
static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
static int pmcraid_device_configure(struct scsi_device *scsi_dev,
struct queue_limits *lim)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;

@@ -233,8 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
scsi_dev->allow_restart = 1;
blk_queue_rq_timeout(scsi_dev->request_queue,
PMCRAID_VSET_IO_TIMEOUT);
blk_queue_max_hw_sectors(scsi_dev->request_queue,
PMCRAID_VSET_MAX_SECTORS);
lim->max_hw_sectors = PMCRAID_VSET_MAX_SECTORS;
}

/*
@@ -3668,7 +3669,7 @@ static const struct scsi_host_template pmcraid_host_template = {
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,

.slave_alloc = pmcraid_slave_alloc,
.slave_configure = pmcraid_slave_configure,
.device_configure = pmcraid_device_configure,
.slave_destroy = pmcraid_slave_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.can_queue = PMCRAID_MAX_IO_CMD,
@@ -986,12 +986,6 @@ static int device_check(ppa_struct *dev, bool autodetect)
return -ENODEV;
}

static int ppa_adjust_queue(struct scsi_device *device)
{
blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
return 0;
}

static const struct scsi_host_template ppa_template = {
.module = THIS_MODULE,
.proc_name = "ppa",
@@ -1005,7 +999,6 @@ static const struct scsi_host_template ppa_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
.cmd_size = sizeof(struct scsi_pointer),
};

@@ -1111,6 +1104,7 @@ static int __ppa_attach(struct parport *pb)
host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
if (!host)
goto out1;
host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
@@ -1957,9 +1957,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;

if (IS_T10_PI_CAPABLE(vha->hw))
blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

scsi_change_queue_depth(sdev, req->max_q_depth);
return 0;
}
@@ -3575,6 +3572,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
QLA_SG_ALL : 128;
}

if (IS_T10_PI_CAPABLE(base_vha->hw))
host->dma_alignment = 0x7;

ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@ -32,7 +32,7 @@
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_transport.h> /* scsi_init_limits() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>
@ -1965,42 +1965,36 @@ static void scsi_map_queues(struct blk_mq_tag_set *set)
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
{
struct device *dev = shost->dma_dev;

/*
* this limit is imposed by hardware restrictions
*/
blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
SG_MAX_SEGMENTS));
memset(lim, 0, sizeof(*lim));
lim->max_segments =
min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);

if (scsi_host_prot_dma(shost)) {
shost->sg_prot_tablesize =
min_not_zero(shost->sg_prot_tablesize,
(unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
lim->max_integrity_segments = shost->sg_prot_tablesize;
}

blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
lim->max_hw_sectors = shost->max_sectors;
lim->seg_boundary_mask = shost->dma_boundary;
lim->max_segment_size = shost->max_segment_size;
lim->virt_boundary_mask = shost->virt_boundary_mask;
lim->dma_alignment = max_t(unsigned int,
shost->dma_alignment, dma_get_cache_alignment() - 1);

if (shost->no_highmem)
lim->bounce = BLK_BOUNCE_HIGH;

dma_set_seg_boundary(dev, shost->dma_boundary);

blk_queue_max_segment_size(q, shost->max_segment_size);
blk_queue_virt_boundary(q, shost->virt_boundary_mask);
dma_set_max_seg_size(dev, queue_max_segment_size(q));

/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
* which is a common minimum for HBAs, and the minimum DMA alignment,
* which is set by the platform.
*
* Devices that require a bigger alignment can increase it later.
*/
blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
dma_set_max_seg_size(dev, shost->max_segment_size);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
EXPORT_SYMBOL_GPL(scsi_init_limits);

static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.get_budget = scsi_mq_get_budget,

@ -227,7 +227,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,

/*
* realloc if new shift is calculated, which is caused by setting
* up one new default queue depth after calling ->slave_configure
* up one new default queue depth after calling ->device_configure
*/
if (!need_alloc && new_shift != sdev->budget_map.shift)
need_alloc = need_free = true;
@ -283,6 +283,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct queue_limits lim;

sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
GFP_KERNEL);
@ -332,7 +333,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,

sdev->sg_reserved_size = INT_MAX;

q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
scsi_init_limits(shost, &lim);
q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, NULL);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@ -343,7 +345,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);

depth = sdev->host->cmd_per_lun ?: 1;

@ -873,6 +874,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
blist_flags_t *bflags, int async)
{
const struct scsi_host_template *hostt = sdev->host->hostt;
struct queue_limits lim;
int ret;

/*
@ -1003,19 +1006,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_SELECT_NO_ATN)
sdev->select_no_atn = 1;

/*
* Maximum 512 sector transfer length
* broken RA4x00 Compaq Disk Array
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_hw_sectors(sdev->request_queue, 512);
/*
* Max 1024 sector transfer length for targets that report incorrect
* max/optimal lengths and relied on the old block layer safe default
*/
else if (*bflags & BLIST_MAX_1024)
blk_queue_max_hw_sectors(sdev->request_queue, 1024);

/*
* Some devices may not want to have a start command automatically
* issued when a device is added.
@ -1076,28 +1066,46 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,

transport_configure_device(&sdev->sdev_gendev);

if (sdev->host->hostt->slave_configure) {
ret = sdev->host->hostt->slave_configure(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
* console with alloc failure messages
*/
if (ret != -ENXIO) {
sdev_printk(KERN_ERR, sdev,
"failed to configure device\n");
}
return SCSI_SCAN_NO_RESPONSE;
}
/*
* No need to freeze the queue as it isn't reachable to anyone else yet.
*/
lim = queue_limits_start_update(sdev->request_queue);
if (*bflags & BLIST_MAX_512)
lim.max_hw_sectors = 512;
else if (*bflags & BLIST_MAX_1024)
lim.max_hw_sectors = 1024;

if (hostt->device_configure)
ret = hostt->device_configure(sdev, &lim);
else if (hostt->slave_configure)
ret = hostt->slave_configure(sdev);
if (ret) {
queue_limits_cancel_update(sdev->request_queue);
/*
* The queue_depth is often changed in ->slave_configure.
* Set up budget map again since memory consumption of
* the map depends on actual queue depth.
* If the LLDD reports device not present, don't clutter the
* console with failure messages.
*/
scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
if (ret != -ENXIO)
sdev_printk(KERN_ERR, sdev,
"failed to configure device\n");
return SCSI_SCAN_NO_RESPONSE;
}

ret = queue_limits_commit_update(sdev->request_queue, &lim);
if (ret) {
sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
return SCSI_SCAN_NO_RESPONSE;
}

/*
* The queue_depth is often changed in ->device_configure.
*
* Set up budget map again since memory consumption of the map depends
* on actual queue depth.
*/
if (hostt->device_configure || hostt->slave_configure)
scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);

if (sdev->scsi_level >= SCSI_3)
scsi_attach_vpd(sdev);

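The scsi_add_lun() rework above is the heart of the midlayer conversion: the blacklist overrides and the LLDD callback now operate on a local queue_limits snapshot that is either committed in one step or cancelled on failure. A condensed sketch of that control flow (a hypothetical helper wrapping the same calls as the hunk; the locking comments follow the queue_limits_cancel_update() definition shown further down in the blkdev.h hunk):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>

static int example_configure_lun(struct scsi_device *sdev,
				 const struct scsi_host_template *hostt,
				 blist_flags_t bflags)
{
	struct queue_limits lim;
	int ret = 0;

	/* Snapshot the current limits; this also takes q->limits_lock. */
	lim = queue_limits_start_update(sdev->request_queue);

	if (bflags & BLIST_MAX_512)
		lim.max_hw_sectors = 512;
	else if (bflags & BLIST_MAX_1024)
		lim.max_hw_sectors = 1024;

	if (hostt->device_configure)
		ret = hostt->device_configure(sdev, &lim);	/* new hook */
	else if (hostt->slave_configure)
		ret = hostt->slave_configure(sdev);		/* legacy hook */
	if (ret) {
		/* Nothing was applied yet; just drop the lock and bail out. */
		queue_limits_cancel_update(sdev->request_queue);
		return ret;
	}

	/* Validate the combined limits and publish them atomically. */
	return queue_limits_commit_update(sdev->request_queue, &lim);
}
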
@ -4276,6 +4276,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
{
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct queue_limits lim;
struct request_queue *q;
char bsg_name[20];

@ -4286,16 +4287,16 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)

snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);

q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
i->f->dd_bsg_size);
scsi_init_limits(shost, &lim);
lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments);
q = bsg_setup_queue(dev, bsg_name, &lim, fc_bsg_dispatch,
fc_bsg_job_timeout, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
shost->host_no);
return PTR_ERR(q);
}
__scsi_init_queue(shost, q);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
@ -4311,6 +4312,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
{
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct queue_limits lim;
struct request_queue *q;

rport->rqst_q = NULL;
@ -4318,13 +4320,14 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;

q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
scsi_init_limits(shost, &lim);
lim.max_segments = min_not_zero(lim.max_segments, i->f->max_bsg_segments);
q = bsg_setup_queue(dev, dev_name(dev), &lim, fc_bsg_dispatch_prep,
fc_bsg_job_timeout, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
return PTR_ERR(q);
}
__scsi_init_queue(shost, q);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;

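Both FC bsg queues above show the transport-class side of the conversion: rather than fixing up the queue with __scsi_init_queue() after allocation, the limits are derived from the host via scsi_init_limits(), clamped for bsg, and handed straight to bsg_setup_queue(). Roughly the same shape, sketched for a generic transport (the example_* names are placeholders, not part of the series):

#include <linux/bsg-lib.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

/* Placeholder dispatch handler; a real transport would hand the job to
 * its hardware-specific bsg machinery here.
 */
static int example_bsg_dispatch(struct bsg_job *job)
{
	bsg_job_done(job, -EOPNOTSUPP, 0);
	return 0;
}

/* Sketch: create a bsg queue whose limits mirror the SCSI host's. */
static struct request_queue *example_bsg_add(struct Scsi_Host *shost,
					     struct device *dev,
					     unsigned short max_bsg_segments)
{
	struct queue_limits lim;

	scsi_init_limits(shost, &lim);
	if (max_bsg_segments)
		lim.max_segments = min_t(unsigned short, lim.max_segments,
					 max_bsg_segments);
	return bsg_setup_queue(dev, dev_name(dev), &lim,
			       example_bsg_dispatch, NULL, 0);
}
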
@ -1535,6 +1535,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
{
struct device *dev = &shost->shost_gendev;
struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
struct queue_limits lim;
struct request_queue *q;
char bsg_name[20];

@ -1542,13 +1543,14 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
return -ENOTSUPP;

snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0);
scsi_init_limits(shost, &lim);
q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL,
0);
if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
return PTR_ERR(q);
}
__scsi_init_queue(shost, q);

ihost->bsg_q = q;
return 0;

@ -197,7 +197,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
}

if (rphy) {
q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL,
sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
@ -206,7 +206,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
char name[20];

snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
q = bsg_setup_queue(&shost->shost_gendev, name,
q = bsg_setup_queue(&shost->shost_gendev, name, NULL,
sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);

@ -70,18 +70,6 @@ static int slave_alloc(struct scsi_device *sdev)

static int slave_configure(struct scsi_device *sdev)
{
/*
* Scatter-gather buffers (all but the last) must have a length
* divisible by the bulk maxpacket size. Otherwise a data packet
* would end up being short, causing a premature end to the data
* transfer. Since high-speed bulk pipes have a maxpacket size
* of 512, we'll use that as the scsi device queue's DMA alignment
* mask. Guaranteeing proper alignment of the first buffer will
* have the desired effect because, except at the beginning and
* the end, scatter-gather buffers follow page boundaries.
*/
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
* what is originally reported. We need this to avoid confusing
* the SCSI layer with devices that report 0 or 1, but need 10-byte
@ -219,6 +207,18 @@ static const struct scsi_host_template rtsx_host_template = {
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,

/*
* Scatter-gather buffers (all but the last) must have a length
* divisible by the bulk maxpacket size. Otherwise a data packet
* would end up being short, causing a premature end to the data
* transfer. Since high-speed bulk pipes have a maxpacket size
* of 512, we'll use that as the scsi device queue's DMA alignment
* mask. Guaranteeing proper alignment of the first buffer will
* have the desired effect because, except at the beginning and
* the end, scatter-gather buffers follow page boundaries.
*/
.dma_alignment = 511,

/* emulated HBA */
.emulated = 1,

@ -253,7 +253,8 @@ int ufs_bsg_probe(struct ufs_hba *hba)
if (ret)
goto out;

q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request,
NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out;

@ -5218,9 +5218,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
*/
sdev->silence_suspend = 1;

if (hba->vops && hba->vops->config_scsi_dev)
hba->vops->config_scsi_dev(sdev);

ufshcd_crypto_register(hba, q);

return 0;

@ -1187,6 +1187,8 @@ static int exynos_ufs_init(struct ufs_hba *hba)
goto out;
exynos_ufs_specify_phy_time_attr(ufs);
exynos_ufs_config_smu(ufs);

hba->host->dma_alignment = SZ_4K - 1;
return 0;

out:
@ -1510,11 +1512,6 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
return 0;
}

static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev)
{
blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1);
}

static int fsd_ufs_post_link(struct exynos_ufs *ufs)
{
int i;
@ -1583,7 +1580,6 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
.config_scsi_dev = exynos_ufs_config_scsi_dev,
};

static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {

@ -328,12 +328,6 @@ static int mts_slave_alloc (struct scsi_device *s)
return 0;
}

static int mts_slave_configure (struct scsi_device *s)
{
blk_queue_dma_alignment(s->request_queue, (512 - 1));
return 0;
}

static int mts_scsi_abort(struct scsi_cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
@ -631,8 +625,8 @@ static const struct scsi_host_template mts_scsi_host_template = {
.can_queue = 1,
.this_id = -1,
.emulated = 1,
.dma_alignment = 511,
.slave_alloc = mts_slave_alloc,
.slave_configure = mts_slave_configure,
.max_sectors= 256, /* 128 K */
};

@ -40,7 +40,6 @@
#include <scsi/scsi_eh.h>

#include "usb.h"
#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "debug.h"
#include "transport.h"
@ -76,12 +75,6 @@ static int slave_alloc (struct scsi_device *sdev)
*/
sdev->inquiry_len = 36;

/*
* Some host controllers may have alignment requirements.
* We'll play it safe by requiring 512-byte alignment always.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));

/* Tell the SCSI layer if we know there is more than one LUN */
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
@ -89,7 +82,7 @@ static int slave_alloc (struct scsi_device *sdev)
return 0;
}

static int slave_configure(struct scsi_device *sdev)
static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct us_data *us = host_to_us(sdev->host);
struct device *dev = us->pusb_dev->bus->sysdev;
@ -104,40 +97,28 @@ static int slave_configure(struct scsi_device *sdev)

if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_SIZE >> 9;
if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors);
} else if (sdev->type == TYPE_TAPE) {
/*
* Tapes need much higher max_sector limits, so just
* raise it to the maximum possible (4 GB / 512) and
* let the queue segment size sort out the real limit.
*/
blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
lim->max_hw_sectors = 0x7FFFFF;
} else if (us->pusb_dev->speed >= USB_SPEED_SUPER) {
/*
* USB3 devices will be limited to 2048 sectors. This gives us
* better throughput on most devices.
*/
blk_queue_max_hw_sectors(sdev->request_queue, 2048);
lim->max_hw_sectors = 2048;
}

/*
* The max_hw_sectors should be up to maximum size of a mapping for
* the device. Otherwise, a DMA API might fail on swiotlb environment.
*/
blk_queue_max_hw_sectors(sdev->request_queue,
min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
dma_max_mapping_size(dev) >> SECTOR_SHIFT));

/*
* Some USB host controllers can't do DMA; they have to use PIO.
* For such controllers we need to make sure the block layer sets
* up bounce buffers in addressable memory.
*/
if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
(bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL))
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
lim->max_hw_sectors = min_t(size_t,
lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);

/*
* We can't put these settings in slave_alloc() because that gets
@ -598,13 +579,22 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct queue_limits lim;
unsigned short ms;
int ret;

if (sscanf(buf, "%hu", &ms) > 0) {
blk_queue_max_hw_sectors(sdev->request_queue, ms);
return count;
}
return -EINVAL;
if (sscanf(buf, "%hu", &ms) <= 0)
return -EINVAL;

blk_mq_freeze_queue(sdev->request_queue);
lim = queue_limits_start_update(sdev->request_queue);
lim.max_hw_sectors = ms;
ret = queue_limits_commit_update(sdev->request_queue, &lim);
blk_mq_unfreeze_queue(sdev->request_queue);

if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(max_sectors);

@ -642,12 +632,17 @@ static const struct scsi_host_template usb_stor_host_template = {
.this_id = -1,

.slave_alloc = slave_alloc,
.slave_configure = slave_configure,
.device_configure = device_configure,
.target_alloc = target_alloc,

/* lots of sg segments can be handled */
.sg_tablesize = SG_MAX_SEGMENTS,

/*
* Some host controllers may have alignment requirements.
* We'll play it safe by requiring 512-byte alignment always.
*/
.dma_alignment = 511,

/*
* Limit the total size of a transfer to 120 KB.

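max_sectors_store() above also demonstrates how a live queue is changed from sysfs under the new API: the queue is frozen around the limits transaction so the update cannot race with I/O issued against the old limit. The general shape, as a stand-alone sketch (hypothetical helper, not from the series):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Sketch: atomically change one limit on an already-live request queue. */
static int example_set_max_sectors(struct request_queue *q, unsigned short ms)
{
	struct queue_limits lim;
	int ret;

	blk_mq_freeze_queue(q);			/* quiesce the queue first */
	lim = queue_limits_start_update(q);	/* takes q->limits_lock */
	lim.max_hw_sectors = ms;
	ret = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	return ret;
}
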
@ -823,26 +823,19 @@ static int uas_slave_alloc(struct scsi_device *sdev)
(struct uas_dev_info *)sdev->host->hostdata;

sdev->hostdata = devinfo;

/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.
* As this is not exported, we use an extremely conservative guess.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));

if (devinfo->flags & US_FL_MAX_SECTORS_64)
blk_queue_max_hw_sectors(sdev->request_queue, 64);
else if (devinfo->flags & US_FL_MAX_SECTORS_240)
blk_queue_max_hw_sectors(sdev->request_queue, 240);

return 0;
}

static int uas_slave_configure(struct scsi_device *sdev)
static int uas_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct uas_dev_info *devinfo = sdev->hostdata;

if (devinfo->flags & US_FL_MAX_SECTORS_64)
lim->max_hw_sectors = 64;
else if (devinfo->flags & US_FL_MAX_SECTORS_240)
lim->max_hw_sectors = 240;

if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;

@ -907,11 +900,17 @@ static const struct scsi_host_template uas_host_template = {
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.device_configure = uas_device_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
.skip_settle_delay = 1,
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.
* As this is not exported, we use an extremely conservative guess.
*/
.dma_alignment = 511,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = sizeof(struct uas_cmd_info),
};

@ -47,6 +47,7 @@
#include <scsi/scsi_device.h>

#include "usb.h"
#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "transport.h"
#include "protocol.h"
@ -961,6 +962,15 @@ int usb_stor_probe1(struct us_data **pus,
if (result)
goto BadDevice;

/*
* Some USB host controllers can't do DMA; they have to use PIO.
* For such controllers we need to make sure the block layer sets
* up bounce buffers in addressable memory.
*/
if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
bus_to_hcd(us->pusb_dev->bus)->localmem_pool)
host->no_highmem = true;

/* Get the unusual_devs entries and the descriptors */
result = get_device_info(us, id, unusual_dev);
if (result)

@ -892,18 +892,25 @@ int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);

/**
* queue_limits_cancel_update - cancel an atomic update of queue limits
* @q: queue to update
*
* This functions cancels an atomic update of the queue limits started by
* queue_limits_start_update() and should be used when an error occurs after
* starting update.
*/
static inline void queue_limits_cancel_update(struct request_queue *q)
{
mutex_unlock(&q->limits_lock);
}

/*
* Access functions for manipulating queue properties
*/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
@ -920,7 +927,6 @@ void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@ -928,10 +934,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

@ -948,8 +950,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,

extern void blk_queue_required_elevator_features(struct request_queue *q,
unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

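The blkdev.h hunk spells out the model behind the whole series: queue_limits_start_update() hands back a snapshot to edit (taking q->limits_lock, which the new queue_limits_cancel_update() above simply drops again), and queue_limits_commit_update() validates and applies the result. For orientation, the legacy per-queue setters touched in this series have direct queue_limits equivalents; a rough correspondence sketch with made-up values, using the same field names scsi_init_limits() assigns earlier in this commit:

#include <linux/blkdev.h>

/* Old way: one exported setter call per property, each taking effect
 * immediately on the live queue.
 */
static void example_legacy_style(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 1024);
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 65536);
	blk_queue_segment_boundary(q, 0xffffffffUL);
	blk_queue_virt_boundary(q, 4095);
	blk_queue_dma_alignment(q, 511);
}

/* New way: the same properties become plain field assignments on a
 * queue_limits snapshot, applied (and validated) in a single commit.
 */
static int example_atomic_style(struct request_queue *q)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_hw_sectors = 1024;
	lim.max_segments = 128;
	lim.max_segment_size = 65536;
	lim.seg_boundary_mask = 0xffffffffUL;
	lim.virt_boundary_mask = 4095;
	lim.dma_alignment = 511;

	return queue_limits_commit_update(q, &lim);
}
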
@ -65,7 +65,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
struct queue_limits *lim, bsg_job_fn *job_fn,
bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);

@ -1151,7 +1151,8 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
int ata_scsi_device_configure(struct scsi_device *sdev,
struct queue_limits *lim);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
@ -1249,7 +1250,8 @@ extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@ -1415,13 +1417,13 @@ extern const struct attribute_group *ata_common_sdev_groups[];
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config
.device_configure = ata_scsi_device_configure

#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config
.device_configure = ata_scsi_device_configure

#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \

@ -433,8 +433,8 @@ struct mmc_host {
mmc_pm_flag_t pm_caps; /* supported pm features */

/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */
unsigned int max_seg_size; /* lim->max_segment_size */
unsigned short max_segs; /* lim->max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */

@ -683,7 +683,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
int sas_device_configure(struct scsi_device *dev,
struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
@ -749,7 +750,7 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
#endif

#define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \
.slave_configure = sas_slave_configure, \
.device_configure = sas_device_configure, \
.slave_alloc = sas_slave_alloc, \

#define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE

@ -211,7 +211,11 @@ struct scsi_host_template {
* up after yourself before returning non-0
*
* Status: OPTIONAL
*
* Note: slave_configure is the legacy version, use device_configure for
* all new code. A driver must never define both.
*/
int (* device_configure)(struct scsi_device *, struct queue_limits *lim);
int (* slave_configure)(struct scsi_device *);

/*
@ -405,6 +409,8 @@ struct scsi_host_template {
*/
unsigned int max_segment_size;

unsigned int dma_alignment;

/*
* DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
@ -614,6 +620,7 @@ struct Scsi_Host {
unsigned int max_sectors;
unsigned int opt_sectors;
unsigned int max_segment_size;
unsigned int dma_alignment;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
/*
@ -665,6 +672,8 @@ struct Scsi_Host {
/* The transport requires the LUN bits NOT to be stored in CDB[1] */
unsigned no_scsi2_lun_in_cdb:1;

unsigned no_highmem:1;

/*
* Optional work queue to be utilized by the transport
*/

@ -83,6 +83,6 @@ scsi_transport_device_data(struct scsi_device *sdev)
+ shost->transportt->device_private_offset;
}

void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q);
void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim);

#endif /* SCSI_TRANSPORT_H */

@ -709,6 +709,7 @@ struct fc_function_template {
int (*vport_delete)(struct fc_vport *);

/* bsg support */
u32 max_bsg_segments;
int (*bsg_request)(struct bsg_job *);
int (*bsg_timeout)(struct bsg_job *);

@ -374,7 +374,6 @@ struct ufs_hba_variant_ops {
int (*get_outstanding_cqs)(struct ufs_hba *hba,
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
void (*config_scsi_dev)(struct scsi_device *sdev);
};

/* clock gating state */