mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-07 21:53:44 +00:00
Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
  block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
  xen-blkfront: fix missing out label
  blkdev: fix blkdev_issue_zeroout return value
  block: update request stacking methods to support discards
  block: fix missing export of blk_types.h
  writeback: fix bad _bh spinlock nesting
  drbd: revert "delay probes", feature is being re-implemented differently
  drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
  drbd: Disable delay probes for the upcomming release
  writeback: cleanup bdi_register
  writeback: add new tracepoints
  writeback: remove unnecessary init_timer call
  writeback: optimize periodic bdi thread wakeups
  writeback: prevent unnecessary bdi threads wakeups
  writeback: move bdi threads exiting logic to the forker thread
  writeback: restructure bdi forker loop a little
  writeback: move last_active to bdi
  writeback: do not remove bdi from bdi_list
  writeback: simplify bdi code a little
  writeback: do not lose wake-ups in bdi threads
  ...

Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and drivers/scsi/scsi_error.c as per Jens.
commit 2f9e825d3e
@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #endif /* !(_ALPHA_SCATTERLIST_H) */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffff)
-
 #endif /* __ASM_AVR32_SCATTERLIST_H */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffff)
-
 #endif /* !(_BLACKFIN_SCATTERLIST_H) */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x1fffffff)
-
 #endif /* !(__ASM_CRIS_SCATTERLIST_H) */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffffUL)
-
 #endif /* !_ASM_SCATTERLIST_H */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffff)
-
 #endif /* !(_H8300_SCATTERLIST_H) */

@@ -2,15 +2,6 @@
 #define _ASM_IA64_SCATTERLIST_H
 
 #include <asm-generic/scatterlist.h>
-/*
- * It used to be that ISA_DMA_THRESHOLD had something to do with the
- * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
- * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
- * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
- * address of a page is that is allocated with GFP_DMA. On IA-64,
- * that's 4GB - 1.
- */
-#define ISA_DMA_THRESHOLD 0xffffffff
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_IA64_SCATTERLIST_H */

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x1fffffff)
-
 #endif /* _ASM_M32R_SCATTERLIST_H */

@@ -3,7 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-/* This is bogus and should go away. */
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #endif /* !(_M68K_SCATTERLIST_H) */

@@ -1,3 +1 @@
 #include <asm-generic/scatterlist.h>
-
-#define ISA_DMA_THRESHOLD (~0UL)

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffffUL)
-
 #endif /* __ASM_SCATTERLIST_H */

@@ -13,6 +13,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #endif /* _ASM_SCATTERLIST_H */

@@ -5,7 +5,6 @@
 #include <asm/types.h>
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
 #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
 
 #endif /* _ASM_PARISC_SCATTERLIST_H */

@@ -12,9 +12,6 @@
 #include <asm/dma.h>
 #include <asm-generic/scatterlist.h>
 
-#ifdef __powerpc64__
-#define ISA_DMA_THRESHOLD (~0UL)
-#endif
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_POWERPC_SCATTERLIST_H */

@@ -1,3 +1 @@
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #include <asm-generic/scatterlist.h>

@@ -1,8 +1,6 @@
 #ifndef _ASM_SCORE_SCATTERLIST_H
 #define _ASM_SCORE_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #include <asm-generic/scatterlist.h>
 
 #endif /* _ASM_SCORE_SCATTERLIST_H */

@@ -1,8 +1,6 @@
 #ifndef __ASM_SH_SCATTERLIST_H
 #define __ASM_SH_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD phys_addr_mask()
-
 #include <asm-generic/scatterlist.h>
 
 #endif /* __ASM_SH_SCATTERLIST_H */

@@ -3,7 +3,6 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* !(_SPARC_SCATTERLIST_H) */
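Every hunk above follows the same recipe: the per-architecture ISA_DMA_THRESHOLD definition is dropped and the header collapses to little more than the asm-generic include. As a rough sketch (guard name hypothetical), a converted arch header ends up looking like this:

    /* arch/<arch>/include/asm/scatterlist.h after this series -- sketch */
    #ifndef _EXAMPLE_ARCH_SCATTERLIST_H
    #define _EXAMPLE_ARCH_SCATTERLIST_H

    #include <asm-generic/scatterlist.h>

    /* No ISA_DMA_THRESHOLD here any more: the block layer computes its
     * ISA bounce limit itself instead of reading a per-arch constant. */

    #endif /* _EXAMPLE_ARCH_SCATTERLIST_H */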
@@ -33,6 +33,7 @@
 #include "linux/mm.h"
 #include "linux/slab.h"
 #include "linux/vmalloc.h"
+#include "linux/smp_lock.h"
 #include "linux/blkpg.h"
 #include "linux/genhd.h"
 #include "linux/spinlock.h"

@@ -1098,6 +1099,7 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
 	struct ubd *ubd_dev = disk->private_data;
 	int err = 0;
 
+	lock_kernel();
 	if(ubd_dev->count == 0){
 		err = ubd_open_dev(ubd_dev);
 		if(err){

@@ -1115,7 +1117,8 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
 		if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev);
 		err = -EROFS;
 	}*/
 out:
+	unlock_kernel();
 	return err;
 }
 

@@ -1123,8 +1126,10 @@ static int ubd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ubd *ubd_dev = disk->private_data;
 
+	lock_kernel();
 	if(--ubd_dev->count == 0)
 		ubd_close_dev(ubd_dev);
+	unlock_kernel();
 
 	return 0;
 }
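This ubd hunk is one instance of the BKL pushdown that repeats through the driver changes below (DAC960, amiflop, aoeblk, ataflop, brd): now that the block core no longer takes the big kernel lock around ->open(), ->release() and ->ioctl(), each driver bounds its own critical section. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/smp_lock.h>

    /* Before this series the core called ->open() with the BKL held;
     * afterwards the driver takes and drops it explicitly. */
    static int example_open(struct block_device *bdev, fmode_t mode)
    {
        int err;

        lock_kernel();
        err = example_do_open(bdev, mode);  /* hypothetical helper */
        unlock_kernel();
        return err;
    }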
@@ -3,7 +3,6 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_X86_SCATTERLIST_H */

@@ -13,6 +13,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #endif /* _XTENSA_SCATTERLIST_H */
@@ -13,7 +13,6 @@
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit

@@ -22,15 +21,8 @@
  *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-		      prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 {
-	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
-					     QUEUE_ORDERED_DO_POSTFLUSH))) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
-		return -EINVAL;
-	}
-
 	if (ordered != QUEUE_ORDERED_NONE &&
 	    ordered != QUEUE_ORDERED_DRAIN &&
 	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&

@@ -44,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 	q->ordered = ordered;
 	q->next_ordered = ordered;
-	q->prepare_flush_fn = prepare_flush_fn;
 
 	return 0;
 }

@@ -79,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	 *
 	 * http://thread.gmane.org/gmane.linux.kernel/537473
 	 */
-	if (!blk_fs_request(rq))
+	if (rq->cmd_type != REQ_TYPE_FS)
 		return QUEUE_ORDSEQ_DRAIN;
 
 	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==

@@ -143,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	}
 
 	blk_rq_init(q, rq);
-	rq->cmd_flags = REQ_HARDBARRIER;
-	rq->rq_disk = q->bar_rq.rq_disk;
+	rq->cmd_type = REQ_TYPE_FS;
+	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
+	rq->rq_disk = q->orig_bar_rq->rq_disk;
 	rq->end_io = end_io;
-	q->prepare_flush_fn(q, rq);
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }

@@ -203,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	/* initialize proxy request and queue it */
 	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
+		rq->cmd_flags |= REQ_WRITE;
 	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);

@@ -236,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
-	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+				(rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)

@@ -261,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	 */
 
 	/* Special requests are not subject to ordering rules. */
-	if (!blk_fs_request(rq) &&
+	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return true;
 

@@ -319,6 +311,15 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	if (!q)
 		return -ENXIO;
 
+	/*
+	 * some block devices may not have their queue correctly set up here
+	 * (e.g. loop device without a backing file) and so issuing a flush
+	 * here will panic. Ensure there is a request function before issuing
+	 * the barrier.
+	 */
+	if (!q->make_request_fn)
+		return -ENXIO;
+
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_end_io = bio_end_empty_barrier;
 	bio->bi_bdev = bdev;
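With prepare_flush_fn gone, the core now composes flush requests itself (note queue_flush() setting REQ_TYPE_FS and REQ_HARDBARRIER | REQ_FLUSH above), so callers simply drop the third argument. A sketch of the call-site change in a driver's queue setup:

    /* old interface:
     *     blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
     * new interface -- the driver recognizes REQ_FLUSH requests in its
     * request_fn instead of prepping them through a callback:
     */
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);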
block/blk-core.c | 117
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);

@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
+	q->unprep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;

@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows to later add a payload to an already submitted request by
+ * a block driver.  The driver needs to take care of freeing the payload
+ * itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len)
+{
+	struct bio *bio = rq->bio;
+
+	bio->bi_io_vec->bv_page = page;
+	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_len = len;
+
+	bio->bi_size = len;
+	bio->bi_vcnt = 1;
+	bio->bi_phys_segments = 1;
+
+	rq->__data_len = rq->resid_len = len;
+	rq->nr_phys_segments = 1;
+	rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
 	/*
 	 * Inherit FAILFAST from bio (for read-ahead, and explicit
 	 * FAILFAST).  FAILFAST flags are identical for req and bio.
 	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
-
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
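The kernel-doc is blunt that blk_add_request_payload() is a stopgap meant only for discard handling. A hedged sketch of how a driver might attach a zeroed single-sector payload to a discard request it is preparing (names and the 512-byte size are illustrative, error paths elided):

    static int example_prep_discard(struct request *rq, gfp_t gfp)
    {
        struct page *page;

        if (!(rq->cmd_flags & REQ_DISCARD))
            return 0;

        page = alloc_page(gfp | __GFP_ZERO);
        if (!page)
            return -ENOMEM;

        /* The request now carries one zeroed sector; the driver still
         * owns the page and must free it on completion. */
        blk_add_request_payload(rq, page, 512);
        return 0;
    }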
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 

@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);

@@ -1275,7 +1292,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.

@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),

@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
-		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-		    !blk_queue_discard(q)) {
+		if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}

@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
+	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {

@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
 */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
+	if (rq->cmd_flags & REQ_DISCARD)
+		return 0;
+
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);

@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * sees this request (possibly after
 			 * requeueing).  Notify IO scheduler.
 			 */
-			if (blk_sorted_rq(rq))
+			if (rq->cmd_flags & REQ_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*

@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * TODO: tj: This is too subtle.  It would be better to let
 	 * low level drivers do what they see fit.
 	 */
-	if (blk_fs_request(req))
+	if (req->cmd_type == REQ_TYPE_FS)
 		req->errors = 0;
 
-	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)blk_rq_pos(req));

@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
-	if (blk_fs_request(req) || blk_discard_rq(req))
+	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */

@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
 	    blk_update_request(rq->next_rq, error, bidi_bytes))
 		return true;
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_queue_add_random(rq->q))
+		add_disk_randomness(rq->rq_disk);
 
 	return false;
 }
 
+/**
+ * blk_unprep_request - unprepare a request
+ * @req:	the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion).  It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn.  The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	req->cmd_flags &= ~REQ_DONTPREP;
+	if (q->unprep_rq_fn)
+		q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
 /*
  * queue lock must be held
  */

@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
 
 	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && blk_fs_request(req))
+	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
 		laptop_io_completion(&req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
+	if (req->cmd_flags & REQ_DONTPREP)
+		blk_unprep_request(req);
+
 	blk_account_io_done(req);
 
 	if (req->end_io)

@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);

@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+	if (src->cmd_flags & REQ_DISCARD)
+		dst->cmd_flags |= REQ_DISCARD;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
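Most of the blk-core.c churn is one mechanical substitution: the bio_rw_flagged()/blk_*_request() helpers give way to direct tests against the unified REQ_* flags from blk_types.h, which struct bio and struct request now share. The before/after shape, sketched with hypothetical handlers:

    /* before this series */
    if (bio_rw_flagged(bio, BIO_RW_DISCARD))
        example_handle_discard(bio);        /* hypothetical */
    if (blk_fs_request(rq))
        example_account_fs(rq);             /* hypothetical */

    /* after: bi_rw and cmd_flags use the same REQ_* bits, and the
     * request type is an explicit rq->cmd_type value */
    if (bio->bi_rw & REQ_DISCARD)
        example_handle_discard(bio);
    if (rq->cmd_type == REQ_TYPE_FS)
        example_account_fs(rq);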
@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where, 1);
 	__generic_unplug_device(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
-	if (blk_pm_resume_request(rq))
+	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
@@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
-	__free_page(bio_page(bio));
 
 	bio_put(bio);
 }

@@ -42,8 +41,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & BLKDEV_IFL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	unsigned int max_discard_sectors;
 	struct bio *bio;
-	struct page *page;
 	int ret = 0;
 
 	if (!q)

@@ -52,36 +51,30 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	while (nr_sects && !ret) {
-		unsigned int sector_size = q->limits.logical_block_size;
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	/*
+	 * Ensure that max_discard_sectors is of the proper
+	 * granularity
+	 */
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	if (q->limits.discard_granularity) {
+		unsigned int disc_sects = q->limits.discard_granularity >> 9;
 
+		max_discard_sectors &= ~(disc_sects - 1);
+	}
+
+	while (nr_sects && !ret) {
 		bio = bio_alloc(gfp_mask, 1);
-		if (!bio)
-			goto out;
+		if (!bio) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
 		if (flags & BLKDEV_IFL_WAIT)
 			bio->bi_private = &wait;
 
-		/*
-		 * Add a zeroed one-sector payload as that's what
-		 * our current implementations need.  If we'll ever need
-		 * more the interface will need revisiting.
-		 */
-		page = alloc_page(gfp_mask | __GFP_ZERO);
-		if (!page)
-			goto out_free_bio;
-		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-			goto out_free_page;
-
-		/*
-		 * And override the bio size - the way discard works we
-		 * touch many more blocks on disk than the actual payload
-		 * length.
-		 */
 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
 			nr_sects -= max_discard_sectors;

@@ -103,13 +96,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			ret = -EIO;
 		bio_put(bio);
 	}
+
 	return ret;
-out_free_page:
-	__free_page(page);
-out_free_bio:
-	bio_put(bio);
-out:
-	return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 

@@ -157,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int ret = 0;
+	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
 	unsigned int sz, issued = 0;

@@ -175,11 +163,14 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			return ret;
 	}
 submit:
+	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
 				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio)
+		if (!bio) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;

@@ -198,6 +189,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
+		ret = 0;
 		issued++;
 		submit_bio(WRITE, bio);
 	}
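For callers the reworked helpers keep their signatures; the visible change is that allocation failures now surface as -ENOMEM instead of falling through the old goto ladder. A sketch of a synchronous discard using the flags that appear in this file:

    #include <linux/blkdev.h>

    static int example_trim_range(struct block_device *bdev,
                                  sector_t start, sector_t nr_sects)
    {
        /* BLKDEV_IFL_WAIT waits for the discard bios to complete;
         * or-in BLKDEV_IFL_BARRIER to issue them as barriers. */
        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
                                    BLKDEV_IFL_WAIT);
    }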
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
+		bio->bi_rw |= (1 << REQ_WRITE);
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
@@ -12,7 +12,6 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int cluster, i, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;

@@ -24,7 +23,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
-	phys_size = nr_phys_segs = 0;
+	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, i) {
 			/*

@@ -180,7 +179,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	}
 
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (rq->cmd_flags & REQ_RW)
+		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
 		sg->page_link &= ~0x02;

@@ -226,7 +225,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);

@@ -250,7 +249,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);
@@ -36,6 +36,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 }
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
+/**
+ * blk_queue_unprep_rq - set an unprepare_request function for queue
+ * @q:		queue
+ * @ufn:	unprepare_request function
+ *
+ * It's possible for a queue to register an unprepare_request callback
+ * which is invoked before the request is finally completed. The goal
+ * of the function is to deallocate any data that was allocated in the
+ * prepare_request callback.
+ *
+ */
+void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
+{
+	q->unprep_rq_fn = ufn;
+}
+EXPORT_SYMBOL(blk_queue_unprep_rq);
+
 /**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q:	queue
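The unprep hook mirrors blk_queue_prep_rq(): whatever prep_rq_fn allocated can be released once the request truly finishes; blk_finish_request() (see the blk-core.c hunks above) invokes it while REQ_DONTPREP is still set. A hedged sketch of a driver pairing the two callbacks, with a hypothetical context structure:

    static int example_prep(struct request_queue *q, struct request *rq)
    {
        rq->special = kmalloc(sizeof(struct example_ctx), GFP_ATOMIC);
        if (!rq->special)
            return BLKPREP_DEFER;
        rq->cmd_flags |= REQ_DONTPREP;  /* prep once, unprep once */
        return BLKPREP_OK;
    }

    static void example_unprep(struct request_queue *q, struct request *rq)
    {
        kfree(rq->special);             /* undo what prep allocated */
        rq->special = NULL;
    }

    /* at queue-init time: */
    blk_queue_prep_rq(q, example_prep);
    blk_queue_unprep_rq(q, example_unprep);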
@@ -180,26 +180,36 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(!blk_queue_nonrot(q), page);
+#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
+static ssize_t \
+queue_show_##name(struct request_queue *q, char *page) \
+{ \
+	int bit; \
+	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
+	return queue_var_show(neg ? !bit : bit, page); \
+} \
+static ssize_t \
+queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+{ \
+	unsigned long val; \
+	ssize_t ret; \
+	ret = queue_var_store(&val, page, count); \
+	if (neg) \
+		val = !val; \
+ \
+	spin_lock_irq(q->queue_lock); \
+	if (val) \
+		queue_flag_set(QUEUE_FLAG_##flag, q); \
+	else \
+		queue_flag_clear(QUEUE_FLAG_##flag, q); \
+	spin_unlock_irq(q->queue_lock); \
+	return ret; \
 }
 
-static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	unsigned long nm;
-	ssize_t ret = queue_var_store(&nm, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (nm)
-		queue_flag_clear(QUEUE_FLAG_NONROT, q);
-	else
-		queue_flag_set(QUEUE_FLAG_NONROT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
+QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
+QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+#undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {

@@ -250,27 +260,6 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
-static ssize_t queue_iostats_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_io_stat(q), page);
-}
-
-static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
-				   size_t count)
-{
-	unsigned long stats;
-	ssize_t ret = queue_var_store(&stats, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (stats)
-		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
-
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,

@@ -352,8 +341,8 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_nonrot_show,
-	.store = queue_nonrot_store,
+	.show = queue_show_nonrot,
+	.store = queue_store_nonrot,
 };
 
 static struct queue_sysfs_entry queue_nomerges_entry = {

@@ -370,8 +359,14 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 
 static struct queue_sysfs_entry queue_iostats_entry = {
 	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_iostats_show,
-	.store = queue_iostats_store,
+	.show = queue_show_iostats,
+	.store = queue_store_iostats,
+};
+
+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_show_random,
+	.store = queue_store_random,
 };
 
 static struct attribute *default_attrs[] = {

@@ -394,6 +389,7 @@ static struct attribute *default_attrs[] = {
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
 	NULL,
 };
 
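For readability, this is roughly what QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to; neg = 1 inverts the bit so the file stays "rotational" for userspace while the flag is stored as non-rotational:

    static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
    {
        int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);

        return queue_var_show(!bit, page);      /* neg == 1 inverts */
    }

    static ssize_t queue_store_nonrot(struct request_queue *q,
                                      const char *page, size_t count)
    {
        unsigned long val;
        ssize_t ret = queue_var_store(&val, page, count);

        val = !val;                             /* neg == 1 inverts */
        spin_lock_irq(q->queue_lock);
        if (val)
            queue_flag_set(QUEUE_FLAG_NONROT, q);
        else
            queue_flag_clear(QUEUE_FLAG_NONROT, q);
        spin_unlock_irq(q->queue_lock);
        return ret;
    }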
@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
  */
 static inline int blk_do_io_stat(struct request *rq)
 {
-	return rq->rq_disk && blk_rq_io_stat(rq) &&
-		(blk_fs_request(rq) || blk_discard_rq(rq));
+	return rq->rq_disk &&
+	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->cmd_type == REQ_TYPE_FS ||
+	        (rq->cmd_flags & REQ_DISCARD));
 }
 
 #endif
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*

@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 		return rq1;
 	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 		return rq2;
-	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
 		return rq1;
-	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+	else if ((rq2->cmd_flags & REQ_META) &&
+		 !(rq1->cmd_flags & REQ_META))
 		return rq2;
 
 	s1 = blk_rq_pos(rq1);

@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq_is_meta(rq)) {
+	if (rq->cmd_flags & REQ_META) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
 	}

@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
-	if (rq_is_meta(rq) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
 		return true;
 
 	/*

@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq_is_meta(rq))
+	if (rq->cmd_flags & REQ_META)
 		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);

@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	unsigned long now;
 
 	now = jiffies;
-	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+		     !!(rq->cmd_flags & REQ_NOIDLE));
 
 	cfq_update_hw_tag(cfqd);
 

@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_slice_expired(cfqd, 1);
 		else if (sync && cfqq_empty &&
 			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+			cfqd->noidle_tree_requires_idle |=
+				!(rq->cmd_flags & REQ_NOIDLE);
 			/*
 			 * Idling is enabled for SYNC_WORKLOAD.
 			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !rq_noidle request
+			 * only if we processed at least one !REQ_NOIDLE request
 			 */
 			if (cfqd->serving_type == SYNC_WORKLOAD
 			    || cfqd->noidle_tree_requires_idle
@@ -535,56 +535,6 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
 	return err;
 }
 
-struct compat_blk_user_trace_setup {
-	char name[32];
-	u16 act_mask;
-	u32 buf_size;
-	u32 buf_nr;
-	compat_u64 start_lba;
-	compat_u64 end_lba;
-	u32 pid;
-};
-#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
-
-static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
-{
-	struct blk_user_trace_setup buts;
-	struct compat_blk_user_trace_setup cbuts;
-	struct request_queue *q;
-	char b[BDEVNAME_SIZE];
-	int ret;
-
-	q = bdev_get_queue(bdev);
-	if (!q)
-		return -ENXIO;
-
-	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
-		return -EFAULT;
-
-	bdevname(bdev, b);
-
-	buts = (struct blk_user_trace_setup) {
-		.act_mask = cbuts.act_mask,
-		.buf_size = cbuts.buf_size,
-		.buf_nr = cbuts.buf_nr,
-		.start_lba = cbuts.start_lba,
-		.end_lba = cbuts.end_lba,
-		.pid = cbuts.pid,
-	};
-	memcpy(&buts.name, &cbuts.name, 32);
-
-	mutex_lock(&bdev->bd_mutex);
-	ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts);
-	mutex_unlock(&bdev->bd_mutex);
-	if (ret)
-		return ret;
-
-	if (copy_to_user(arg, &buts.name, 32))
-		return -EFAULT;
-
-	return 0;
-}
-
 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {

@@ -802,16 +752,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_u64(arg, bdev->bd_inode->i_size);
 
 	case BLKTRACESETUP32:
-		lock_kernel();
-		ret = compat_blk_trace_setup(bdev, compat_ptr(arg));
-		unlock_kernel();
-		return ret;
 	case BLKTRACESTART: /* compatible */
 	case BLKTRACESTOP: /* compatible */
 	case BLKTRACETEARDOWN: /* compatible */
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
-		unlock_kernel();
 		return ret;
 	default:
 		if (disk->fops->compat_ioctl)
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
 		return 0;
 
 	/*

@@ -428,7 +427,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (blk_discard_rq(rq) != blk_discard_rq(pos))
+		if ((rq->cmd_flags & REQ_DISCARD) !=
+		    (pos->cmd_flags & REQ_DISCARD))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;

@@ -558,7 +558,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq))
+		if (rq->cmd_flags & REQ_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 

@@ -644,7 +644,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+		       !(rq->cmd_flags & REQ_DISCARD));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {

@@ -716,7 +717,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 	/*
 	 * toggle ordered color
 	 */
-	if (blk_barrier_rq(rq))
+	if (rq->cmd_flags & REQ_HARDBARRIER)
 		q->ordcolor ^= 1;
 
 	/*

@@ -729,7 +730,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 	 * this request is scheduling boundary, update
 	 * end_sector
 	 */
-	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+	if (rq->cmd_type == REQ_TYPE_FS ||
+	    (rq->cmd_flags & REQ_DISCARD)) {
 		q->end_sector = rq_end_sector(rq);
 		q->boundary_rq = rq;
 	}

@@ -843,7 +845,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+		if ((rq->cmd_flags & REQ_SORTED) &&
+		    e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
 
@@ -163,18 +163,10 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
 	struct gendisk *disk = bdev->bd_disk;
-	int ret;
 
 	if (disk->fops->ioctl)
 		return disk->fops->ioctl(bdev, mode, cmd, arg);
 
-	if (disk->fops->locked_ioctl) {
-		lock_kernel();
-		ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
-		unlock_kernel();
-		return ret;
-	}
-
 	return -ENOTTY;
 }
 /*

@@ -185,8 +177,7 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
 /*
- * always keep this in sync with compat_blkdev_ioctl() and
- * compat_blkdev_locked_ioctl()
+ * always keep this in sync with compat_blkdev_ioctl()
  */
 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			unsigned long arg)

@@ -206,10 +197,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if (ret != -EINVAL && ret != -ENOTTY)
 			return ret;
 
-		lock_kernel();
 		fsync_bdev(bdev);
 		invalidate_bdev(bdev);
-		unlock_kernel();
 		return 0;
 
 	case BLKROSET:

@@ -221,9 +210,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EACCES;
 		if (get_user(n, (int __user *)(arg)))
 			return -EFAULT;
-		lock_kernel();
 		set_device_ro(bdev, n);
-		unlock_kernel();
 		return 0;
 
 	case BLKDISCARD: {

@@ -309,14 +296,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			bd_release(bdev);
 		return ret;
 	case BLKPG:
-		lock_kernel();
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
-		unlock_kernel();
 		break;
 	case BLKRRPART:
-		lock_kernel();
 		ret = blkdev_reread_part(bdev);
-		unlock_kernel();
 		break;
 	case BLKGETSIZE:
 		size = bdev->bd_inode->i_size;

@@ -329,9 +312,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
 	case BLKTRACETEARDOWN:
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
-		unlock_kernel();
 		break;
 	default:
 		ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@@ -1111,10 +1111,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
  */
 static int atapi_drain_needed(struct request *rq)
 {
-	if (likely(!blk_pc_request(rq)))
+	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
 		return 0;
 
-	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
 		return 0;
 
 	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -79,23 +79,28 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode)
 	struct gendisk *disk = bdev->bd_disk;
 	DAC960_Controller_T *p = disk->queue->queuedata;
 	int drive_nr = (long)disk->private_data;
+	int ret = -ENXIO;
 
+	lock_kernel();
 	if (p->FirmwareType == DAC960_V1_Controller) {
 		if (p->V1.LogicalDriveInformation[drive_nr].
 		    LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
-			return -ENXIO;
+			goto out;
 	} else {
 		DAC960_V2_LogicalDeviceInfo_T *i =
 			p->V2.LogicalDeviceInformation[drive_nr];
 		if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
-			return -ENXIO;
+			goto out;
 	}
 
 	check_disk_change(bdev);
 
 	if (!get_capacity(p->disks[drive_nr]))
-		return -ENXIO;
-	return 0;
+		goto out;
+	ret = 0;
+out:
+	unlock_kernel();
+	return ret;
 }
 
 static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -60,6 +60,7 @@
 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <linux/amifdreg.h>
 #include <linux/amifd.h>
 #include <linux/buffer_head.h>

@@ -1423,7 +1424,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long param)
 {
 	struct amiga_floppy_struct *p = bdev->bd_disk->private_data;

@@ -1500,6 +1501,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+		    unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = fd_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 static void fd_probe(int dev)
 {
 	unsigned long code;

@@ -1542,10 +1555,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	int old_dev;
 	unsigned long flags;
 
+	lock_kernel();
 	old_dev = fd_device[drive];
 
-	if (fd_ref[drive] && old_dev != system)
+	if (fd_ref[drive] && old_dev != system) {
+		unlock_kernel();
 		return -EBUSY;
+	}
 
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
 		check_disk_change(bdev);

@@ -1558,8 +1574,10 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 			fd_deselect (drive);
 			rel_fdc();
 
-			if (wrprot)
+			if (wrprot) {
+				unlock_kernel();
 				return -EROFS;
+			}
 		}
 	}
 

@@ -1576,6 +1594,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
 	       unit[drive].type->name, data_types[system].name);
 
+	unlock_kernel();
 	return 0;
 }
 

@@ -1584,6 +1603,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 	struct amiga_floppy_struct *p = disk->private_data;
 	int drive = p - unit;
 
+	lock_kernel();
 	if (unit[drive].dirty == 1) {
 		del_timer (flush_track_timer + drive);
 		non_int_flush_track (drive);

@@ -1597,6 +1617,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 	/* the mod_use counter is handled this way */
 	floppy_off (drive | 0x40000000);
 #endif
+	unlock_kernel();
 	return 0;
 }
 

@@ -1638,7 +1659,7 @@ static const struct block_device_operations floppy_fops = {
 	.owner		= THIS_MODULE,
 	.open		= floppy_open,
 	.release	= floppy_release,
-	.locked_ioctl	= fd_ioctl,
+	.ioctl		= fd_ioctl,
 	.getgeo		= fd_getgeo,
 	.media_changed	= amiga_floppy_change,
 };
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/genhd.h>
 #include <linux/netdevice.h>
+#include <linux/smp_lock.h>
 #include "aoe.h"
 
 static struct kmem_cache *buf_pool_cache;

@@ -124,13 +125,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
 	struct aoedev *d = bdev->bd_disk->private_data;
 	ulong flags;
 
+	lock_kernel();
 	spin_lock_irqsave(&d->lock, flags);
 	if (d->flags & DEVFL_UP) {
 		d->nopen++;
 		spin_unlock_irqrestore(&d->lock, flags);
+		unlock_kernel();
 		return 0;
 	}
 	spin_unlock_irqrestore(&d->lock, flags);
+	unlock_kernel();
 	return -ENODEV;
 }
 

@@ -173,7 +177,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+	} else if (bio->bi_rw & REQ_HARDBARRIER) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	} else if (bio->bi_io_vec == NULL) {
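aoeblk's barrier rejection shows the expected behavior for a bio-based driver that cannot order writes: fail REQ_HARDBARRIER bios with -EOPNOTSUPP so upper layers can fall back. As a generic sketch:

    static int example_make_request(struct request_queue *q, struct bio *bio)
    {
        if (bio->bi_rw & REQ_HARDBARRIER) {
            /* no ordering guarantee available on this device */
            bio_endio(bio, -EOPNOTSUPP);
            return 0;
        }
        /* ... normal submission path elided ... */
        return 0;
    }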
@@ -67,6 +67,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 
 #include <asm/atafd.h>
 #include <asm/atafdreg.h>

@@ -359,7 +360,7 @@ static void finish_fdc( void );
 static void finish_fdc_done( int dummy );
 static void setup_req_params( int drive );
 static void redo_fd_request( void);
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
 		    cmd, unsigned long param);
 static void fd_probe( int drive );
 static int fd_test_drive_present( int drive );

@@ -1480,7 +1481,7 @@ void do_fd_request(struct request_queue * q)
 	atari_enable_irq( IRQ_MFP_FDC );
 }
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long param)
 {
 	struct gendisk *disk = bdev->bd_disk;

@@ -1665,6 +1666,17 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 	}
 }
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+		    unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = fd_locked_ioctl(bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
 
 /* Initialize the 'unit' variable for drive 'drive' */
 

@@ -1838,24 +1850,36 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 }
 
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = floppy_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct atari_floppy_struct *p = disk->private_data;
+	lock_kernel();
 	if (p->ref < 0)
 		p->ref = 0;
 	else if (!p->ref--) {
 		printk(KERN_ERR "floppy_release with fd_ref == 0");
 		p->ref = 0;
 	}
+	unlock_kernel();
 	return 0;
 }
 
 static const struct block_device_operations floppy_fops = {
 	.owner		= THIS_MODULE,
-	.open		= floppy_open,
+	.open		= floppy_unlocked_open,
 	.release	= floppy_release,
-	.locked_ioctl	= fd_ioctl,
+	.ioctl		= fd_ioctl,
 	.media_changed	= check_floppy_change,
 	.revalidate_disk= floppy_revalidate,
 };
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/highmem.h>
+#include <linux/smp_lock.h>
 #include <linux/radix-tree.h>
 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */
 #include <linux/slab.h>

@@ -340,7 +341,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 			get_capacity(bdev->bd_disk))
 		goto out;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
 		discard_from_brd(brd, sector, bio->bi_size);
 		goto out;

@@ -401,6 +402,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
 	 * ram device BLKFLSBUF has special semantics, we want to actually
 	 * release and destroy the ramdisk data.
 	 */
+	lock_kernel();
 	mutex_lock(&bdev->bd_mutex);
 	error = -EBUSY;
 	if (bdev->bd_openers <= 1) {

@@ -417,13 +419,14 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
 		error = 0;
 	}
 	mutex_unlock(&bdev->bd_mutex);
+	unlock_kernel();
 
 	return error;
 }
 
 static const struct block_device_operations brd_fops = {
 	.owner =		THIS_MODULE,
-	.locked_ioctl =		brd_ioctl,
+	.ioctl =		brd_ioctl,
 #ifdef CONFIG_BLK_DEV_XIP
 	.direct_access =	brd_direct_access,
 #endif

@@ -479,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
 	if (!brd->brd_queue)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
-	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
+	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
File diff suppressed because it is too large.
@ -25,7 +25,7 @@ struct access_method {
|
||||
void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
|
||||
void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
|
||||
unsigned long (*fifo_full)(ctlr_info_t *h);
|
||||
unsigned long (*intr_pending)(ctlr_info_t *h);
|
||||
bool (*intr_pending)(ctlr_info_t *h);
|
||||
unsigned long (*command_completed)(ctlr_info_t *h);
|
||||
};
|
||||
typedef struct _drive_info_struct
|
||||
@ -85,8 +85,8 @@ struct ctlr_info
|
||||
int max_cmd_sgentries;
|
||||
SGDescriptor_struct **cmd_sg_list;
|
||||
|
||||
# define DOORBELL_INT 0
|
||||
# define PERF_MODE_INT 1
|
||||
# define PERF_MODE_INT 0
|
||||
# define DOORBELL_INT 1
|
||||
# define SIMPLE_MODE_INT 2
|
||||
# define MEMQ_MODE_INT 3
|
||||
unsigned int intr[4];
|
||||
@ -137,10 +137,27 @@ struct ctlr_info
|
||||
struct list_head scan_list;
|
||||
struct completion scan_wait;
|
||||
struct device dev;
|
||||
/*
|
||||
* Performant mode tables.
|
||||
*/
|
||||
u32 trans_support;
|
||||
u32 trans_offset;
|
||||
struct TransTable_struct *transtable;
|
||||
unsigned long transMethod;
|
||||
|
||||
/*
|
||||
* Performant mode completion buffer
|
||||
*/
|
||||
u64 *reply_pool;
|
||||
dma_addr_t reply_pool_dhandle;
|
||||
u64 *reply_pool_head;
|
||||
size_t reply_pool_size;
|
||||
unsigned char reply_pool_wraparound;
|
||||
u32 *blockFetchTable;
|
||||
};
|
||||
|
||||
/* Defining the diffent access_menthods */
|
||||
/*
|
||||
/* Defining the diffent access_methods
|
||||
*
|
||||
* Memory mapped FIFO interface (SMART 53xx cards)
|
||||
*/
|
||||
#define SA5_DOORBELL 0x20
|
||||
@ -159,19 +176,47 @@ struct ctlr_info
|
||||
#define SA5B_INTR_PENDING 0x04
|
||||
#define FIFO_EMPTY 0xffffffff
|
||||
#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
|
||||
/* Perf. mode flags */
|
||||
#define SA5_PERF_INTR_PENDING 0x04
|
||||
#define SA5_PERF_INTR_OFF 0x05
|
||||
#define SA5_OUTDB_STATUS_PERF_BIT 0x01
|
||||
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
|
||||
#define SA5_OUTDB_CLEAR 0xA0
|
||||
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
|
||||
#define SA5_OUTDB_STATUS 0x9C
|
||||
|
||||
|
||||
#define CISS_ERROR_BIT 0x02
|
||||
|
||||
#define CCISS_INTR_ON 1
|
||||
#define CCISS_INTR_OFF 0
|
||||
|
||||
|
||||
/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
|
||||
* to become ready, in seconds, before giving up on it.
|
||||
* CCISS_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
|
||||
* between polling the board to see if it is ready, in
|
||||
* milliseconds. CCISS_BOARD_READY_ITERATIONS is derived
|
||||
* the above.
|
||||
*/
|
||||
#define CCISS_BOARD_READY_WAIT_SECS (120)
|
||||
#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
|
||||
#define CCISS_BOARD_READY_ITERATIONS \
|
||||
((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
|
||||
CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
|
||||
#define CCISS_POST_RESET_PAUSE_MSECS (3000)
#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
#define CCISS_POST_RESET_NOOP_RETRIES (12)

/*
    Send the command to the hardware
*/
static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
{
#ifdef CCISS_DEBUG
    printk("Sending %x - down to controller\n", c->busaddr );
#endif /* CCISS_DEBUG */
    printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
        h->ctlr, c->busaddr);
#endif /* CCISS_DEBUG */
    writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
    h->commands_outstanding++;
    if ( h->commands_outstanding > h->max_outstanding)
@ -214,6 +259,20 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
    }
}

/* Performant mode intr_mask */
static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
{
    if (val) { /* turn on interrupts */
        h->interrupts_enabled = 1;
        writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
    } else {
        h->interrupts_enabled = 0;
        writel(SA5_PERF_INTR_OFF,
            h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
    }
}

/*
 * Returns true if fifo is full.
 *
@ -250,10 +309,44 @@ static unsigned long SA5_completed(ctlr_info_t *h)
    return ( register_value);

}

/* Performant mode command completed */
static unsigned long SA5_performant_completed(ctlr_info_t *h)
{
    unsigned long register_value = FIFO_EMPTY;

    /* flush the controller write of the reply queue by reading
     * outbound doorbell status register.
     */
    register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
    /* msi auto clears the interrupt pending bit. */
    if (!(h->msi_vector || h->msix_vector)) {
        writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
        /* Do a read in order to flush the write to the controller
         * (as per spec.)
         */
        register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
    }

    if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
        register_value = *(h->reply_pool_head);
        (h->reply_pool_head)++;
        h->commands_outstanding--;
    } else {
        register_value = FIFO_EMPTY;
    }
    /* Check for wraparound */
    if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
        h->reply_pool_head = h->reply_pool;
        h->reply_pool_wraparound ^= 1;
    }

    return register_value;
}
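The reply ring consumed above relies on a toggling parity bit rather than a shared read/write index: the controller writes completed tags into reply_pool with the low bit set to the current pass's parity, and the host consumes entries only while that bit matches reply_pool_wraparound, flipping its expected parity each time the head wraps. A minimal host-side sketch of the same technique, with hypothetical ring/head/size/parity fields standing in for the driver's state:

/* Hedged sketch of the toggling-parity completion ring above.  The
 * struct fields are illustrative stand-ins for h->reply_pool,
 * h->reply_pool_head, h->max_commands and h->reply_pool_wraparound. */
struct reply_ring {
    unsigned long *ring;   /* controller writes tags here, low bit = parity */
    unsigned long *head;   /* next entry the host will consume */
    unsigned int size;     /* number of entries */
    unsigned char parity;  /* parity expected on this pass, 0 or 1 */
};

/* Returns a completed tag, or ~0UL (FIFO_EMPTY) if nothing new. */
static unsigned long ring_pop(struct reply_ring *r)
{
    unsigned long tag = ~0UL;

    if ((*r->head & 1) == r->parity) {  /* entry written on this pass */
        tag = *r->head;
        r->head++;
    }
    if (r->head == r->ring + r->size) { /* wrapped: expect flipped bit */
        r->head = r->ring;
        r->parity ^= 1;
    }
    return tag;
}

The parity flip means stale entries left over from the previous pass can never be mistaken for new completions, so neither side has to zero the ring between passes.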
/*
 * Returns true if an interrupt is pending..
 */
static unsigned long SA5_intr_pending(ctlr_info_t *h)
static bool SA5_intr_pending(ctlr_info_t *h)
{
    unsigned long register_value =
        readl(h->vaddr + SA5_INTR_STATUS);
@ -268,7 +361,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h)
/*
 * Returns true if an interrupt is pending..
 */
static unsigned long SA5B_intr_pending(ctlr_info_t *h)
static bool SA5B_intr_pending(ctlr_info_t *h)
{
    unsigned long register_value =
        readl(h->vaddr + SA5_INTR_STATUS);
@ -280,6 +373,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h)
    return 0 ;
}

static bool SA5_performant_intr_pending(ctlr_info_t *h)
{
    unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

    if (!register_value)
        return false;

    if (h->msi_vector || h->msix_vector)
        return true;

    /* Read outbound doorbell to flush */
    register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
    return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

static struct access_method SA5_access = {
    SA5_submit_command,
@ -297,6 +404,14 @@ static struct access_method SA5B_access = {
    SA5_completed,
};

static struct access_method SA5_performant_access = {
    SA5_submit_command,
    SA5_performant_intr_mask,
    SA5_fifo_full,
    SA5_performant_intr_pending,
    SA5_performant_completed,
};

struct board_type {
    __u32 board_id;
    char *product_name;
@ -304,6 +419,4 @@ struct board_type {
    int nr_cmds; /* Max cmds this kind of ctlr can handle. */
};

#define CCISS_LOCK(i)   (&hba[i]->lock)

#endif /* CCISS_H */
@ -52,8 +52,10 @@
/* Configuration Table */
#define CFGTBL_ChangeReq        0x00000001l
#define CFGTBL_AccCmds          0x00000001l
#define DOORBELL_CTLR_RESET     0x00000004l

#define CFGTBL_Trans_Simple     0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l

#define CFGTBL_BusType_Ultra2   0x00000001l
#define CFGTBL_BusType_Ultra3   0x00000002l
@ -173,12 +175,15 @@ typedef struct _SGDescriptor_struct {
 * PAD_64 can be adjusted independently as needed for 32-bit
 * and 64-bits systems.
 */
#define COMMANDLIST_ALIGNMENT (8)
#define COMMANDLIST_ALIGNMENT (32)
#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
#define DIRECT_LOOKUP_BIT 0x10
#define DIRECT_LOOKUP_SHIFT 5

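The IS_64_BIT expression is a compile-time constant: on an LP64 build sizeof(long) is 8, so (8 - 4)/4 evaluates to 1, while a 32-bit build gives (4 - 4)/4 = 0; PADSIZE therefore selects PAD_64 (4) or PAD_32 (0) without an #ifdef. A small stand-alone check of the same arithmetic (the assertion is illustrative, not part of the driver):

#include <assert.h>

#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)

int main(void)
{
    /* On LP64: IS_64_BIT == 1, PADSIZE == 4; on ILP32: 0 and 0. */
    assert(PADSIZE == (sizeof(long) == 8 ? 4 : 0));
    return 0;
}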
typedef struct _CommandList_struct {
    CommandListHeader_struct Header;
    RequestBlock_struct Request;
@ -195,7 +200,7 @@ typedef struct _CommandList_struct {
    struct completion *waiting;
    int retry_count;
    void * scsi_cmd;
    char pad[PADSIZE];
    char pad[PADSIZE];
} CommandList_struct;

/* Configuration Table Structure */
@ -209,12 +214,15 @@ typedef struct _HostWrite_struct {
typedef struct _CfgTable_struct {
    BYTE Signature[4];
    DWORD SpecValence;
#define SIMPLE_MODE     0x02
#define PERFORMANT_MODE 0x04
#define MEMQ_MODE       0x08
    DWORD TransportSupport;
    DWORD TransportActive;
    HostWrite_struct HostWrite;
    DWORD CmdsOutMax;
    DWORD BusTypes;
    DWORD Reserved;
    DWORD TransMethodOffset;
    BYTE ServerName[16];
    DWORD HeartBeat;
    DWORD SCSI_Prefetch;
@ -222,6 +230,28 @@ typedef struct _CfgTable_struct {
    DWORD MaxLogicalUnits;
    DWORD MaxPhysicalDrives;
    DWORD MaxPhysicalDrivesPerLogicalUnit;
    DWORD MaxPerformantModeCommands;
    u8 reserved[0x78 - 0x58];
    u32 misc_fw_support; /* offset 0x78 */
#define MISC_FW_DOORBELL_RESET (0x02)
} CfgTable_struct;

struct TransTable_struct {
    u32 BlockFetch0;
    u32 BlockFetch1;
    u32 BlockFetch2;
    u32 BlockFetch3;
    u32 BlockFetch4;
    u32 BlockFetch5;
    u32 BlockFetch6;
    u32 BlockFetch7;
    u32 RepQSize;
    u32 RepQCount;
    u32 RepQCtrAddrLow32;
    u32 RepQCtrAddrHigh32;
    u32 RepQAddr0Low32;
    u32 RepQAddr0High32;
};

#pragma pack()
#endif /* CCISS_CMD_H */
File diff suppressed because it is too large
@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
@ -157,7 +158,7 @@ static int sendcmd(
    unsigned int blkcnt,
    unsigned int log_unit );

static int ida_open(struct block_device *bdev, fmode_t mode);
static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
static int ida_release(struct gendisk *disk, fmode_t mode);
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@ -195,9 +196,9 @@ static inline ctlr_info_t *get_host(struct gendisk *disk)

static const struct block_device_operations ida_fops = {
    .owner      = THIS_MODULE,
    .open       = ida_open,
    .open       = ida_unlocked_open,
    .release    = ida_release,
    .locked_ioctl   = ida_ioctl,
    .ioctl      = ida_ioctl,
    .getgeo     = ida_getgeo,
    .revalidate_disk= ida_revalidate,
};
@ -840,13 +841,29 @@ static int ida_open(struct block_device *bdev, fmode_t mode)
    return 0;
}

static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
{
    int ret;

    lock_kernel();
    ret = ida_open(bdev, mode);
    unlock_kernel();

    return ret;
}

/*
 * Close.  Sync first.
 */
static int ida_release(struct gendisk *disk, fmode_t mode)
{
    ctlr_info_t *host = get_host(disk);
    ctlr_info_t *host;

    lock_kernel();
    host = get_host(disk);
    host->usage_count--;
    unlock_kernel();

    return 0;
}

@ -1128,7 +1145,7 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
 * setting readahead and submitting commands from userspace to the controller.
 */
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
    drv_info_t *drv = get_drv(bdev->bd_disk);
    ctlr_info_t *host = get_host(bdev->bd_disk);
@ -1162,7 +1179,8 @@ static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
        return error;
    case IDAGETCTLRSIG:
        if (!arg) return -EINVAL;
        put_user(host->ctlr_sig, (int __user *)arg);
        if (put_user(host->ctlr_sig, (int __user *)arg))
            return -EFAULT;
        return 0;
    case IDAREVALIDATEVOLS:
        if (MINOR(bdev->bd_dev) != 0)
@ -1170,7 +1188,8 @@ static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
        return revalidate_allvol(host);
    case IDADRIVERVERSION:
        if (!arg) return -EINVAL;
        put_user(DRIVER_VERSION, (unsigned long __user *)arg);
        if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
            return -EFAULT;
        return 0;
    case IDAGETPCIINFO:
    {
@ -1192,6 +1211,19 @@ static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
    }

}

static int ida_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long param)
{
    int ret;

    lock_kernel();
    ret = ida_locked_ioctl(bdev, mode, cmd, param);
    unlock_kernel();

    return ret;
}

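The hunk above is the standard BKL-pushdown shape used throughout this merge: the old .locked_ioctl entry point becomes a *_locked_ioctl helper, and a thin wrapper takes lock_kernel()/unlock_kernel() explicitly so the block layer no longer has to. The same shape, reduced to a hedged sketch (the my_* names are illustrative):

/* Sketch of the BKL pushdown pattern; my_locked_ioctl stands in for
 * a driver's original ioctl body, which still assumes the BKL. */
static int my_locked_ioctl(struct block_device *bdev, fmode_t mode,
               unsigned int cmd, unsigned long arg)
{
    /* ... original ioctl body ... */
    return 0;
}

static int my_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned int cmd, unsigned long arg)
{
    int ret;

    lock_kernel();          /* BKL now taken by the driver itself */
    ret = my_locked_ioctl(bdev, mode, cmd, arg);
    unlock_kernel();

    return ret;
}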
/*
 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
 * The command block (io) has already been copied to kernel space for us,
@ -1225,17 +1257,11 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
    /* Pre submit processing */
    switch(io->cmd) {
    case PASSTHRU_A:
        p = kmalloc(io->sg[0].size, GFP_KERNEL);
        if (!p)
        {
            error = -ENOMEM;
            cmd_free(h, c, 0);
            return(error);
        }
        if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
            kfree(p);
            cmd_free(h, c, 0);
            return -EFAULT;
        p = memdup_user(io->sg[0].addr, io->sg[0].size);
        if (IS_ERR(p)) {
            error = PTR_ERR(p);
            cmd_free(h, c, 0);
            return error;
        }
        c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
            sizeof(ida_ioctl_t),
@ -1266,18 +1292,12 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
    case DIAG_PASS_THRU:
    case COLLECT_BUFFER:
    case WRITE_FLASH_ROM:
        p = kmalloc(io->sg[0].size, GFP_KERNEL);
        if (!p)
        {
            error = -ENOMEM;
            cmd_free(h, c, 0);
            return(error);
        p = memdup_user(io->sg[0].addr, io->sg[0].size);
        if (IS_ERR(p)) {
            error = PTR_ERR(p);
            cmd_free(h, c, 0);
            return error;
        }
        if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
            kfree(p);
            cmd_free(h, c, 0);
            return -EFAULT;
        }
        c->req.sg[0].size = io->sg[0].size;
        c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
            c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
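memdup_user() folds the kmalloc()/copy_from_user()/kfree() dance above into a single call that returns either the new kernel buffer or an ERR_PTR, which is why both error paths collapse to PTR_ERR(). A hedged sketch of the equivalent usage (ubuf and len are illustrative):

static int copy_ioctl_arg(const void __user *ubuf, size_t len)
{
    void *p = memdup_user(ubuf, len);   /* kmalloc + copy_from_user */

    if (IS_ERR(p))
        return PTR_ERR(p);              /* -ENOMEM or -EFAULT, never NULL */
    /* ... use p ... */
    kfree(p);
    return 0;
}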
@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
    md_io.error = 0;

    if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
        rw |= (1 << BIO_RW_BARRIER);
    rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
        rw |= REQ_HARDBARRIER;
    rw |= REQ_UNPLUG | REQ_SYNC;

 retry:
    bio = bio_alloc(GFP_NOIO, 1);
@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
    /* check for unsupported barrier op.
     * would rather check on EOPNOTSUPP, but that is not reliable.
     * don't try again for ANY return value != 0 */
    if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
    if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
        /* Try again with no barrier */
        dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
        set_bit(MD_NO_BARRIER, &mdev->flags);
        rw &= ~(1 << BIO_RW_BARRIER);
        rw &= ~REQ_HARDBARRIER;
        bio_put(bio);
        goto retry;
    }
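The retry logic above is the usual fallback for optional barrier support: submit once with REQ_HARDBARRIER, and if the device rejects it, latch a "no barriers" flag, strip the flag, and resubmit. Reduced to a hedged sketch (struct my_dev, submit_once() and the no_barrier field are illustrative stand-ins, not DRBD's names):

struct my_dev { int no_barrier; };
extern int submit_once(struct my_dev *dev, int rw);  /* 1 on success */

static int md_write(struct my_dev *dev, int rw)
{
    if (!dev->no_barrier)
        rw |= REQ_HARDBARRIER;
retry:
    if (!submit_once(dev, rw) && (rw & REQ_HARDBARRIER)) {
        dev->no_barrier = 1;        /* device rejected the barrier */
        rw &= ~REQ_HARDBARRIER;     /* try again without it */
        goto retry;
    }
    return 0;
}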
@ -550,12 +550,6 @@ struct p_delay_probe {
    u32 offset;     /* usecs the probe got sent after the reference time point */
} __packed;

struct delay_probe {
    struct list_head list;
    unsigned int seq_num;
    struct timeval time;
};

/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@ -942,11 +936,9 @@ struct drbd_conf {
    unsigned int ko_count;
    struct drbd_work resync_work,
        unplug_work,
        md_sync_work,
        delay_probe_work;
        md_sync_work;
    struct timer_list resync_timer;
    struct timer_list md_sync_timer;
    struct timer_list delay_probe_timer;

    /* Used after attach while negotiating new disk state. */
    union drbd_state new_state_tmp;
@ -1062,12 +1054,6 @@ struct drbd_conf {
    u64 ed_uuid; /* UUID of the exposed data */
    struct mutex state_mutex;
    char congestion_reason;  /* Why we where congested... */
    struct list_head delay_probes; /* protected by peer_seq_lock */
    int data_delay;   /* Delay of packets on the data-sock behind meta-sock */
    unsigned int delay_seq; /* To generate sequence numbers of delay probes */
    struct timeval dps_time; /* delay-probes-start-time */
    unsigned int dp_volume_last; /* send_cnt of last delay probe */
    int c_sync_rate; /* current resync rate after delay_probe magic */
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@ -2184,43 +2184,6 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
    return ok;
}

static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
{
    struct p_delay_probe dp;
    int offset, ok = 0;
    struct timeval now;

    mutex_lock(&ds->mutex);
    if (likely(ds->socket)) {
        do_gettimeofday(&now);
        offset = now.tv_usec - mdev->dps_time.tv_usec +
             (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
        dp.seq_num = cpu_to_be32(mdev->delay_seq);
        dp.offset = cpu_to_be32(offset);

        ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
                    (struct p_header *)&dp, sizeof(dp), 0);
    }
    mutex_unlock(&ds->mutex);

    return ok;
}

static int drbd_send_delay_probes(struct drbd_conf *mdev)
{
    int ok;

    mdev->delay_seq++;
    do_gettimeofday(&mdev->dps_time);
    ok = drbd_send_delay_probe(mdev, &mdev->meta);
    ok = ok && drbd_send_delay_probe(mdev, &mdev->data);

    mdev->dp_volume_last = mdev->send_cnt;
    mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);

    return ok;
}

/* called on sndtimeo
 * returns FALSE if we should retry,
 * TRUE if we think connection is dead
@ -2369,31 +2332,6 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
    return 1;
}

static void consider_delay_probes(struct drbd_conf *mdev)
{
    if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
        return;

    if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
        drbd_send_delay_probes(mdev);
}

static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
    if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
        drbd_send_delay_probes(mdev);

    return 1;
}

static void delay_probe_timer_fn(unsigned long data)
{
    struct drbd_conf *mdev = (struct drbd_conf *) data;

    if (list_empty(&mdev->delay_probe_work.list))
        drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
}

/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
@ -2425,15 +2363,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
    /* NOTE: no need to check if barriers supported here as we would
     * not pass the test in make_request_common in that case
     */
    if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
    if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
        dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
        /* dp_flags |= DP_HARDBARRIER; */
    }
    if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
    if (req->master_bio->bi_rw & REQ_SYNC)
        dp_flags |= DP_RW_SYNC;
    /* for now handle SYNCIO and UNPLUG
     * as if they still were one and the same flag */
    if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
    if (req->master_bio->bi_rw & REQ_UNPLUG)
        dp_flags |= DP_RW_SYNC;
    if (mdev->state.conn >= C_SYNC_SOURCE &&
        mdev->state.conn <= C_PAUSED_SYNC_T)
@ -2457,9 +2395,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)

    drbd_put_data_sock(mdev);

    if (ok)
        consider_delay_probes(mdev);

    return ok;
}

@ -2506,9 +2441,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,

    drbd_put_data_sock(mdev);

    if (ok)
        consider_delay_probes(mdev);

    return ok;
}

@ -2604,6 +2536,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
    unsigned long flags;
    int rv = 0;

    lock_kernel();
    spin_lock_irqsave(&mdev->req_lock, flags);
    /* to have a stable mdev->state.role
     * and no race with updating open_cnt */
@ -2618,6 +2551,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
    if (!rv)
        mdev->open_cnt++;
    spin_unlock_irqrestore(&mdev->req_lock, flags);
    unlock_kernel();

    return rv;
}
@ -2625,7 +2559,9 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
static int drbd_release(struct gendisk *gd, fmode_t mode)
{
    struct drbd_conf *mdev = gd->private_data;
    lock_kernel();
    mdev->open_cnt--;
    unlock_kernel();
    return 0;
}

@ -2660,9 +2596,20 @@ static void drbd_unplug_fn(struct request_queue *q)

static void drbd_set_defaults(struct drbd_conf *mdev)
{
    mdev->sync_conf.after = DRBD_AFTER_DEF;
    mdev->sync_conf.rate = DRBD_RATE_DEF;
    mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
    /* This way we get a compile error when sync_conf grows,
       and we forgot to initialize it here */
    mdev->sync_conf = (struct syncer_conf) {
        /* .rate = */           DRBD_RATE_DEF,
        /* .after = */          DRBD_AFTER_DEF,
        /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
        /* .verify_alg = */     {}, 0,
        /* .cpu_mask = */       {}, 0,
        /* .csums_alg = */      {}, 0,
        /* .use_rle = */        0
    };

    /* Have to use that way, because the layout differs between
       big endian and little endian */
    mdev->state = (union drbd_state) {
        { .role = R_SECONDARY,
          .peer = R_UNKNOWN,
@ -2721,24 +2668,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
    INIT_LIST_HEAD(&mdev->unplug_work.list);
    INIT_LIST_HEAD(&mdev->md_sync_work.list);
    INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
    INIT_LIST_HEAD(&mdev->delay_probes);
    INIT_LIST_HEAD(&mdev->delay_probe_work.list);

    mdev->resync_work.cb  = w_resync_inactive;
    mdev->unplug_work.cb  = w_send_write_hint;
    mdev->md_sync_work.cb = w_md_sync;
    mdev->bm_io_work.w.cb = w_bitmap_io;
    mdev->delay_probe_work.cb = w_delay_probes;
    init_timer(&mdev->resync_timer);
    init_timer(&mdev->md_sync_timer);
    init_timer(&mdev->delay_probe_timer);
    mdev->resync_timer.function = resync_timer_fn;
    mdev->resync_timer.data = (unsigned long) mdev;
    mdev->md_sync_timer.function = md_sync_timer_fn;
    mdev->md_sync_timer.data = (unsigned long) mdev;
    mdev->delay_probe_timer.function = delay_probe_timer_fn;
    mdev->delay_probe_timer.data = (unsigned long) mdev;


    init_waitqueue_head(&mdev->misc_wait);
    init_waitqueue_head(&mdev->state_wait);
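The drbd_set_defaults() rewrite above swaps per-field assignments for a positional initializer with one commented slot per member, so that the defaults cannot silently fall out of sync with struct syncer_conf: an extra initializer is a hard compile error, and a missing one is flagged on builds with stricter initializer warnings. A hedged sketch of the same technique on an illustrative struct (not DRBD's):

/* Sketch of the grow-the-struct compile check used above; struct and
 * values are illustrative. */
struct conf {
    int rate;
    int after;
    int al_extents;
};

static struct conf defaults(void)
{
    /* One positional entry per member, in declaration order.  Removing
     * a member breaks this line immediately; adding one is caught when
     * -Wmissing-field-initializers (or similar) is in effect. */
    return (struct conf) {
        /* .rate = */       250,
        /* .after = */      -1,
        /* .al_extents = */ 127,
    };
}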
@ -1557,10 +1557,6 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
        sc.rate       = DRBD_RATE_DEF;
        sc.after      = DRBD_AFTER_DEF;
        sc.al_extents = DRBD_AL_EXTENTS_DEF;
        sc.dp_volume  = DRBD_DP_VOLUME_DEF;
        sc.dp_interval = DRBD_DP_INTERVAL_DEF;
        sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
        sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
    } else
        memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

@ -73,21 +73,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
    seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
    /* if more than 1 GB display in MB */
    if (mdev->rs_total > 0x100000L)
        seq_printf(seq, "(%lu/%lu)M",
        seq_printf(seq, "(%lu/%lu)M\n\t",
            (unsigned long) Bit2KB(rs_left >> 10),
            (unsigned long) Bit2KB(mdev->rs_total >> 10));
    else
        seq_printf(seq, "(%lu/%lu)K",
        seq_printf(seq, "(%lu/%lu)K\n\t",
            (unsigned long) Bit2KB(rs_left),
            (unsigned long) Bit2KB(mdev->rs_total));

    if (mdev->state.conn == C_SYNC_TARGET)
        seq_printf(seq, " queue_delay: %d.%d ms\n\t",
            mdev->data_delay / 1000,
            (mdev->data_delay % 1000) / 100);
    else if (mdev->state.conn == C_SYNC_SOURCE)
        seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);

    /* see drivers/md/md.c
     * We do not want to overflow, so the order of operands and
     * the * 100 / 100 trick are important. We do a +1 to be
@ -135,14 +128,6 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
    else
        seq_printf(seq, " (%ld)", dbdt);

    if (mdev->state.conn == C_SYNC_TARGET) {
        if (mdev->c_sync_rate > 1000)
            seq_printf(seq, " want: %d,%03d",
                mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
        else
            seq_printf(seq, " want: %d", mdev->c_sync_rate);
    }

    seq_printf(seq, " K/sec\n");
}
@ -1180,7 +1180,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
    bio->bi_sector = sector;
    bio->bi_bdev = mdev->ldev->backing_bdev;
    /* we special case some flags in the multi-bio case, see below
     * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
     * (REQ_UNPLUG, REQ_HARDBARRIER) */
    bio->bi_rw = rw;
    bio->bi_private = e;
    bio->bi_end_io = drbd_endio_sec;
@ -1209,16 +1209,16 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        bios = bios->bi_next;
        bio->bi_next = NULL;

        /* strip off BIO_RW_UNPLUG unless it is the last bio */
        /* strip off REQ_UNPLUG unless it is the last bio */
        if (bios)
            bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
            bio->bi_rw &= ~REQ_UNPLUG;

        drbd_generic_make_request(mdev, fault_type, bio);

        /* strip off BIO_RW_BARRIER,
        /* strip off REQ_HARDBARRIER,
         * unless it is the first or last bio */
        if (bios && bios->bi_next)
            bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
            bios->bi_rw &= ~REQ_HARDBARRIER;
    } while (bios);
    maybe_kick_lo(mdev);
    return 0;
@ -1233,7 +1233,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
}

/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways (unused in this callback)
@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
    (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
    so that we can finish that epoch in drbd_may_finish_epoch().
    That is necessary if we already have a long chain of Epochs, before
    we realize that BIO_RW_BARRIER is actually not supported */
    we realize that REQ_HARDBARRIER is actually not supported */

    /* As long as the -ENOTSUPP on the barrier is reported immediately
       that will never trigger. If it is reported late, we will just
@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
        epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
        if (epoch == e->epoch) {
            set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
            rw |= (1<<BIO_RW_BARRIER);
            rw |= REQ_HARDBARRIER;
            e->flags |= EE_IS_BARRIER;
        } else {
            if (atomic_read(&epoch->epoch_size) > 1 ||
                !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
                set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
                set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
                rw |= (1<<BIO_RW_BARRIER);
                rw |= REQ_HARDBARRIER;
                e->flags |= EE_IS_BARRIER;
            }
        }
@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
    dp_flags = be32_to_cpu(p->dp_flags);
    if (dp_flags & DP_HARDBARRIER) {
        dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
        /* rw |= (1<<BIO_RW_BARRIER); */
        /* rw |= REQ_HARDBARRIER; */
    }
    if (dp_flags & DP_RW_SYNC)
        rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
        rw |= REQ_SYNC | REQ_UNPLUG;
    if (dp_flags & DP_MAY_SET_IN_SYNC)
        e->flags |= EE_MAY_SET_IN_SYNC;

@ -3555,14 +3555,15 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
    return ok;
}

static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
{
    /* TODO zero copy sink :) */
    static char sink[128];
    int size, want, r;

    dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
         h->command, h->length);
    if (!silent)
        dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
             h->command, h->length);

    size = h->length;
    while (size > 0) {
@ -3574,6 +3575,16 @@ static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
    return size == 0;
}

static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
{
    return receive_skip_(mdev, h, 0);
}

static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
{
    return receive_skip_(mdev, h, 1);
}

static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
{
    if (mdev->state.disk >= D_INCONSISTENT)
@ -3586,92 +3597,6 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
    return TRUE;
}

static void timeval_sub_us(struct timeval* tv, unsigned int us)
{
    tv->tv_sec -= us / 1000000;
    us = us % 1000000;
    if (tv->tv_usec > us) {
        tv->tv_usec += 1000000;
        tv->tv_sec--;
    }
    tv->tv_usec -= us;
}

static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
{
    struct delay_probe *dp;
    struct list_head *le;
    struct timeval now;
    int seq_num;
    int offset;
    int data_delay;

    seq_num = be32_to_cpu(p->seq_num);
    offset  = be32_to_cpu(p->offset);

    spin_lock(&mdev->peer_seq_lock);
    if (!list_empty(&mdev->delay_probes)) {
        if (from == USE_DATA_SOCKET)
            le = mdev->delay_probes.next;
        else
            le = mdev->delay_probes.prev;

        dp = list_entry(le, struct delay_probe, list);

        if (dp->seq_num == seq_num) {
            list_del(le);
            spin_unlock(&mdev->peer_seq_lock);
            do_gettimeofday(&now);
            timeval_sub_us(&now, offset);
            data_delay =
                now.tv_usec - dp->time.tv_usec +
                (now.tv_sec - dp->time.tv_sec) * 1000000;

            if (data_delay > 0)
                mdev->data_delay = data_delay;

            kfree(dp);
            return;
        }

        if (dp->seq_num > seq_num) {
            spin_unlock(&mdev->peer_seq_lock);
            dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
            return; /* Do not alloca a struct delay_probe.... */
        }
    }
    spin_unlock(&mdev->peer_seq_lock);

    dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
    if (!dp) {
        dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
        return;
    }

    dp->seq_num = seq_num;
    do_gettimeofday(&dp->time);
    timeval_sub_us(&dp->time, offset);

    spin_lock(&mdev->peer_seq_lock);
    if (from == USE_DATA_SOCKET)
        list_add(&dp->list, &mdev->delay_probes);
    else
        list_add_tail(&dp->list, &mdev->delay_probes);
    spin_unlock(&mdev->peer_seq_lock);
}

static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
{
    struct p_delay_probe *p = (struct p_delay_probe *)h;

    ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
    if (drbd_recv(mdev, h->payload, h->length) != h->length)
        return FALSE;

    got_delay_probe(mdev, USE_DATA_SOCKET, p);
    return TRUE;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);

static drbd_cmd_handler_f drbd_default_handler[] = {
@ -3695,7 +3620,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
    [P_OV_REQUEST]      = receive_DataRequest,
    [P_OV_REPLY]        = receive_DataRequest,
    [P_CSUM_RS_REQUEST] = receive_DataRequest,
    [P_DELAY_PROBE]     = receive_delay_probe,
    [P_DELAY_PROBE]     = receive_skip_silent,
    /* anything missing from this table is in
     * the asender_tbl, see get_asender_cmd */
    [P_MAX_CMD]         = NULL,
@ -4472,11 +4397,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
    return TRUE;
}

static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
{
    struct p_delay_probe *p = (struct p_delay_probe *)h;

    got_delay_probe(mdev, USE_META_SOCKET, p);
    /* IGNORE */
    return TRUE;
}

@ -4504,7 +4427,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
    [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
    [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
    [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
    [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
    [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
    [P_MAX_CMD]         = { 0, NULL },
};
    if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
     * because of those XXX, this is not yet enabled,
     * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
     */
    if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
    if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
        /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
        bio_endio(bio, -EOPNOTSUPP);
        return 0;
@ -424,18 +424,6 @@ void resync_timer_fn(unsigned long data)
        drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}

static int calc_resync_rate(struct drbd_conf *mdev)
{
    int d = mdev->data_delay / 1000; /* us -> ms */
    int td = mdev->sync_conf.throttle_th * 100;  /* 0.1s -> ms */
    int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */
    int cr = mdev->sync_conf.rate;

    return d <= td ? cr :
        d >= hd ? 0 :
        cr + (cr * (td - d) / (hd - td));
}

int w_make_resync_request(struct drbd_conf *mdev,
        struct drbd_work *w, int cancel)
{
@ -473,8 +461,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
    max_segment_size = mdev->agreed_pro_version < 94 ?
        queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;

    mdev->c_sync_rate = calc_resync_rate(mdev);
    number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
    number = SLEEP_TIME * mdev->sync_conf.rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
    pe = atomic_read(&mdev->rs_pending_cnt);

    mutex_lock(&mdev->data.mutex);
@ -178,6 +178,7 @@ static int print_unex = 1;
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/fcntl.h>
@ -514,8 +515,6 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);

#define NO_SIGNAL (!interruptible || !signal_pending(current))

/* Errors during formatting are counted here. */
static int format_errors;

@ -539,7 +538,7 @@ static int max_buffer_sectors;

static int *errors;
typedef void (*done_f)(int);
static struct cont_t {
static const struct cont_t {
    void (*interrupt)(void);
        /* this is called after the interrupt of the
         * main command */
@ -578,7 +577,7 @@ static void reset_fdc(void);
#define NEED_1_RECAL    -2
#define NEED_2_RECAL    -3

static int usage_count;
static atomic_t usage_count = ATOMIC_INIT(0);

/* buffer related variables */
static int buffer_track = -1;
@ -858,36 +857,15 @@ static void set_fdc(int drive)
}

/* locks the driver */
static int _lock_fdc(int drive, bool interruptible, int line)
static int lock_fdc(int drive, bool interruptible)
{
    if (!usage_count) {
        pr_err("Trying to lock fdc while usage count=0 at line %d\n",
            line);
    if (WARN(atomic_read(&usage_count) == 0,
         "Trying to lock fdc while usage count=0\n"))
        return -1;
    }

    if (test_and_set_bit(0, &fdc_busy)) {
        DECLARE_WAITQUEUE(wait, current);
        add_wait_queue(&fdc_wait, &wait);
    if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
        return -EINTR;

        for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);

            if (!test_and_set_bit(0, &fdc_busy))
                break;

            schedule();

            if (!NO_SIGNAL) {
                remove_wait_queue(&fdc_wait, &wait);
                return -EINTR;
            }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fdc_wait, &wait);
        flush_scheduled_work();
    }
    command_status = FD_COMMAND_NONE;

    __reschedule_timeout(drive, "lock fdc");
@ -895,11 +873,8 @@ static int _lock_fdc(int drive, bool interruptible, int line)
    return 0;
}

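The lock_fdc() rewrite above is a mechanical conversion from an open-coded waitqueue loop to wait_event_interruptible(), whose condition re-tests test_and_set_bit() on every wakeup. The two forms side by side, as a hedged and abridged sketch of the removed and added lines:

/* Before: open-coded sleep loop (abridged from the removed code). */
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&fdc_wait, &wait);
for (;;) {
    set_current_state(TASK_INTERRUPTIBLE);
    if (!test_and_set_bit(0, &fdc_busy))
        break;                          /* got the lock */
    schedule();
    if (signal_pending(current)) {
        remove_wait_queue(&fdc_wait, &wait);
        return -EINTR;
    }
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&fdc_wait, &wait);

/* After: same semantics in one call; it returns nonzero
 * (-ERESTARTSYS) on a signal, which lock_fdc() maps to -EINTR. */
if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
    return -EINTR;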
#define lock_fdc(drive, interruptible)			\
    _lock_fdc(drive, interruptible, __LINE__)

/* unlocks the driver */
static inline void unlock_fdc(void)
static void unlock_fdc(void)
{
    unsigned long flags;

@ -1224,7 +1199,7 @@ static int need_more_output(void)
/* Set perpendicular mode as required, based on data rate, if supported.
 * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
 */
static inline void perpendicular_mode(void)
static void perpendicular_mode(void)
{
    unsigned char perp_mode;

@ -1995,14 +1970,14 @@ static void do_wakeup(void)
    wake_up(&command_done);
}

static struct cont_t wakeup_cont = {
static const struct cont_t wakeup_cont = {
    .interrupt	= empty,
    .redo		= do_wakeup,
    .error		= empty,
    .done		= (done_f)empty
};

static struct cont_t intr_cont = {
static const struct cont_t intr_cont = {
    .interrupt	= empty,
    .redo		= process_fd_request,
    .error		= empty,
@ -2015,25 +1990,10 @@ static int wait_til_done(void (*handler)(void), bool interruptible)

    schedule_bh(handler);

    if (command_status < 2 && NO_SIGNAL) {
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&command_done, &wait);
        for (;;) {
            set_current_state(interruptible ?
                      TASK_INTERRUPTIBLE :
                      TASK_UNINTERRUPTIBLE);

            if (command_status >= 2 || !NO_SIGNAL)
                break;

            is_alive(__func__, "");
            schedule();
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&command_done, &wait);
    }
    if (interruptible)
        wait_event_interruptible(command_done, command_status >= 2);
    else
        wait_event(command_done, command_status >= 2);

    if (command_status < 2) {
        cancel_activity();
@ -2223,7 +2183,7 @@ static void redo_format(void)
    debugt(__func__, "queue format request");
}

static struct cont_t format_cont = {
static const struct cont_t format_cont = {
    .interrupt	= format_interrupt,
    .redo		= redo_format,
    .error		= bad_flp_intr,
@ -2583,10 +2543,8 @@ static int make_raw_rw_request(void)
    int tracksize;
    int ssize;

    if (max_buffer_sectors == 0) {
        pr_info("VFS: Block I/O scheduled on unopened device\n");
    if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
        return 0;
    }

    set_fdc((long)current_req->rq_disk->private_data);

@ -2921,7 +2879,7 @@ static void redo_fd_request(void)
    return;
}

static struct cont_t rw_cont = {
static const struct cont_t rw_cont = {
    .interrupt	= rw_interrupt,
    .redo		= redo_fd_request,
    .error		= bad_flp_intr,
@ -2936,19 +2894,16 @@ static void process_fd_request(void)

static void do_fd_request(struct request_queue *q)
{
    if (max_buffer_sectors == 0) {
        pr_info("VFS: %s called on non-open device\n", __func__);
    if (WARN(max_buffer_sectors == 0,
         "VFS: %s called on non-open device\n", __func__))
        return;
    }

    if (usage_count == 0) {
        pr_info("warning: usage count=0, current_req=%p exiting\n",
            current_req);
        pr_info("sect=%ld type=%x flags=%x\n",
            (long)blk_rq_pos(current_req), current_req->cmd_type,
            current_req->cmd_flags);
    if (WARN(atomic_read(&usage_count) == 0,
         "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
         current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
         current_req->cmd_flags))
        return;
    }

    if (test_bit(0, &fdc_busy)) {
        /* fdc busy, this new request will be treated when the
           current one is done */
@ -2960,7 +2915,7 @@ static void do_fd_request(struct request_queue *q)
    is_alive(__func__, "");
}

static struct cont_t poll_cont = {
static const struct cont_t poll_cont = {
    .interrupt	= success_and_wakeup,
    .redo		= floppy_ready,
    .error		= generic_failure,
@ -2991,7 +2946,7 @@ static void reset_intr(void)
    pr_info("weird, reset interrupt called\n");
}

static struct cont_t reset_cont = {
static const struct cont_t reset_cont = {
    .interrupt	= reset_intr,
    .redo		= success_and_wakeup,
    .error		= generic_failure,
@ -3033,7 +2988,7 @@ static inline int fd_copyin(void __user *param, void *address,
    return copy_from_user(address, param, size) ? -EFAULT : 0;
}

static inline const char *drive_name(int type, int drive)
static const char *drive_name(int type, int drive)
{
    struct floppy_struct *floppy;

@ -3096,14 +3051,14 @@ static void raw_cmd_done(int flag)
    generic_done(flag);
}

static struct cont_t raw_cmd_cont = {
static const struct cont_t raw_cmd_cont = {
    .interrupt	= success_and_wakeup,
    .redo		= floppy_start,
    .error		= generic_failure,
    .done		= raw_cmd_done
};

static inline int raw_cmd_copyout(int cmd, void __user *param,
static int raw_cmd_copyout(int cmd, void __user *param,
                  struct floppy_raw_cmd *ptr)
{
    int ret;
@ -3148,7 +3103,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
    }
}

static inline int raw_cmd_copyin(int cmd, void __user *param,
static int raw_cmd_copyin(int cmd, void __user *param,
                 struct floppy_raw_cmd **rcmd)
{
    struct floppy_raw_cmd *ptr;
@ -3266,7 +3221,7 @@ static int invalidate_drive(struct block_device *bdev)
    return 0;
}

static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
static int set_geometry(unsigned int cmd, struct floppy_struct *g,
                   int drive, int type, struct block_device *bdev)
{
    int cnt;
@ -3337,7 +3292,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
}

/* handle obsolete ioctl's */
static int ioctl_table[] = {
static unsigned int ioctl_table[] = {
    FDCLRPRM,
    FDSETPRM,
    FDDEFPRM,
@ -3365,7 +3320,7 @@ static int ioctl_table[] = {
    FDTWADDLE
};

static inline int normalize_ioctl(int *cmd, int *size)
static int normalize_ioctl(unsigned int *cmd, int *size)
{
    int i;

@ -3417,7 +3372,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
    return 0;
}

static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
            unsigned long param)
{
    int drive = (long)bdev->bd_disk->private_data;
@ -3593,6 +3548,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
    return 0;
}

static int fd_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned int cmd, unsigned long param)
{
    int ret;

    lock_kernel();
    ret = fd_locked_ioctl(bdev, mode, cmd, param);
    unlock_kernel();

    return ret;
}

static void __init config_types(void)
{
    bool has_drive = false;
@ -3649,6 +3616,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
    int drive = (long)disk->private_data;

    lock_kernel();
    mutex_lock(&open_lock);
    if (UDRS->fd_ref < 0)
        UDRS->fd_ref = 0;
@ -3659,6 +3627,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
    if (!UDRS->fd_ref)
        opened_bdev[drive] = NULL;
    mutex_unlock(&open_lock);
    unlock_kernel();

    return 0;
}
@ -3676,6 +3645,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
    int res = -EBUSY;
    char *tmp;

    lock_kernel();
    mutex_lock(&open_lock);
    old_dev = UDRS->fd_device;
    if (opened_bdev[drive] && opened_bdev[drive] != bdev)
@ -3752,6 +3722,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        goto out;
    }
    mutex_unlock(&open_lock);
    unlock_kernel();
    return 0;
out:
    if (UDRS->fd_ref < 0)
@ -3762,6 +3733,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
    opened_bdev[drive] = NULL;
out2:
    mutex_unlock(&open_lock);
    unlock_kernel();
    return res;
}

@ -3829,6 +3801,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
    bio.bi_size = size;
    bio.bi_bdev = bdev;
    bio.bi_sector = 0;
    bio.bi_flags = BIO_QUIET;
    init_completion(&complete);
    bio.bi_private = &complete;
    bio.bi_end_io = floppy_rb0_complete;
@ -3857,10 +3830,10 @@ static int floppy_revalidate(struct gendisk *disk)
    if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
        test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
        test_bit(drive, &fake_change) || NO_GEOM) {
        if (usage_count == 0) {
            pr_info("VFS: revalidate called on non-open device.\n");
        if (WARN(atomic_read(&usage_count) == 0,
             "VFS: revalidate called on non-open device.\n"))
            return -EFAULT;
        }

        lock_fdc(drive, false);
        cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
              test_bit(FD_VERIFY_BIT, &UDRS->flags));
@ -3893,7 +3866,7 @@ static const struct block_device_operations floppy_fops = {
    .owner			= THIS_MODULE,
    .open			= floppy_open,
    .release		= floppy_release,
    .locked_ioctl		= fd_ioctl,
    .ioctl			= fd_ioctl,
    .getgeo			= fd_getgeo,
    .media_changed		= check_floppy_change,
    .revalidate_disk	= floppy_revalidate,
@ -4126,7 +4099,7 @@ static ssize_t floppy_cmos_show(struct device *dev,
    return sprintf(buf, "%X\n", UDP->cmos);
}

DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);

static void floppy_device_release(struct device *dev)
{
@ -4175,6 +4148,9 @@ static int __init floppy_init(void)
    int i, unit, drive;
    int err, dr;

    set_debugt();
    interruptjiffies = resultjiffies = jiffies;

#if defined(CONFIG_PPC)
    if (check_legacy_ioport(FDC1))
        return -ENODEV;
@ -4353,7 +4329,7 @@ static int __init floppy_init(void)
        platform_device_unregister(&floppy_device[drive]);
out_flush_work:
    flush_scheduled_work();
    if (usage_count)
    if (atomic_read(&usage_count))
        floppy_release_irq_and_dma();
out_unreg_region:
    blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
@ -4370,8 +4346,6 @@ static int __init floppy_init(void)
    return err;
}

static DEFINE_SPINLOCK(floppy_usage_lock);

static const struct io_region {
    int offset;
    int size;
@ -4417,14 +4391,8 @@ static void floppy_release_regions(int fdc)

static int floppy_grab_irq_and_dma(void)
{
    unsigned long flags;

    spin_lock_irqsave(&floppy_usage_lock, flags);
    if (usage_count++) {
        spin_unlock_irqrestore(&floppy_usage_lock, flags);
    if (atomic_inc_return(&usage_count) > 1)
        return 0;
    }
    spin_unlock_irqrestore(&floppy_usage_lock, flags);

    /*
     * We might have scheduled a free_irq(), wait it to
@ -4435,9 +4403,7 @@ static int floppy_grab_irq_and_dma(void)
    if (fd_request_irq()) {
        DPRINT("Unable to grab IRQ%d for the floppy driver\n",
               FLOPPY_IRQ);
        spin_lock_irqsave(&floppy_usage_lock, flags);
        usage_count--;
        spin_unlock_irqrestore(&floppy_usage_lock, flags);
        atomic_dec(&usage_count);
        return -1;
    }
    if (fd_request_dma()) {
@ -4447,9 +4413,7 @@ static int floppy_grab_irq_and_dma(void)
            use_virtual_dma = can_use_virtual_dma = 1;
        if (!(can_use_virtual_dma & 1)) {
            fd_free_irq();
            spin_lock_irqsave(&floppy_usage_lock, flags);
            usage_count--;
            spin_unlock_irqrestore(&floppy_usage_lock, flags);
            atomic_dec(&usage_count);
            return -1;
        }
    }
@ -4484,9 +4448,7 @@ static int floppy_grab_irq_and_dma(void)
    fd_free_dma();
    while (--fdc >= 0)
        floppy_release_regions(fdc);
    spin_lock_irqsave(&floppy_usage_lock, flags);
    usage_count--;
    spin_unlock_irqrestore(&floppy_usage_lock, flags);
    atomic_dec(&usage_count);
    return -1;
}

@ -4498,14 +4460,10 @@ static void floppy_release_irq_and_dma(void)
#endif
    long tmpsize;
    unsigned long tmpaddr;
    unsigned long flags;

    spin_lock_irqsave(&floppy_usage_lock, flags);
    if (--usage_count) {
        spin_unlock_irqrestore(&floppy_usage_lock, flags);
    if (!atomic_dec_and_test(&usage_count))
        return;
    }
    spin_unlock_irqrestore(&floppy_usage_lock, flags);

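The usage_count conversion above replaces a spinlock-protected int with an atomic_t, which is why floppy_usage_lock disappears entirely: atomic_inc_return() answers "am I the first user?" and atomic_dec_and_test() answers "am I the last?". A hedged sketch of the pattern (names illustrative):

/* Sketch of the lock-free refcount pattern used above. */
static atomic_t usage = ATOMIC_INIT(0);

static int grab_resources(void)
{
    if (atomic_inc_return(&usage) > 1)
        return 0;           /* someone already holds them */
    /* ... first user: request IRQ/DMA; atomic_dec(&usage) on failure ... */
    return 0;
}

static void release_resources(void)
{
    if (!atomic_dec_and_test(&usage))
        return;             /* still in use elsewhere */
    /* ... last user gone: free IRQ/DMA ... */
}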
    if (irqdma_allocated) {
        fd_disable_dma();
        fd_free_dma();
@ -4598,7 +4556,7 @@ static void __exit floppy_module_exit(void)
    del_timer_sync(&fd_timer);
    blk_cleanup_queue(floppy_queue);

    if (usage_count)
    if (atomic_read(&usage_count))
        floppy_release_irq_and_dma();

    /* eject disk, if any */
@ -627,7 +627,7 @@ static void hd_request(void)
        req_data_dir(req) == READ ? "read" : "writ",
        cyl, head, sec, nsect, req->buffer);
#endif
    if (blk_fs_request(req)) {
    if (req->cmd_type == REQ_TYPE_FS) {
        switch (rq_data_dir(req)) {
        case READ:
            hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
@ -67,6 +67,7 @@
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */
#include <linux/completion.h>
@ -476,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
    pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;

    if (bio_rw(bio) == WRITE) {
        bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
        bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
        struct file *file = lo->lo_backing_file;

        if (barrier) {
@ -831,7 +832,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
    lo->lo_queue->unplug_fn = loop_unplug;

    if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
        blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
        blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN);

    set_capacity(lo->lo_disk, size);
    bd_set_size(bdev, size << 9);
@ -1408,9 +1409,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
    struct loop_device *lo = bdev->bd_disk->private_data;

    lock_kernel();
    mutex_lock(&lo->lo_ctl_mutex);
    lo->lo_refcnt++;
    mutex_unlock(&lo->lo_ctl_mutex);
    unlock_kernel();

    return 0;
}
@ -1420,6 +1423,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
    struct loop_device *lo = disk->private_data;
    int err;

    lock_kernel();
    mutex_lock(&lo->lo_ctl_mutex);

    if (--lo->lo_refcnt)
@ -1444,6 +1448,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
    mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
    lock_kernel();
    return 0;
}

|
@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q)
|
||||
break;
|
||||
}
|
||||
|
||||
if (unlikely(!blk_fs_request(host->req))) {
|
||||
if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
|
||||
mg_end_request_cur(host, -EIO);
|
||||
continue;
|
||||
}
|
||||
@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (unlikely(!blk_fs_request(req))) {
|
||||
if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
|
||||
mg_end_request_cur(host, -EIO);
|
||||
continue;
|
||||
}
|
||||
|
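blk_fs_request() was a trivial wrapper around the request's cmd_type field, so the conversions in hd.c, mg_disk.c and nbd.c below are a textual inlining of the helper rather than a behaviour change. A hedged sketch of the check as the drivers now spell it:

/* Sketch: filter non-filesystem requests; the helper name is
 * illustrative, the test is the one used in the hunks above. */
static bool want_request(struct request *rq)
{
    return rq->cmd_type == REQ_TYPE_FS;  /* was blk_fs_request(rq) */
}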
@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
@ -448,7 +449,7 @@ static void nbd_clear_que(struct nbd_device *lo)

static void nbd_handle_req(struct nbd_device *lo, struct request *req)
{
    if (!blk_fs_request(req))
    if (req->cmd_type != REQ_TYPE_FS)
        goto error_out;

    nbd_cmd(req) = NBD_CMD_READ;
@ -716,9 +717,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
    dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
        lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

    lock_kernel();
    mutex_lock(&lo->tx_lock);
    error = __nbd_ioctl(bdev, lo, cmd, arg);
    mutex_unlock(&lo->tx_lock);
    unlock_kernel();

    return error;
}
@ -726,7 +729,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
static const struct block_device_operations nbd_fops =
{
    .owner =	THIS_MODULE,
    .locked_ioctl =	nbd_ioctl,
    .ioctl =	nbd_ioctl,
};

/*
@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q)
            break;

        /* filter out block requests we don't understand */
        if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
        if (rq->cmd_type != REQ_TYPE_FS &&
            !(rq->cmd_flags & REQ_HARDBARRIER)) {
            blk_end_request_all(rq, 0);
            continue;
        }
@ -322,7 +323,7 @@ static void osdblk_rq_fn(struct request_queue *q)
         * driver-specific, etc.
         */

        do_flush = (rq->special == (void *) 0xdeadbeefUL);
        do_flush = rq->cmd_flags & REQ_FLUSH;
        do_write = (rq_data_dir(rq) == WRITE);

        if (!do_flush) { /* osd_flush does not use a bio */
@ -379,14 +380,6 @@ static void osdblk_rq_fn(struct request_queue *q)
    }
}

static void osdblk_prepare_flush(struct request_queue *q, struct request *rq)
{
    /* add driver-specific marker, to indicate that this request
     * is a flush command
     */
    rq->special = (void *) 0xdeadbeefUL;
}

static void osdblk_free_disk(struct osdblk_device *osdev)
{
    struct gendisk *disk = osdev->disk;
@ -446,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
    blk_queue_stack_limits(q, osd_request_queue(osdev->osd));

    blk_queue_prep_rq(q, blk_queue_start_tag);
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush);
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);

    disk->queue = q;
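The osdblk change above retires the driver-private 0xdeadbeef marker that the old prepare_flush hook stuffed into rq->special: with blk_queue_ordered() no longer taking a prepare callback, the block layer marks flush requests itself and the driver only has to test cmd_flags. A hedged sketch of the new-style dispatch (dispatch() is illustrative):

/* Sketch: flush detection after the prepare_flush() hook removal. */
static void dispatch(struct request *rq)
{
    bool do_flush = rq->cmd_flags & REQ_FLUSH;  /* was rq->special magic */
    bool do_write = rq_data_dir(rq) == WRITE;

    if (do_flush) {
        /* ... issue the flush; it carries no bio payload ... */
        return;
    }
    /* ... normal read/write path ... */
    (void)do_write;
}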
@ -138,6 +138,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>

static DEFINE_SPINLOCK(pcd_lock);
@ -224,13 +225,21 @@ static char *pcd_buf;	/* buffer for request in progress */
static int pcd_block_open(struct block_device *bdev, fmode_t mode)
{
    struct pcd_unit *cd = bdev->bd_disk->private_data;
    return cdrom_open(&cd->info, bdev, mode);
    int ret;

    lock_kernel();
    ret = cdrom_open(&cd->info, bdev, mode);
    unlock_kernel();

    return ret;
}

static int pcd_block_release(struct gendisk *disk, fmode_t mode)
{
    struct pcd_unit *cd = disk->private_data;
    lock_kernel();
    cdrom_release(&cd->info, mode);
    unlock_kernel();
    return 0;
}

@ -238,7 +247,13 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned cmd, unsigned long arg)
{
    struct pcd_unit *cd = bdev->bd_disk->private_data;
    return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
    int ret;

    lock_kernel();
    ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
    unlock_kernel();

    return ret;
}

static int pcd_block_media_changed(struct gendisk *disk)
@ -251,7 +266,7 @@ static const struct block_device_operations pcd_bdops = {
    .owner		= THIS_MODULE,
    .open		= pcd_block_open,
    .release	= pcd_block_release,
    .locked_ioctl	= pcd_block_ioctl,
    .ioctl		= pcd_block_ioctl,
    .media_changed	= pcd_block_media_changed,
};

@@ -153,6 +153,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <linux/workqueue.h>

@@ -439,7 +440,7 @@ static char *pd_buf; /* buffer for request in progress */

static enum action do_pd_io_start(void)
{
	if (blk_special_request(pd_req)) {
	if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
		phase = pd_special;
		return pd_special();
	}
@@ -735,12 +736,14 @@ static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	lock_kernel();
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	unlock_kernel();
	return 0;
}

@@ -768,8 +771,10 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode,

	switch (cmd) {
	case CDROMEJECT:
		lock_kernel();
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		unlock_kernel();
		return 0;
	default:
		return -EINVAL;
@@ -780,8 +785,10 @@ static int pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	lock_kernel();
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	unlock_kernel();

	return 0;
}
@@ -812,7 +819,7 @@ static const struct block_device_operations pd_fops = {
	.owner = THIS_MODULE,
	.open = pd_open,
	.release = pd_release,
	.locked_ioctl = pd_ioctl,
	.ioctl = pd_ioctl,
	.getgeo = pd_getgeo,
	.media_changed = pd_check_media,
	.revalidate_disk= pd_revalidate
@@ -152,6 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>

static DEFINE_SPINLOCK(pf_spin_lock);
@@ -266,7 +267,7 @@ static const struct block_device_operations pf_fops = {
	.owner = THIS_MODULE,
	.open = pf_open,
	.release = pf_release,
	.locked_ioctl = pf_ioctl,
	.ioctl = pf_ioctl,
	.getgeo = pf_getgeo,
	.media_changed = pf_check_media,
};
@@ -299,20 +300,26 @@ static void __init pf_init_units(void)
static int pf_open(struct block_device *bdev, fmode_t mode)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	int ret;

	lock_kernel();
	pf_identify(pf);

	ret = -ENODEV;
	if (pf->media_status == PF_NM)
		return -ENODEV;
		goto out;

	ret = -EROFS;
	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
		return -EROFS;
		goto out;

	ret = 0;
	pf->access++;
	if (pf->removable)
		pf_lock(pf, 1);

	return 0;
out:
	unlock_kernel();
	return ret;
}

static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -342,7 +349,10 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u

	if (pf->access != 1)
		return -EBUSY;
	lock_kernel();
	pf_eject(pf);
	unlock_kernel();

	return 0;
}

@@ -350,14 +360,18 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
{
	struct pf_unit *pf = disk->private_data;

	if (pf->access <= 0)
	lock_kernel();
	if (pf->access <= 0) {
		unlock_kernel();
		return -EINVAL;
	}

	pf->access--;

	if (!pf->access && pf->removable)
		pf_lock(pf, 0);

	unlock_kernel();
	return 0;

}
@@ -57,6 +57,7 @@
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
@@ -1221,7 +1222,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
@@ -2382,6 +2383,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)

	VPRINTK(DRIVER_NAME": entering open\n");

	lock_kernel();
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
@@ -2409,6 +2411,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
	}

	mutex_unlock(&ctl_mutex);
	unlock_kernel();
	return 0;

out_dec:
@@ -2416,6 +2419,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
out:
	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
	mutex_unlock(&ctl_mutex);
	unlock_kernel();
	return ret;
}

@@ -2424,6 +2428,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
	struct pktcdvd_device *pd = disk->private_data;
	int ret = 0;

	lock_kernel();
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
@@ -2432,6 +2437,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	unlock_kernel();
	return ret;
}

@@ -2762,10 +2768,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	lock_kernel();
	switch (cmd) {
	case CDROMEJECT:
		/*
@@ -2783,14 +2791,16 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
		return -ENOTTY;
		ret = -ENOTTY;
	}
	unlock_kernel();

	return 0;
	return ret;
}

static int pkt_media_changed(struct gendisk *disk)
@@ -2812,7 +2822,7 @@ static const struct block_device_operations pktcdvd_ops = {
	.owner = THIS_MODULE,
	.open = pkt_open,
	.release = pkt_close,
	.locked_ioctl = pkt_ioctl,
	.ioctl = pkt_ioctl,
	.media_changed = pkt_media_changed,
};
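The pkt_ioctl hunk above shows the subtler half of the conversion: once lock_kernel() is taken inside the handler, early returns must become "ret = ...; break;" so the lock is always dropped on exit. A minimal sketch of that single-exit shape, with a hypothetical do_eject() standing in for the real cases:

static int example_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	switch (cmd) {
	case CDROMEJECT:
		ret = do_eject(bdev);	/* hypothetical helper */
		break;
	default:
		ret = -ENOTTY;		/* was "return -ENOTTY;" */
	}
	unlock_kernel();		/* single exit: lock always released */

	return ret;
}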
@@ -196,13 +196,12 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);

	while ((req = blk_fetch_request(q))) {
		if (blk_fs_request(req)) {
			if (ps3disk_submit_request_sg(dev, req))
				break;
		} else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
			   req->cmd[0] == REQ_LB_OP_FLUSH) {
		if (req->cmd_flags & REQ_FLUSH) {
			if (ps3disk_submit_flush_request(dev, req))
				break;
		} else if (req->cmd_type == REQ_TYPE_FS) {
			if (ps3disk_submit_request_sg(dev, req))
				break;
		} else {
			blk_dump_rq_flags(req, DEVICE_NAME " bad request");
			__blk_end_request_all(req, -EIO);
@@ -257,8 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
		return IRQ_HANDLED;
	}

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
	    req->cmd[0] == REQ_LB_OP_FLUSH) {
	if (req->cmd_flags & REQ_FLUSH) {
		read = 0;
		op = "flush";
	} else {
@@ -398,16 +396,6 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
	return 0;
}

static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
{
	struct ps3_storage_device *dev = q->queuedata;

	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);

	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}

static unsigned long ps3disk_mask;

static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -480,8 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
	blk_queue_dma_alignment(queue, dev->blk_size-1);
	blk_queue_logical_block_size(queue, dev->blk_size);

	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
			  ps3disk_prepare_flush);
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);

	blk_queue_max_segments(queue, -1);
	blk_queue_max_segment_size(queue, dev->bounce_size);
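The ps3disk hunks illustrate where the flush plumbing is headed: flushes now arrive as REQ_FLUSH-flagged requests rather than REQ_TYPE_LINUX_BLOCK commands carrying REQ_LB_OP_FLUSH, so the per-driver prepare_flush callback disappears. A condensed sketch of the resulting dispatch loop, with hypothetical submit_flush()/submit_rw() driver hooks:

static void example_do_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q))) {
		if (req->cmd_flags & REQ_FLUSH)
			submit_flush(req);		  /* cache flush */
		else if (req->cmd_type == REQ_TYPE_FS)
			submit_rw(req);			  /* normal I/O */
		else
			__blk_end_request_all(req, -EIO); /* not understood */
	}
}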
@@ -20,6 +20,7 @@
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -661,11 +662,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
	return err;
}

static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();
	ret = floppy_open(bdev, mode);
	unlock_kernel();

	return ret;
}

static int floppy_release(struct gendisk *disk, fmode_t mode)
{
	struct floppy_state *fs = disk->private_data;
	struct swim __iomem *base = fs->swd->base;

	lock_kernel();
	if (fs->ref_count < 0)
		fs->ref_count = 0;
	else if (fs->ref_count > 0)
@@ -673,6 +686,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)

	if (fs->ref_count == 0)
		swim_motor(base, OFF);
	unlock_kernel();

	return 0;
}
@@ -690,7 +704,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
	case FDEJECT:
		if (fs->ref_count != 1)
			return -EBUSY;
		lock_kernel();
		err = floppy_eject(fs);
		unlock_kernel();
		return err;

	case FDGETPRM:
@@ -751,9 +767,9 @@ static int floppy_revalidate(struct gendisk *disk)

static const struct block_device_operations floppy_fops = {
	.owner = THIS_MODULE,
	.open = floppy_open,
	.open = floppy_unlocked_open,
	.release = floppy_release,
	.locked_ioctl = floppy_ioctl,
	.ioctl = floppy_ioctl,
	.getgeo = floppy_getgeo,
	.media_changed = floppy_check_change,
	.revalidate_disk = floppy_revalidate,
@@ -25,6 +25,7 @@
#include <linux/ioctl.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/io.h>
@@ -839,7 +840,7 @@ static int fd_eject(struct floppy_state *fs)
static struct floppy_struct floppy_type =
	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/* 7 1.44MB 3.5" */

static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -867,6 +868,18 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
	return -ENOTTY;
}

static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param)
{
	int ret;

	lock_kernel();
	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
	unlock_kernel();

	return ret;
}

static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -936,15 +949,28 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
	return 0;
}

static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();
	ret = floppy_open(bdev, mode);
	unlock_kernel();

	return ret;
}

static int floppy_release(struct gendisk *disk, fmode_t mode)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;
	lock_kernel();
	if (fs->ref_count > 0 && --fs->ref_count == 0) {
		swim3_action(fs, MOTOR_OFF);
		out_8(&sw->control_bic, 0xff);
		swim3_select(fs, RELAX);
	}
	unlock_kernel();
	return 0;
}

@@ -995,9 +1021,9 @@ static int floppy_revalidate(struct gendisk *disk)
}

static const struct block_device_operations floppy_fops = {
	.open = floppy_open,
	.open = floppy_unlocked_open,
	.release = floppy_release,
	.locked_ioctl = floppy_ioctl,
	.ioctl = floppy_ioctl,
	.media_changed = floppy_check_change,
	.revalidate_disk= floppy_revalidate,
};
@@ -28,6 +28,7 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"
@@ -648,7 +649,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
		return 0;
	}

	if (lun->changed && !blk_pc_request(rq)) {
	if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		blk_start_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
@@ -684,7 +685,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
	}
	urq->nsg = n_elem;

	if (blk_pc_request(rq)) {
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
@@ -781,7 +782,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			if (cmd->act_len >= rq->resid_len)
				rq->resid_len = 0;
			else
@@ -795,7 +796,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
			}
		}
	} else {
		if (blk_pc_request(rq)) {
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
@@ -1710,6 +1711,18 @@ static int ub_bd_open(struct block_device *bdev, fmode_t mode)
	return rc;
}

static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();
	ret = ub_bd_open(bdev, mode);
	unlock_kernel();

	return ret;
}

/*
 */
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
@@ -1717,7 +1730,10 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	lock_kernel();
	ub_put(sc);
	unlock_kernel();

	return 0;
}

@@ -1729,8 +1745,13 @@ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
{
	struct gendisk *disk = bdev->bd_disk;
	void __user *usermem = (void __user *) arg;
	int ret;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
	lock_kernel();
	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
	unlock_kernel();

	return ret;
}

/*
@@ -1792,9 +1813,9 @@ static int ub_bd_media_changed(struct gendisk *disk)

static const struct block_device_operations ub_bd_fops = {
	.owner = THIS_MODULE,
	.open = ub_bd_open,
	.open = ub_bd_unlocked_open,
	.release = ub_bd_release,
	.locked_ioctl = ub_bd_ioctl,
	.ioctl = ub_bd_ioctl,
	.media_changed = ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};
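The ub hunks also show the second mechanical conversion running through this series: the blk_*_request() predicates are replaced by direct cmd_type/cmd_flags tests. A sketch of the equivalences being applied (REQ_TYPE_* and REQ_HARDBARRIER come from the block headers):

/* What each removed helper reduces to: */
static inline bool rq_is_fs(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_FS;	  /* was blk_fs_request() */
}

static inline bool rq_is_pc(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_BLOCK_PC; /* was blk_pc_request() */
}

static inline bool rq_is_barrier(struct request *rq)
{
	return rq->cmd_flags & REQ_HARDBARRIER;	  /* was blk_barrier_rq() */
}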
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
				le32_to_cpu(desc->local_addr)>>9,
				le32_to_cpu(desc->transfer_size));
			dump_dmastat(card, control);
		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
		} else if ((bio->bi_rw & REQ_WRITE) &&
			   le32_to_cpu(desc->local_addr) >> 9 ==
				card->init_size) {
			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
@@ -41,6 +41,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -175,6 +176,18 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
	return 0;
}

static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();
	ret = viodasd_open(bdev, mode);
	unlock_kernel();

	return ret;
}

/*
 * External release entry point.
 */
@@ -183,6 +196,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
	struct viodasd_device *d = disk->private_data;
	HvLpEvent_Rc hvrc;

	lock_kernel();
	/* Send the event to OS/400.  We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
@@ -195,6 +209,9 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
			0, 0, 0);
	if (hvrc != 0)
		pr_warning("HV close call failed %d\n", (int)hvrc);

	unlock_kernel();

	return 0;
}

@@ -219,7 +236,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 */
static const struct block_device_operations viodasd_fops = {
	.owner = THIS_MODULE,
	.open = viodasd_open,
	.open = viodasd_unlocked_open,
	.release = viodasd_release,
	.getgeo = viodasd_getgeo,
};
@@ -361,7 +378,7 @@ static void do_viodasd_request(struct request_queue *q)
		if (req == NULL)
			return;
		/* check that request contains a valid command */
		if (!blk_fs_request(req)) {
		if (req->cmd_type != REQ_TYPE_FS) {
			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
			continue;
		}
@@ -2,6 +2,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
@@ -65,13 +66,18 @@ static void blk_done(struct virtqueue *vq)
			break;
		}

		if (blk_pc_request(vbr->req)) {
		switch (vbr->req->cmd_type) {
		case REQ_TYPE_BLOCK_PC:
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
		}
		if (blk_special_request(vbr->req))
			break;
		case REQ_TYPE_SPECIAL:
			vbr->req->errors = (error != 0);
			break;
		default:
			break;
		}

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
@@ -94,36 +100,35 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		return false;

	vbr->req = req;
	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_BLOCK_PC:
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;

	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_SPECIAL:
		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_LINUX_BLOCK:
		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
		/*FALLTHRU*/
		default:
			/* We don't put anything else in the queue. */
			BUG();
	}

	if (blk_barrier_rq(vbr->req))
	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
@@ -134,12 +139,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information before the normal inhdr.
	 */
	if (blk_pc_request(vbr->req))
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
@@ -190,12 +195,6 @@ static void do_virtblk_request(struct request_queue *q)
	virtqueue_kick(vblk->vq);
}

static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}

/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -219,7 +218,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
@@ -235,6 +234,18 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			      (void __user *)data);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long param)
{
	int ret;

	lock_kernel();
	ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
	unlock_kernel();

	return ret;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -261,7 +272,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
}

static const struct block_device_operations virtblk_fops = {
	.locked_ioctl = virtblk_ioctl,
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
@@ -383,8 +394,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
		 * flushing a volatile write cache on the host.  Use that
		 * to implement write barrier support.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
		/*
		 * If the BARRIER feature is supported the host expects us
@@ -393,7 +403,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
		 * never re-orders outstanding I/O.  This feature is not
		 * useful for real life scenarious and deprecated.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	} else {
		/*
		 * If the FLUSH feature is not supported we must assume that
@@ -401,7 +411,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
		 * caching. We still need to drain the queue to provider
		 * proper barrier semantics.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
	}

	/* If disk is read-only in the host, the guest should obey */
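Across osdblk, ps3disk, virtio_blk and xen-blkfront the same API change repeats: blk_queue_ordered() loses its prepare_flush callback parameter. A hedged before/after sketch of the queue setup (driver_prepare_flush is a stand-in for the per-driver helper being deleted):

/* Before: the driver rewrote the request into a private flush command. */
blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, driver_prepare_flush);

/* After: flushes arrive as REQ_FLUSH requests, so the callback (and
 * the driver's prepare_flush helper) can be removed outright. */
blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);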
@@ -46,6 +46,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -133,7 +134,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct block_device_operations xd_fops = {
	.owner = THIS_MODULE,
	.locked_ioctl = xd_ioctl,
	.ioctl = xd_ioctl,
	.getgeo = xd_getgeo,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -322,7 +323,7 @@ static void do_xd_request (struct request_queue * q)
		int res = -EIO;
		int retry;

		if (!blk_fs_request(req))
		if (req->cmd_type != REQ_TYPE_FS)
			goto done;
		if (block + count > get_capacity(req->rq_disk))
			goto done;
@@ -347,7 +348,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}

/* xd_ioctl: handle device ioctl's */
static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
{
	switch (cmd) {
	case HDIO_SET_DMA:
@@ -375,6 +376,18 @@ static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long a
	}
}

static int xd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long param)
{
	int ret;

	lock_kernel();
	ret = xd_locked_ioctl(bdev, mode, cmd, param);
	unlock_kernel();

	return ret;
}

/* xd_readwrite: handle a read/write request */
static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
{
@@ -41,6 +41,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>

#include <xen/xen.h>
@@ -79,6 +80,7 @@ static const struct block_device_operations xlvbd_block_fops;
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
@@ -95,16 +97,14 @@ struct blkfront_info
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0
@@ -139,6 +139,55 @@ static void add_id_to_freelist(struct blkfront_info *info,
	info->shadow_free = id;
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
@@ -239,7 +288,7 @@ static int blkif_queue_request(struct request *req)

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (blk_barrier_rq(req))
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -310,7 +359,7 @@ static void do_blkif_request(struct request_queue *rq)

		blk_start_request(req);

		if (!blk_fs_request(req)) {
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}
@@ -372,17 +421,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;
	const char *barrier;

	err = blk_queue_ordered(info->rq,
		info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
		NULL);
	switch (info->feature_barrier) {
	case QUEUE_ORDERED_DRAIN:	barrier = "enabled (drain)"; break;
	case QUEUE_ORDERED_TAG:		barrier = "enabled (tag)"; break;
	case QUEUE_ORDERED_NONE:	barrier = "disabled"; break;
	default:			return -EINVAL;
	}

	err = blk_queue_ordered(info->rq, info->feature_barrier);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	       info->gd->disk_name, barrier);
	return 0;
}
@@ -418,9 +472,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto out;
		goto release;

	offset = minor / nr_parts;

@@ -451,14 +510,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto out;
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	if (info->feature_barrier)
		xlvbd_barrier(info);
	xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);
@@ -471,10 +529,45 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,

	return 0;

release:
	xlbd_release_minors(minor, nr_minors);
out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
@@ -569,7 +662,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
			printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
			       info->gd->disk_name);
			error = -EOPNOTSUPP;
			info->feature_barrier = 0;
			info->feature_barrier = QUEUE_ORDERED_NONE;
			xlvbd_barrier(info);
		}
		/* fall through */
@@ -652,7 +745,7 @@ static int setup_blkring(struct xenbus_device *dev,

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
@@ -712,7 +805,6 @@ static int talk_to_backend(struct xenbus_device *dev,
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
@@ -773,6 +865,7 @@ static int blkfront_probe(struct xenbus_device *dev,
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
@@ -786,7 +879,7 @@ static int blkfront_probe(struct xenbus_device *dev,
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_backend(dev, info);
	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
@@ -881,13 +974,50 @@ static int blkfront_resume(struct xenbus_device *dev)

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_backend(dev, info);
	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
/*
 * Invoked when the backend is finally 'ready' (and has told produced
@@ -899,11 +1029,31 @@ static void blkfront_connect(struct blkfront_info *info)
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED) )
	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

@@ -920,10 +1070,26 @@ static void blkfront_connect(struct blkfront_info *info)
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    "feature-barrier", "%lu", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; draining will do what needs to get done.
	 *
	 * If there are barriers, then we can do full queued writes
	 * with tagged barriers.
	 *
	 * If barriers are not supported, then there's no much we can
	 * do, so just set ordering to NONE.
	 */
	if (err)
		info->feature_barrier = 0;
		info->feature_barrier = QUEUE_ORDERED_DRAIN;
	else if (barrier)
		info->feature_barrier = QUEUE_ORDERED_TAG;
	else
		info->feature_barrier = QUEUE_ORDERED_NONE;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
@@ -945,53 +1111,15 @@ static void blkfront_connect(struct blkfront_info *info)
	info->is_ready = 1;
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once is this done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	unsigned long flags;

	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	del_gendisk(info->gd);

out:
	xenbus_frontend_closed(dev);
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	struct block_device *bd;

	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
@@ -1006,35 +1134,56 @@ static void backend_changed(struct xenbus_device *dev,
		break;

	case XenbusStateClosing:
		if (info->gd == NULL) {
			xenbus_frontend_closed(dev);
			break;
		}
		bd = bdget_disk(info->gd, 0);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		mutex_lock(&bd->bd_mutex);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		mutex_unlock(&bd->bd_mutex);
		bdput(bd);
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *dev)
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	kfree(info);
	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
@@ -1043,30 +1192,78 @@ static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready;
	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	info->users++;
	return 0;
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	lock_kernel();

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	unlock_kernel();
	return err;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;
		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
	struct block_device *bdev;
	struct xenbus_device *xbdev;

		if (state == XenbusStateClosing && info->is_ready)
			blkfront_closing(dev);
	lock_kernel();

	bdev = bdget_disk(disk, 0);
	bdput(bdev);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close.  We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	unlock_kernel();
	return 0;
}

@@ -1076,7 +1273,7 @@ static const struct block_device_operations xlvbd_block_fops =
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.locked_ioctl = blkif_ioctl,
	.ioctl = blkif_ioctl,
};

@@ -1092,7 +1289,7 @@ static struct xenbus_driver blkfront = {
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};
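Worth noting in the blkfront hunks: feature_barrier changes meaning from a boolean to a stored QUEUE_ORDERED_* mode, which xlvbd_barrier() then passes straight to the queue. A condensed sketch of the mapping (err and barrier come from the xenbus_gather() call shown above):

if (err)		/* no "feature-barrier" key: old synchronous backend */
	info->feature_barrier = QUEUE_ORDERED_DRAIN;
else if (barrier)	/* real barrier support */
	info->feature_barrier = QUEUE_ORDERED_TAG;
else			/* barriers explicitly unsupported */
	info->feature_barrier = QUEUE_ORDERED_NONE;

err = blk_queue_ordered(info->rq, info->feature_barrier);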
@@ -89,6 +89,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
@@ -465,7 +466,7 @@ struct request *ace_get_next_request(struct request_queue * q)
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (blk_fs_request(req))
		if (req->cmd_type == REQ_TYPE_FS)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
@@ -901,11 +902,14 @@ static int ace_open(struct block_device *bdev, fmode_t mode)

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	lock_kernel();
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	check_disk_change(bdev);
	unlock_kernel();

	return 0;
}

@@ -917,6 +921,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	lock_kernel();
	spin_lock_irqsave(&ace->lock, flags);
	ace->users--;
	if (ace->users == 0) {
@@ -924,6 +929,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	unlock_kernel();
	return 0;
}
|
@ -33,6 +33,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
@ -153,6 +154,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
|
||||
|
||||
device = MINOR(bdev->bd_dev);
|
||||
|
||||
lock_kernel();
|
||||
if ( current_device != -1 && current_device != device )
|
||||
{
|
||||
rc = -EBUSY;
|
||||
@ -294,20 +296,25 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
|
||||
set_capacity(z2ram_gendisk, z2ram_size >> 9);
|
||||
}
|
||||
|
||||
unlock_kernel();
|
||||
return 0;
|
||||
|
||||
err_out_kfree:
|
||||
kfree(z2ram_map);
|
||||
err_out:
|
||||
unlock_kernel();
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int
|
||||
z2_release(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
if ( current_device == -1 )
|
||||
return 0;
|
||||
|
||||
lock_kernel();
|
||||
if ( current_device == -1 ) {
|
||||
unlock_kernel();
|
||||
return 0;
|
||||
}
|
||||
unlock_kernel();
|
||||
/*
|
||||
* FIXME: unmap memory
|
||||
*/
|
||||
|
@@ -242,6 +242,8 @@

-------------------------------------------------------------------------*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define REVISION "Revision: 3.20"
#define VERSION "Id: cdrom.c 3.20 2003/12/17"

@@ -314,11 +316,17 @@ static const char *mrw_format_status[] = {
static const char *mrw_address_space[] = { "DMA", "GAA" };

#if (ERRLOGMASK!=CD_NOTHING)
#define cdinfo(type, fmt, args...) \
	if ((ERRLOGMASK & type) || debug==1 ) \
		printk(KERN_INFO "cdrom: " fmt, ## args)
#define cdinfo(type, fmt, args...)			\
do {							\
	if ((ERRLOGMASK & type) || debug == 1)		\
		pr_info(fmt, ##args);			\
} while (0)
#else
#define cdinfo(type, fmt, args...)
#define cdinfo(type, fmt, args...)			\
do {							\
	if (0 && (ERRLOGMASK & type) || debug == 1)	\
		pr_info(fmt, ##args);			\
} while (0)
#endif

/* These are used to simplify getting data in from and back to user land */
@@ -395,7 +403,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
	if (cdo->open == NULL || cdo->release == NULL)
		return -EINVAL;
	if (!banner_printed) {
		printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n");
		pr_info("Uniform CD-ROM driver " REVISION "\n");
		banner_printed = 1;
		cdrom_sysctl_register();
	}
@@ -546,7 +554,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
	unsigned char buffer[12];
	int ret;

	printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
	pr_info("%sstarting format\n", cont ? "Re" : "");

	/*
	 * FmtData bit set (bit 4), format type is 1
@@ -576,7 +584,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)

	ret = cdi->ops->generic_packet(cdi, &cgc);
	if (ret)
		printk(KERN_INFO "cdrom: bgformat failed\n");
		pr_info("bgformat failed\n");

	return ret;
}
@@ -622,8 +630,7 @@ static int cdrom_mrw_exit(struct cdrom_device_info *cdi)

	ret = 0;
	if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
		printk(KERN_INFO "cdrom: issuing MRW back ground "
				"format suspend\n");
		pr_info("issuing MRW background format suspend\n");
		ret = cdrom_mrw_bgformat_susp(cdi, 0);
	}

@@ -658,7 +665,8 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
	if ((ret = cdrom_mode_select(cdi, &cgc)))
		return ret;

	printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
	pr_info("%s: mrw address space %s selected\n",
		cdi->name, mrw_address_space[space]);
	return 0;
}

@@ -762,7 +770,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
	 * always reset to DMA lba space on open
	 */
	if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
		printk(KERN_ERR "cdrom: failed setting lba address space\n");
		pr_err("failed setting lba address space\n");
		return 1;
	}

@@ -781,8 +789,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
	 * 3	- MRW formatting complete
	 */
	ret = 0;
	printk(KERN_INFO "cdrom open: mrw_status '%s'\n",
			mrw_format_status[di.mrw_status]);
	pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]);
	if (!di.mrw_status)
		ret = 1;
	else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
@@ -932,8 +939,7 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
		return;
	}

	printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n",
	       cdi->name);
	pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name);

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
@@ -2176,7 +2182,7 @@ static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf,
	 * frame dma, so drop to single frame dma if we need to
	 */
	if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
		printk("cdrom: dropping to single frame dma\n");
		pr_info("dropping to single frame dma\n");
		cdi->cdda_method = CDDA_BPC_SINGLE;
		goto retry;
	}
@@ -2189,7 +2195,7 @@ static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf,
	if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
		return ret;

	printk("cdrom: dropping to old style cdda (sense=%x)\n", cdi->last_sense);
	pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense);
	cdi->cdda_method = CDDA_OLD;
	return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
@@ -3401,7 +3407,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
			       "\t%d", CDROM_CAN(val) != 0);
			break;
		default:
			printk(KERN_INFO "cdrom: invalid option%d\n", option);
			pr_info("invalid option%d\n", option);
			return 1;
		}
		if (!ret)
@@ -3491,7 +3497,7 @@ static int cdrom_sysctl_info(ctl_table *ctl, int write,
	mutex_unlock(&cdrom_mutex);
	return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
	printk(KERN_INFO "cdrom: info buffer too small\n");
	pr_info("info buffer too small\n");
	goto doit;
}

@@ -3665,7 +3671,7 @@ static int __init cdrom_init(void)

static void __exit cdrom_exit(void)
{
	printk(KERN_INFO "Uniform CD-ROM driver unloaded\n");
	pr_info("Uniform CD-ROM driver unloaded\n");
	cdrom_sysctl_unregister();
}
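The cdrom.c conversion above hinges on one line: defining pr_fmt() before the includes makes every pr_info()/pr_err() in the file prefix itself with the module name, so the hand-written "cdrom: " prefixes can be dropped. A minimal sketch of the mechanism:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example(void)
{
	pr_info("info buffer too small\n");	/* prints "cdrom: info buffer too small" */
}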
@@ -19,6 +19,8 @@
 *
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/fs.h>
@@ -32,6 +34,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/smp_lock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
@@ -339,8 +342,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
 tocuse = 0;
 err = gdrom_readtoc_cmd(gd.toc, 0);
 if (err) {
-printk(KERN_INFO "GDROM: Could not get CD "
-"table of contents\n");
+pr_info("Could not get CD table of contents\n");
 return -ENXIO;
 }
 }
@@ -357,8 +359,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
 } while (track >= fentry);

 if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
-printk(KERN_INFO "GDROM: No data on the last "
-"session of the CD\n");
+pr_info("No data on the last session of the CD\n");
 gdrom_getsense(NULL);
 return -ENXIO;
 }
@@ -451,14 +452,14 @@ static int gdrom_getsense(short *bufstring)
 goto cleanup_sense;
 insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
 if (sense[1] & 40) {
-printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
+pr_info("Drive not ready - command aborted\n");
 goto cleanup_sense;
 }
 sense_key = sense[1] & 0x0F;
 if (sense_key < ARRAY_SIZE(sense_texts))
-printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text);
+pr_info("%s\n", sense_texts[sense_key].text);
 else
-printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key);
+pr_err("Unknown sense key: %d\n", sense_key);
 if (bufstring) /* return addional sense data */
 memcpy(bufstring, &sense[4], 2);
 if (sense_key < 2)
@@ -492,12 +493,18 @@ static struct cdrom_device_ops gdrom_ops = {

 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 {
-return cdrom_open(gd.cd_info, bdev, mode);
+int ret;
+lock_kernel();
+ret = cdrom_open(gd.cd_info, bdev, mode);
+unlock_kernel();
+return ret;
 }

 static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
 {
+lock_kernel();
 cdrom_release(gd.cd_info, mode);
+unlock_kernel();
 return 0;
 }

@@ -509,7 +516,13 @@ static int gdrom_bdops_mediachanged(struct gendisk *disk)
 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
 unsigned cmd, unsigned long arg)
 {
-return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+int ret;
+
+lock_kernel();
+ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+unlock_kernel();
+
+return ret;
 }

 static const struct block_device_operations gdrom_bdops = {
@@ -517,7 +530,7 @@ static const struct block_device_operations gdrom_bdops = {
 .open = gdrom_bdops_open,
 .release = gdrom_bdops_release,
 .media_changed = gdrom_bdops_mediachanged,
-.locked_ioctl = gdrom_bdops_ioctl,
+.ioctl = gdrom_bdops_ioctl,
 };

 static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
@@ -643,14 +656,13 @@ static void gdrom_request(struct request_queue *rq)
 struct request *req;

 while ((req = blk_fetch_request(rq)) != NULL) {
-if (!blk_fs_request(req)) {
-printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
+if (req->cmd_type != REQ_TYPE_FS) {
+printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
 __blk_end_request_all(req, -EIO);
 continue;
 }
 if (rq_data_dir(req) != READ) {
-printk(KERN_NOTICE "GDROM: Read only device -");
-printk(" write request ignored\n");
+pr_notice("Read only device - write request ignored\n");
 __blk_end_request_all(req, -EIO);
 continue;
 }
@@ -685,7 +697,7 @@ static int __devinit gdrom_outputversion(void)
 firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
 if (!firmw_ver)
 goto free_manuf_name;
-printk(KERN_INFO "GDROM: %s from %s with firmware %s\n",
+pr_info("%s from %s with firmware %s\n",
 model_name, manuf_name, firmw_ver);
 err = 0;
 kfree(firmw_ver);
@@ -757,7 +769,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 int err;
 /* Start the device */
 if (gdrom_execute_diagnostic() != 1) {
-printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n");
+pr_warning("ATA Probe for GDROM failed\n");
 return -ENODEV;
 }
 /* Print out firmware ID */
@@ -767,7 +779,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
 if (gdrom_major <= 0)
 return gdrom_major;
-printk(KERN_INFO "GDROM: Registered with major number %d\n",
+pr_info("Registered with major number %d\n",
 gdrom_major);
 /* Specify basic properties of drive */
 gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
@@ -818,7 +830,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
 gdrom_major = 0;
 probe_fail_no_mem:
-printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err);
+pr_warning("Probe failed - error is 0x%X\n", err);
 return err;
 }

@@ -31,6 +31,8 @@
 * the OS/400 partition.
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/major.h>
 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
@@ -40,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/completion.h>
 #include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>

@@ -53,9 +56,6 @@

 #define VIOCD_VERS "1.06"

-#define VIOCD_KERN_WARNING KERN_WARNING "viocd: "
-#define VIOCD_KERN_INFO KERN_INFO "viocd: "
-
 /*
 * Should probably make this a module parameter....sigh
 */
@@ -154,13 +154,21 @@ static const struct file_operations proc_viocd_operations = {
 static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
 {
 struct disk_info *di = bdev->bd_disk->private_data;
-return cdrom_open(&di->viocd_info, bdev, mode);
+int ret;
+
+lock_kernel();
+ret = cdrom_open(&di->viocd_info, bdev, mode);
+unlock_kernel();
+
+return ret;
 }

 static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
 {
 struct disk_info *di = disk->private_data;
+lock_kernel();
 cdrom_release(&di->viocd_info, mode);
+unlock_kernel();
 return 0;
 }

@@ -168,7 +176,13 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
 unsigned cmd, unsigned long arg)
 {
 struct disk_info *di = bdev->bd_disk->private_data;
-return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+int ret;
+
+lock_kernel();
+ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+unlock_kernel();
+
+return ret;
 }

 static int viocd_blk_media_changed(struct gendisk *disk)
@@ -181,7 +195,7 @@ static const struct block_device_operations viocd_fops = {
 .owner = THIS_MODULE,
 .open = viocd_blk_open,
 .release = viocd_blk_release,
-.locked_ioctl = viocd_blk_ioctl,
+.ioctl = viocd_blk_ioctl,
 .media_changed = viocd_blk_media_changed,
 };

@@ -202,9 +216,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 0, 0, 0);
 if (hvrc != 0) {
-printk(VIOCD_KERN_WARNING
-"bad rc on HvCallEvent_signalLpEventFast %d\n",
-(int)hvrc);
+pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+(int)hvrc);
 return -EIO;
 }

@@ -213,8 +226,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
 if (we.rc) {
 const struct vio_error_entry *err =
 vio_lookup_rc(viocd_err_table, we.sub_result);
-printk(VIOCD_KERN_WARNING "bad rc %d:0x%04X on open: %s\n",
-we.rc, we.sub_result, err->msg);
+pr_warning("bad rc %d:0x%04X on open: %s\n",
+we.rc, we.sub_result, err->msg);
 return -err->errno;
 }

@@ -234,9 +247,8 @@ static void viocd_release(struct cdrom_device_info *cdi)
 viopath_targetinst(viopath_hostLp), 0,
 VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
 if (hvrc != 0)
-printk(VIOCD_KERN_WARNING
-"bad rc on HvCallEvent_signalLpEventFast %d\n",
-(int)hvrc);
+pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+(int)hvrc);
 }

 /* Send a read or write request to OS/400 */
@@ -262,13 +274,12 @@ static int send_request(struct request *req)

 sg_init_table(&sg, 1);
 if (blk_rq_map_sg(req->q, req, &sg) == 0) {
-printk(VIOCD_KERN_WARNING
-"error setting up scatter/gather list\n");
+pr_warning("error setting up scatter/gather list\n");
 return -1;
 }

 if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
-printk(VIOCD_KERN_WARNING "error allocating sg tce\n");
+pr_warning("error allocating sg tce\n");
 return -1;
 }
 dmaaddr = sg_dma_address(&sg);
@@ -284,7 +295,7 @@ static int send_request(struct request *req)
 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
 (u64)blk_rq_pos(req) * 512, len, 0);
 if (hvrc != HvLpEvent_Rc_Good) {
-printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
+pr_warning("hv error on op %d\n", (int)hvrc);
 return -1;
 }

@@ -298,11 +309,10 @@ static void do_viocd_request(struct request_queue *q)
 struct request *req;

 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
-if (!blk_fs_request(req))
+if (req->cmd_type != REQ_TYPE_FS)
 __blk_end_request_all(req, -EIO);
 else if (send_request(req) < 0) {
-printk(VIOCD_KERN_WARNING
-"unable to send message to OS/400!");
+pr_warning("unable to send message to OS/400!\n");
 __blk_end_request_all(req, -EIO);
 } else
 rwreq++;
@@ -327,8 +337,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 0, 0, 0);
 if (hvrc != 0) {
-printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
-(int)hvrc);
+pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+(int)hvrc);
 return -EIO;
 }

@@ -338,9 +348,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 if (we.rc) {
 const struct vio_error_entry *err =
 vio_lookup_rc(viocd_err_table, we.sub_result);
-printk(VIOCD_KERN_WARNING
-"bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
-we.rc, we.sub_result, err->msg);
+pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
+we.rc, we.sub_result, err->msg);
 return 0;
 }

@@ -367,8 +376,8 @@ static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
 (u64)&we, VIOVERSION << 16,
 (device_no << 48) | (flags << 32), 0, 0, 0);
 if (hvrc != 0) {
-printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
-(int)hvrc);
+pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+(int)hvrc);
 return -EIO;
 }

@@ -455,8 +464,7 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 return;
 /* First, we should NEVER get an int here...only acks */
 if (hvlpevent_is_int(event)) {
-printk(VIOCD_KERN_WARNING
-"Yikes! got an int in viocd event handler!\n");
+pr_warning("Yikes! got an int in viocd event handler!\n");
 if (hvlpevent_need_ack(event)) {
 event->xRc = HvLpEvent_Rc_InvalidSubtype;
 HvCallEvent_ackLpEvent(event);
@@ -510,10 +518,9 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 const struct vio_error_entry *err =
 vio_lookup_rc(viocd_err_table,
 bevent->sub_result);
-printk(VIOCD_KERN_WARNING "request %p failed "
-"with rc %d:0x%04X: %s\n",
-req, event->xRc,
-bevent->sub_result, err->msg);
+pr_warning("request %p failed with rc %d:0x%04X: %s\n",
+req, event->xRc,
+bevent->sub_result, err->msg);
 __blk_end_request_all(req, -EIO);
 } else
 __blk_end_request_all(req, 0);
@@ -524,9 +531,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 break;

 default:
-printk(VIOCD_KERN_WARNING
-"message with invalid subtype %0x04X!\n",
-event->xSubtype & VIOMINOR_SUBTYPE_MASK);
+pr_warning("message with invalid subtype %0x04X!\n",
+event->xSubtype & VIOMINOR_SUBTYPE_MASK);
 if (hvlpevent_need_ack(event)) {
 event->xRc = HvLpEvent_Rc_InvalidSubtype;
 HvCallEvent_ackLpEvent(event);
@@ -593,23 +599,19 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);

 if (register_cdrom(c) != 0) {
-printk(VIOCD_KERN_WARNING "Cannot register viocd CD-ROM %s!\n",
-c->name);
+pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
 goto out;
 }
-printk(VIOCD_KERN_INFO "cd %s is iSeries resource %10.10s "
-"type %4.4s, model %3.3s\n",
-c->name, d->rsrcname, d->type, d->model);
+pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
+c->name, d->rsrcname, d->type, d->model);
 q = blk_init_queue(do_viocd_request, &viocd_reqlock);
 if (q == NULL) {
-printk(VIOCD_KERN_WARNING "Cannot allocate queue for %s!\n",
-c->name);
+pr_warning("Cannot allocate queue for %s!\n", c->name);
 goto out_unregister_cdrom;
 }
 gendisk = alloc_disk(1);
 if (gendisk == NULL) {
-printk(VIOCD_KERN_WARNING "Cannot create gendisk for %s!\n",
-c->name);
+pr_warning("Cannot create gendisk for %s!\n", c->name);
 goto out_cleanup_queue;
 }
 gendisk->major = VIOCD_MAJOR;
@@ -682,21 +684,19 @@ static int __init viocd_init(void)
 return -ENODEV;
 }

-printk(VIOCD_KERN_INFO "vers " VIOCD_VERS ", hosting partition %d\n",
-viopath_hostLp);
+pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);

 if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
-printk(VIOCD_KERN_WARNING "Unable to get major %d for %s\n",
-VIOCD_MAJOR, VIOCD_DEVICE);
+pr_warning("Unable to get major %d for %s\n",
+VIOCD_MAJOR, VIOCD_DEVICE);
 return -EIO;
 }

 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
 MAX_CD_REQ + 2);
 if (ret) {
-printk(VIOCD_KERN_WARNING
-"error opening path to host partition %d\n",
-viopath_hostLp);
+pr_warning("error opening path to host partition %d\n",
+viopath_hostLp);
 goto out_unregister;
 }

@@ -190,7 +190,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)

 BUG_ON(sense_len > sizeof(*sense));

-if (blk_sense_request(rq) || drive->sense_rq_armed)
+if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
 return;

 memset(sense, 0, sizeof(*sense));
@@ -307,13 +307,16 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);

 int ide_cd_get_xferlen(struct request *rq)
 {
-if (blk_fs_request(rq))
+switch (rq->cmd_type) {
+case REQ_TYPE_FS:
 return 32768;
-else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-rq->cmd_type == REQ_TYPE_ATA_PC)
+case REQ_TYPE_SENSE:
+case REQ_TYPE_BLOCK_PC:
+case REQ_TYPE_ATA_PC:
 return blk_rq_bytes(rq);
-else
+default:
 return 0;
+}
 }
 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);

@@ -474,12 +477,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 if (uptodate == 0)
 drive->failed_pc = NULL;

-if (blk_special_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SPECIAL) {
 rq->errors = 0;
 error = 0;
 } else {

-if (blk_fs_request(rq) == 0 && uptodate <= 0) {
+if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
 if (rq->errors == 0)
 rq->errors = -EIO;
 }

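The ide-atapi hunks above replace the old blk_fs_request()/blk_sense_request()/blk_pc_request() helpers with direct tests of rq->cmd_type; where a function discriminated several request types, the else-if chain collapses naturally into a switch, as ide_cd_get_xferlen() shows. A sketch of the resulting idiom, assuming the 2.6.36 request types (the helper name is hypothetical, not kernel code):

static unsigned int xferlen_for(struct request *rq)
{
        switch (rq->cmd_type) {
        case REQ_TYPE_FS:               /* normal filesystem I/O */
                return 32768;
        case REQ_TYPE_SENSE:            /* autogenerated sense fetch */
        case REQ_TYPE_BLOCK_PC:         /* SG_IO / packet command */
                return blk_rq_bytes(rq);        /* size of the attached buffer */
        default:
                return 0;
        }
}
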
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/errno.h>
@@ -176,7 +177,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 if (!sense->valid)
 break;
 if (failed_command == NULL ||
-!blk_fs_request(failed_command))
+failed_command->cmd_type != REQ_TYPE_FS)
 break;
 sector = (sense->information[0] << 24) |
 (sense->information[1] << 16) |
@@ -292,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 "stat 0x%x",
 rq->cmd[0], rq->cmd_type, err, stat);

-if (blk_sense_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SENSE) {
 /*
 * We got an error trying to get sense info from the drive
 * (probably while trying to recover from a former error).
@@ -303,7 +304,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 }

 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
-if (blk_pc_request(rq) && !rq->errors)
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
 rq->errors = SAM_STAT_CHECK_CONDITION;

 if (blk_noretry_request(rq))
@@ -311,13 +312,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)

 switch (sense_key) {
 case NOT_READY:
-if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) {
+if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
 if (ide_cd_breathe(drive, rq))
 return 1;
 } else {
 cdrom_saw_media_change(drive);

-if (blk_fs_request(rq) && !blk_rq_quiet(rq))
+if (rq->cmd_type == REQ_TYPE_FS &&
+!(rq->cmd_flags & REQ_QUIET))
 printk(KERN_ERR PFX "%s: tray open\n",
 drive->name);
 }
@@ -326,7 +328,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 case UNIT_ATTENTION:
 cdrom_saw_media_change(drive);

-if (blk_fs_request(rq) == 0)
+if (rq->cmd_type != REQ_TYPE_FS)
 return 0;

 /*
@@ -352,7 +354,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 * No point in retrying after an illegal request or data
 * protect error.
 */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "command error", stat);
 do_end_request = 1;
 break;
@@ -361,20 +363,20 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 * No point in re-trying a zillion times on a bad sector.
 * If we got here the error is not correctable.
 */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "media error "
 "(bad sector)", stat);
 do_end_request = 1;
 break;
 case BLANK_CHECK:
 /* disk appears blank? */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "media error (blank)",
 stat);
 do_end_request = 1;
 break;
 default:
-if (blk_fs_request(rq) == 0)
+if (rq->cmd_type != REQ_TYPE_FS)
 break;
 if (err & ~ATA_ABORTED) {
 /* go to the default handler for other errors */
@@ -385,7 +387,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 do_end_request = 1;
 }

-if (blk_fs_request(rq) == 0) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 rq->cmd_flags |= REQ_FAILED;
 do_end_request = 1;
 }
@@ -532,7 +534,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 ide_expiry_t *expiry = NULL;
 int dma_error = 0, dma, thislen, uptodate = 0;
 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
-int sense = blk_sense_request(rq);
+int sense = (rq->cmd_type == REQ_TYPE_SENSE);
 unsigned int timeout;
 u16 len;
 u8 ireason, stat;
@@ -575,7 +577,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 ide_read_bcount_and_ireason(drive, &len, &ireason);

-thislen = blk_fs_request(rq) ? len : cmd->nleft;
+thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
 if (thislen > len)
 thislen = len;

@@ -584,7 +586,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 /* If DRQ is clear, the command has completed. */
 if ((stat & ATA_DRQ) == 0) {
-if (blk_fs_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS) {
 /*
 * If we're not done reading/writing, complain.
 * Otherwise, complete the command normally.
@@ -598,7 +600,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 rq->cmd_flags |= REQ_FAILED;
 uptodate = 0;
 }
-} else if (!blk_pc_request(rq)) {
+} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 ide_cd_request_sense_fixup(drive, cmd);

 uptodate = cmd->nleft ? 0 : 1;
@@ -647,7 +649,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 /* pad, if necessary */
 if (len > 0) {
-if (blk_fs_request(rq) == 0 || write == 0)
+if (rq->cmd_type != REQ_TYPE_FS || write == 0)
 ide_pad_transfer(drive, write, len);
 else {
 printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -656,11 +658,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 }
 }

-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 timeout = rq->timeout;
 } else {
 timeout = ATAPI_WAIT_PC;
-if (!blk_fs_request(rq))
+if (rq->cmd_type != REQ_TYPE_FS)
 expiry = ide_cd_expiry;
 }

@@ -669,7 +671,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 return ide_started;

 out_end:
-if (blk_pc_request(rq) && rc == 0) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
 rq->resid_len = 0;
 blk_end_request_all(rq, 0);
 hwif->rq = NULL;
@@ -677,7 +679,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 if (sense && uptodate)
 ide_cd_complete_failed_rq(drive, rq);

-if (blk_fs_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS) {
 if (cmd->nleft == 0)
 uptodate = 1;
 } else {
@@ -690,7 +692,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 return ide_stopped;

 /* make sure it's fully ended */
-if (blk_fs_request(rq) == 0) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 rq->resid_len -= cmd->nbytes - cmd->nleft;
 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
 rq->resid_len += cmd->last_xfer_len;
@@ -750,7 +752,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
 rq->cmd[0], rq->cmd_type);

-if (blk_pc_request(rq))
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 rq->cmd_flags |= REQ_QUIET;
 else
 rq->cmd_flags &= ~REQ_FAILED;
@@ -791,21 +793,26 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 if (drive->debug_mask & IDE_DBG_RQ)
 blk_dump_rq_flags(rq, "ide_cd_do_request");

-if (blk_fs_request(rq)) {
+switch (rq->cmd_type) {
+case REQ_TYPE_FS:
 if (cdrom_start_rw(drive, rq) == ide_stopped)
 goto out_end;
-} else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-rq->cmd_type == REQ_TYPE_ATA_PC) {
+break;
+case REQ_TYPE_SENSE:
+case REQ_TYPE_BLOCK_PC:
+case REQ_TYPE_ATA_PC:
 if (!rq->timeout)
 rq->timeout = ATAPI_WAIT_PC;

 cdrom_do_block_pc(drive, rq);
-} else if (blk_special_request(rq)) {
+break;
+case REQ_TYPE_SPECIAL:
 /* right now this can only be a reset... */
 uptodate = 1;
 goto out_end;
-} else
+default:
 BUG();
+}

 /* prepare sense request for this command */
 ide_prep_sense(drive, rq);
@@ -817,7 +824,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,

 cmd.rq = rq;

-if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 ide_map_sg(drive, &cmd);
 }
@@ -1373,9 +1380,9 @@ static int ide_cdrom_prep_pc(struct request *rq)

 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
 {
-if (blk_fs_request(rq))
+if (rq->cmd_type == REQ_TYPE_FS)
 return ide_cdrom_prep_fs(q, rq);
-else if (blk_pc_request(rq))
+else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 return ide_cdrom_prep_pc(rq);

 return 0;
@@ -1592,17 +1599,19 @@ static struct ide_driver ide_cdrom_driver = {

 static int idecd_open(struct block_device *bdev, fmode_t mode)
 {
-struct cdrom_info *info = ide_cd_get(bdev->bd_disk);
-int rc = -ENOMEM;
+struct cdrom_info *info;
+int rc = -ENXIO;

+lock_kernel();
+info = ide_cd_get(bdev->bd_disk);
 if (!info)
-return -ENXIO;
+goto out;

 rc = cdrom_open(&info->devinfo, bdev, mode);

 if (rc < 0)
 ide_cd_put(info);

+out:
+unlock_kernel();
 return rc;
 }

@@ -1610,9 +1619,11 @@ static int idecd_release(struct gendisk *disk, fmode_t mode)
 {
 struct cdrom_info *info = ide_drv_g(disk, cdrom_info);

+lock_kernel();
 cdrom_release(&info->devinfo, mode);

 ide_cd_put(info);
+unlock_kernel();

 return 0;
 }
@@ -1656,7 +1667,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
 return 0;
 }

-static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 unsigned int cmd, unsigned long arg)
 {
 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
@@ -1678,6 +1689,19 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
 return err;
 }

+static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+unsigned int cmd, unsigned long arg)
+{
+int ret;
+
+lock_kernel();
+ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
+unlock_kernel();
+
+return ret;
+}
+

 static int idecd_media_changed(struct gendisk *disk)
 {
 struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
@@ -1698,7 +1722,7 @@ static const struct block_device_operations idecd_ops = {
 .owner = THIS_MODULE,
 .open = idecd_open,
 .release = idecd_release,
-.locked_ioctl = idecd_ioctl,
+.ioctl = idecd_ioctl,
 .media_changed = idecd_media_changed,
 .revalidate_disk = idecd_revalidate_disk
 };

@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 touch it at all. */

 if (cgc->data_direction == CGC_DATA_WRITE)
-flags |= REQ_RW;
+flags |= REQ_WRITE;

 if (cgc->sense)
 memset(cgc->sense, 0, sizeof(struct request_sense));

@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 ide_hwif_t *hwif = drive->hwif;

 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
-BUG_ON(!blk_fs_request(rq));
+BUG_ON(rq->cmd_type != REQ_TYPE_FS);

 ledtrig_ide_activity();

@@ -427,10 +427,15 @@ static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
 drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
 }

-static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
 {
 ide_drive_t *drive = q->queuedata;
-struct ide_cmd *cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+struct ide_cmd *cmd;
+
+if (!(rq->cmd_flags & REQ_FLUSH))
+return BLKPREP_OK;
+
+cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);

 /* FIXME: map struct ide_taskfile on rq->cmd[] */
 BUG_ON(cmd == NULL);
@@ -448,6 +453,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 rq->special = cmd;
 cmd->rq = rq;
+
+return BLKPREP_OK;
 }

 ide_devset_get(multcount, mult_count);
@@ -513,7 +520,6 @@ static void update_ordered(ide_drive_t *drive)
 {
 u16 *id = drive->id;
 unsigned ordered = QUEUE_ORDERED_NONE;
-prepare_flush_fn *prep_fn = NULL;

 if (drive->dev_flags & IDE_DFLAG_WCACHE) {
 unsigned long long capacity;
@@ -538,12 +544,12 @@ static void update_ordered(ide_drive_t *drive)

 if (barrier) {
 ordered = QUEUE_ORDERED_DRAIN_FLUSH;
-prep_fn = idedisk_prepare_flush;
+blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
 }
 } else
 ordered = QUEUE_ORDERED_DRAIN;

-blk_queue_ordered(drive->queue, ordered, prep_fn);
+blk_queue_ordered(drive->queue, ordered);
 }

 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

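In the ide-disk hunks above, blk_queue_ordered() loses its prepare_flush_fn argument; a driver that must rewrite flush requests into its own command now installs an ordinary prep_rq_fn via blk_queue_prep_rq() and keys off REQ_FLUSH, returning BLKPREP_OK once the request is set up. A sketch of the shape (illustrative only, not the ide-disk code; the command rewrite is a placeholder):

static int flush_prep_fn(struct request_queue *q, struct request *rq)
{
        if (!(rq->cmd_flags & REQ_FLUSH))
                return BLKPREP_OK;      /* not a flush, pass through */

        /* turn the empty flush request into a driver-specific command */
        rq->cmd_type = REQ_TYPE_SPECIAL;
        return BLKPREP_OK;
}

/* installed once at queue setup time: blk_queue_prep_rq(q, flush_prep_fn); */
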
@@ -1,6 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/ide.h>
 #include <linux/hdreg.h>
+#include <linux/smp_lock.h>

 #include "ide-disk.h"

@@ -18,9 +19,13 @@ int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
 {
 int err;

+lock_kernel();
 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
 if (err != -EOPNOTSUPP)
-return err;
+goto out;

-return generic_ide_ioctl(drive, bdev, cmd, arg);
+err = generic_ide_ioctl(drive, bdev, cmd, arg);
+out:
+unlock_kernel();
+return err;
 }

@@ -122,7 +122,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 return ide_stopped;

 /* retry only "normal" I/O: */
-if (!blk_fs_request(rq)) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 struct ide_cmd *cmd = rq->special;

@@ -146,7 +146,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 {
 struct request *rq = drive->hwif->rq;

-if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
+if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
+rq->cmd[0] == REQ_DRIVE_RESET) {
 if (err <= 0 && rq->errors == 0)
 rq->errors = -EIO;
 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));

@@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 drive->failed_pc = NULL;

 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
-(rq && blk_pc_request(rq)))
+(rq && rq->cmd_type == REQ_TYPE_BLOCK_PC))
 uptodate = 1; /* FIXME */
 else if (pc->c[0] == GPCMD_REQUEST_SENSE) {

@@ -98,7 +98,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 "Aborting request!\n");
 }

-if (blk_special_request(rq))
+if (rq->cmd_type == REQ_TYPE_SPECIAL)
 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;

 return uptodate;
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 memcpy(rq->cmd, pc->c, 12);

 pc->rq = rq;
-if (rq->cmd_flags & REQ_RW)
+if (rq->cmd_flags & REQ_WRITE)
 pc->flags |= PC_FLAG_WRITING;

 pc->flags |= PC_FLAG_DMA_OK;
@@ -247,14 +247,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 } else
 printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

-if (blk_special_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SPECIAL) {
 rq->errors = 0;
 ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 return ide_stopped;
 } else
 goto out_end;
 }
-if (blk_fs_request(rq)) {
+
+switch (rq->cmd_type) {
+case REQ_TYPE_FS:
 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
 (blk_rq_sectors(rq) % floppy->bs_factor)) {
 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -263,13 +265,18 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 }
 pc = &floppy->queued_pc;
 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
-} else if (blk_special_request(rq) || blk_sense_request(rq)) {
+break;
+case REQ_TYPE_SPECIAL:
+case REQ_TYPE_SENSE:
 pc = (struct ide_atapi_pc *)rq->special;
-} else if (blk_pc_request(rq)) {
+break;
+case REQ_TYPE_BLOCK_PC:
 pc = &floppy->queued_pc;
 idefloppy_blockpc_cmd(floppy, pc, rq);
-} else
+break;
+default:
 BUG();
+}

 ide_prep_sense(drive, rq);

@@ -280,7 +287,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,

 cmd.rq = rq;

-if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 ide_map_sg(drive, &cmd);
 }
@@ -290,7 +297,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 return ide_floppy_issue_pc(drive, &cmd, pc);
 out_end:
 drive->failed_pc = NULL;
-if (blk_fs_request(rq) == 0 && rq->errors == 0)
+if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 rq->errors = -EIO;
 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 return ide_stopped;

@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/ide.h>
 #include <linux/cdrom.h>
+#include <linux/smp_lock.h>

 #include <asm/unaligned.h>

@@ -275,12 +276,15 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
 void __user *argp = (void __user *)arg;
 int err;

-if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR)
-return ide_floppy_lockdoor(drive, &pc, arg, cmd);
+lock_kernel();
+if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+goto out;
+}

 err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
 if (err != -ENOTTY)
-return err;
+goto out;

 /*
 * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
@@ -293,5 +297,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
 if (err == -ENOTTY)
 err = generic_ide_ioctl(drive, bdev, cmd, arg);

+out:
+unlock_kernel();
 return err;
 }

@@ -1,3 +1,4 @@
+#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -237,6 +238,18 @@ static int ide_gd_open(struct block_device *bdev, fmode_t mode)
 return ret;
 }

+static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+int ret;
+
+lock_kernel();
+ret = ide_gd_open(bdev, mode);
+unlock_kernel();
+
+return ret;
+}
+

 static int ide_gd_release(struct gendisk *disk, fmode_t mode)
 {
 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -244,6 +257,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)

 ide_debug_log(IDE_DBG_FUNC, "enter");

+lock_kernel();
 if (idkp->openers == 1)
 drive->disk_ops->flush(drive);

@@ -255,6 +269,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)
 idkp->openers--;

 ide_disk_put(idkp);
+unlock_kernel();

 return 0;
 }
@@ -321,9 +336,9 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,

 static const struct block_device_operations ide_gd_ops = {
 .owner = THIS_MODULE,
-.open = ide_gd_open,
+.open = ide_gd_unlocked_open,
 .release = ide_gd_release,
-.locked_ioctl = ide_gd_ioctl,
+.ioctl = ide_gd_ioctl,
 .getgeo = ide_gd_getgeo,
 .media_changed = ide_gd_media_changed,
 .unlock_native_capacity = ide_gd_unlock_native_capacity,

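The lock_kernel()/unlock_kernel() pairs added throughout these drivers are the BKL pushdown: the block layer no longer takes the big kernel lock around ->open(), ->release() and ->ioctl() (note the .locked_ioctl members becoming plain .ioctl), so drivers that still depend on it take it themselves, often with a small "unlocked" wrapper as ide-gd does above. The wrapper pattern, sketched with illustrative names:

#include <linux/smp_lock.h>

static int foo_open(struct block_device *bdev, fmode_t mode);

static int foo_unlocked_open(struct block_device *bdev, fmode_t mode)
{
        int ret;

        lock_kernel();          /* keep the serialization the core used to provide */
        ret = foo_open(bdev, mode);
        unlock_kernel();

        return ret;
}
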
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);

 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
 u8 media = drive->media;

 drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 } else {
 if (media == ide_tape)
 rq->errors = IDE_DRV_ERROR_GENERAL;
-else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 rq->errors = -EIO;
 }

@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 ide_startstop_t startstop;

-BUG_ON(!blk_rq_started(rq));
+BUG_ON(!(rq->cmd_flags & REQ_STARTED));

 #ifdef DEBUG
 printk("%s: start_request: current=0x%08lx\n",
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 pm->pm_step == IDE_PM_COMPLETED)
 ide_complete_pm_rq(drive, rq);
 return startstop;
-} else if (!rq->rq_disk && blk_special_request(rq))
+} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
 /*
 * TODO: Once all ULDs have been modified to
 * check for specific op codes rather than

@@ -191,10 +191,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)

 #ifdef DEBUG_PM
 printk("%s: completing PM request, %s\n", drive->name,
-blk_pm_suspend_request(rq) ? "suspend" : "resume");
+(rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
 #endif
 spin_lock_irqsave(q->queue_lock, flags);
-if (blk_pm_suspend_request(rq))
+if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
 blk_stop_queue(q);
 else
 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -210,11 +210,11 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
 struct request_pm_state *pm = rq->special;

-if (blk_pm_suspend_request(rq) &&
+if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
 pm->pm_step == IDE_PM_START_SUSPEND)
 /* Mark drive blocked when starting the suspend sequence. */
 drive->dev_flags |= IDE_DFLAG_BLOCKED;
-else if (blk_pm_resume_request(rq) &&
+else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
 pm->pm_step == IDE_PM_START_RESUME) {
 /*
 * The first thing we do on wakeup is to wait for BSY bit to

@@ -32,6 +32,7 @@
 #include <linux/errno.h>
 #include <linux/genhd.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/ide.h>
@@ -577,7 +578,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
 blk_rq_sectors(rq));

-BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq)));
+BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL ||
+rq->cmd_type == REQ_TYPE_SENSE));

 /* Retry a failed packet command */
 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -1905,7 +1907,11 @@ static const struct file_operations idetape_fops = {

 static int idetape_open(struct block_device *bdev, fmode_t mode)
 {
-struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
+struct ide_tape_obj *tape;
+
+lock_kernel();
+tape = ide_tape_get(bdev->bd_disk, false, 0);
+unlock_kernel();

 if (!tape)
 return -ENXIO;
@@ -1917,7 +1923,10 @@ static int idetape_release(struct gendisk *disk, fmode_t mode)
 {
 struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);

+lock_kernel();
 ide_tape_put(tape);
+unlock_kernel();

 return 0;
 }

@@ -1926,9 +1935,14 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
 {
 struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
 ide_drive_t *drive = tape->drive;
-int err = generic_ide_ioctl(drive, bdev, cmd, arg);
+int err;
+
+lock_kernel();
+err = generic_ide_ioctl(drive, bdev, cmd, arg);
 if (err == -EINVAL)
 err = idetape_blkdev_ioctl(drive, cmd, arg);
+unlock_kernel();

 return err;
 }

@@ -1936,7 +1950,7 @@ static const struct block_device_operations idetape_block_ops = {
 .owner = THIS_MODULE,
 .open = idetape_open,
 .release = idetape_release,
-.locked_ioctl = idetape_ioctl,
+.ioctl = idetape_ioctl,
 };

 static int ide_tape_probe(ide_drive_t *drive)

@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 BUG_ON(num_regions > DM_IO_MAX_REGIONS);

 if (sync)
-rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+rw |= REQ_SYNC | REQ_UNPLUG;

 /*
 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 */
 for (i = 0; i < num_regions; i++) {
 *dp = old_pages;
-if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+if (where[i].count || (rw & REQ_HARDBARRIER))
 do_region(rw, i, where + i, dp, io);
 }

@@ -412,8 +412,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 }
 set_current_state(TASK_RUNNING);

-if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-rw &= ~(1 << BIO_RW_BARRIER);
+if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+rw &= ~REQ_HARDBARRIER;
 goto retry;
 }

@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
-* the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
-* io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+* the queue with blk_unplug() some time later or set REQ_SYNC in
+* io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,

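The dm and md hunks in this area are mechanical fallout of the bio/request flag unification: the per-bio BIO_RW_* bit numbers are gone, bios and requests share one REQ_* namespace, and the shift-and-test constructions (or bio_rw_flagged()) become plain bitwise tests on bi_rw or cmd_flags. A minimal sketch of the new-style test, assuming the 2.6.36 blk_types.h flags (the helper name is illustrative):

#include <linux/bio.h>

static inline int bio_wants_sync_unplug(struct bio *bio)
{
        /* bi_rw now carries REQ_* flags directly, no (1 << BIO_RW_x) shifts */
        return (bio->bi_rw & (REQ_SYNC | REQ_UNPLUG)) ==
               (REQ_SYNC | REQ_UNPLUG);
}
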
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 int r;
 struct dm_io_request io_req = {
-.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 .mem.type = DM_IO_PAGE_LIST,
 .mem.ptr.pl = job->pages,
 .mem.offset = job->offset,

@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 if (error == -EOPNOTSUPP)
 goto out;

-if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 goto out;

 if (unlikely(error)) {

@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 if (!error)
 return 0; /* I/O complete */

-if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 return error;

 if (error == -EOPNOTSUPP)

@@ -15,6 +15,7 @@
 #include <linux/blkpg.h>
 #include <linux/bio.h>
 #include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -338,6 +339,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 struct mapped_device *md;

+lock_kernel();
 spin_lock(&_minor_lock);

 md = bdev->bd_disk->private_data;
@@ -355,6 +357,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)

 out:
 spin_unlock(&_minor_lock);
+unlock_kernel();

 return md ? 0 : -ENXIO;
 }
@@ -362,8 +365,12 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
 struct mapped_device *md = disk->private_data;

+lock_kernel();
 atomic_dec(&md->open_count);
 dm_put(md);
+unlock_kernel();
+
 return 0;
 }

@@ -614,7 +621,7 @@ static void dec_pending(struct dm_io *io, int error)
 */
 spin_lock_irqsave(&md->deferred_lock, flags);
 if (__noflush_suspending(md)) {
-if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 bio_list_add_head(&md->deferred,
 io->bio);
 } else
@@ -626,7 +633,7 @@ static void dec_pending(struct dm_io *io, int error)
 io_error = io->error;
 bio = io->bio;

-if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+if (bio->bi_rw & REQ_HARDBARRIER) {
 /*
 * There can be just one barrier request so we use
 * a per-device variable for error reporting.
@@ -792,12 +799,12 @@ static void dm_end_request(struct request *clone, int error)
 {
 int rw = rq_data_dir(clone);
 int run_queue = 1;
-bool is_barrier = blk_barrier_rq(clone);
+bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct mapped_device *md = tio->md;
 struct request *rq = tio->orig;

-if (blk_pc_request(rq) && !is_barrier) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
 rq->errors = clone->errors;
 rq->resid_len = clone->resid_len;

@@ -844,7 +851,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 struct request_queue *q = rq->q;
 unsigned long flags;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request.
 * Leave it to dm_end_request(), which handles this special
@@ -943,7 +950,7 @@ static void dm_complete_request(struct request *clone, int error)
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct request *rq = tio->orig;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request. So can't use
 * softirq_done with the original.
@@ -972,7 +979,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct request *rq = tio->orig;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request.
 * Leave it to dm_end_request(), which handles this special
@@ -1106,7 +1113,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,

 clone->bi_sector = sector;
 clone->bi_bdev = bio->bi_bdev;
-clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 clone->bi_vcnt = 1;
 clone->bi_size = to_bytes(len);
 clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1140,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,

 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 __bio_clone(clone, bio);
-clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+clone->bi_rw &= ~REQ_HARDBARRIER;
 clone->bi_destructor = dm_bio_destructor;
 clone->bi_sector = sector;
 clone->bi_idx = idx;
@@ -1301,7 +1308,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)

 ci.map = dm_get_live_table(md);
 if (unlikely(!ci.map)) {
-if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+if (!(bio->bi_rw & REQ_HARDBARRIER))
 bio_io_error(bio);
 else
 if (!md->barrier_error)
@@ -1414,7 +1421,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 * we have to queue this io for later.
 */
 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 up_read(&md->io_lock);

 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1462,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 return _dm_request(q, bio);
 }

-/*
-* Mark this request as flush request, so that dm_request_fn() can
-* recognize.
-*/
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
-rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 static bool dm_rq_is_flush_request(struct request *rq)
 {
-if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-rq->cmd[0] == REQ_LB_OP_FLUSH)
+if (rq->cmd_flags & REQ_FLUSH)
 return true;
 else
 return false;
@@ -1912,8 +1908,7 @@ static struct mapped_device *alloc_dev(int minor)
 blk_queue_softirq_done(md->queue, dm_softirq_done);
 blk_queue_prep_rq(md->queue, dm_prep_fn);
 blk_queue_lld_busy(md->queue, dm_lld_busy);
-blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-dm_rq_prepare_flush);
+blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

 md->disk = alloc_disk(1);
 if (!md->disk)
@@ -2296,7 +2291,7 @@ static void dm_wq_work(struct work_struct *work)
 if (dm_request_based(md))
 generic_make_request(c);
 else {
-if (bio_rw_flagged(c, BIO_RW_BARRIER))
+if (c->bi_rw & REQ_HARDBARRIER)
 process_barrier(md, c);
 else
 __split_and_process_bio(md, c);

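With the prepare_flush_fn hook gone, dm no longer tags flush requests itself through REQ_TYPE_LINUX_BLOCK/REQ_LB_OP_FLUSH; the block core marks them with REQ_FLUSH, so recognition collapses to a single flag test, as dm_rq_is_flush_request() above now does. A sketch of that test, assuming the 2.6.36 request flags:

static inline bool rq_is_flush(struct request *rq)
{
        /* the block core sets REQ_FLUSH on the flush/barrier requests it issues */
        return rq->cmd_flags & REQ_FLUSH;
}
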
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 dev_info_t *tmp_dev;
 sector_t start_sector;

-if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 md_barrier_request(mddev, bio);
 return 0;
 }

@@ -36,6 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/sysctl.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
 #include <linux/poll.h>
 #include <linux/ctype.h>
@@ -353,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
 /* an empty barrier - all done */
 bio_endio(bio, 0);
 else {
-bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+bio->bi_rw &= ~REQ_HARDBARRIER;
 if (mddev->pers->make_request(mddev, bio))
 generic_make_request(bio);
 mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +676,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 * if zero is reached.
 * If an error occurred, call md_error
 *
-* As we might need to resubmit the request if BIO_RW_BARRIER
+* As we might need to resubmit the request if REQ_HARDBARRIER
 * causes ENOTSUPP, we allocate a spare bio...
 */
 struct bio *bio = bio_alloc(GFP_NOIO, 1);
-int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;

 bio->bi_bdev = rdev->bdev;
 bio->bi_sector = sector;
@@ -691,7 +692,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 atomic_inc(&mddev->pending_writes);
 if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 struct bio *rbio;
-rw |= (1<<BIO_RW_BARRIER);
+rw |= REQ_HARDBARRIER;
 rbio = bio_clone(bio, GFP_NOIO);
 rbio->bi_private = bio;
 rbio->bi_end_io = super_written_barrier;
@@ -736,7 +737,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 struct completion event;
 int ret;

-rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+rw |= REQ_SYNC | REQ_UNPLUG;

 bio->bi_bdev = bdev;
 bio->bi_sector = sector;
@@ -5902,6 +5903,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 mddev_t *mddev = mddev_find(bdev->bd_dev);
 int err;

+lock_kernel();
 if (mddev->gendisk != bdev->bd_disk) {
 /* we are racing with mddev_put which is discarding this
 * bd_disk.
@@ -5910,6 +5912,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 /* Wait until bdev->bd_disk is definitely gone */
 flush_scheduled_work();
 /* Then retry the open from the top */
+unlock_kernel();
 return -ERESTARTSYS;
 }
 BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5923,6 +5926,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)

 check_disk_size_change(mddev->gendisk, bdev);
 out:
+unlock_kernel();
 return err;
 }

@@ -5931,8 +5935,10 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 mddev_t *mddev = disk->private_data;

 BUG_ON(!mddev);
+lock_kernel();
 atomic_dec(&mddev->openers);
 mddev_put(mddev);
+unlock_kernel();

 return 0;
 }

@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define Faulty 1 /* device is known to have a fault */
 #define In_sync 2 /* device is in_sync with rest of array */
 #define WriteMostly 4 /* Avoid reading if at all possible */
-#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
+#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */
 #define AllReserved 6 /* If whole device is reserved for
 * one array */
 #define AutoDetected 7 /* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 * fails. Only supported
 */
 struct bio *biolist; /* bios that need to be retried
-* because BIO_RW_BARRIER is not supported
+* because REQ_HARDBARRIER is not supported
 */

 atomic_t recovery_active; /* blocks scheduled, but not written */

--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 		*bio = *(mp_bh->master_bio);
 		bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-		bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+		bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
 		generic_make_request(bio);
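Both multipath submission paths follow the same pattern: copy the master bio, retarget it at one member device by adding that device's data_offset to bi_sector and swapping bi_bdev, tag it REQ_FAILFAST_TRANSPORT so a transport error fails over quickly instead of being retried on the same path, then resubmit. A toy model of that retargeting, with demo_* structs standing in for the kernel's bio and rdev:

```c
/* Sketch: retargeting a request at one path of a multipath set.
 * The demo_* types model bio/rdev fields; values are illustrative. */
#include <stdint.h>
#include <stdio.h>

struct demo_bio  { uint64_t bi_sector; int bi_dev; unsigned long bi_rw; };
struct demo_rdev { uint64_t data_offset; int dev; };

#define DEMO_REQ_FAILFAST_TRANSPORT (1UL << 9) /* made-up mask */

static struct demo_bio retarget(struct demo_bio master, struct demo_rdev *r)
{
	struct demo_bio b = master;              /* clone the master bio   */
	b.bi_sector += r->data_offset;           /* shift into the member  */
	b.bi_dev = r->dev;                       /* aim at that device     */
	b.bi_rw |= DEMO_REQ_FAILFAST_TRANSPORT;  /* fail fast, try another */
	return b;
}

int main(void)
{
	struct demo_rdev path = { .data_offset = 2048, .dev = 1 };
	struct demo_bio master = { .bi_sector = 100, .bi_dev = 0, .bi_rw = 0 };
	struct demo_bio b = retarget(master, &path);

	printf("sector=%llu dev=%d rw=%lx\n",
	       (unsigned long long)b.bi_sector, b.bi_dev, b.bi_rw);
	return 0;
}
```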
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
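raid0, like multipath above, diverts barrier bios out of the fast path before any striping math runs: the unlikely() hint tells the compiler the branch is rare, and md_barrier_request() takes ownership of the bio. The shape of that dispatch, sketched with made-up names:

```c
/* Sketch: divert rare flagged requests to a slow path before the
 * common path runs. Flag value and handlers are illustrative. */
#include <stdio.h>

#define DEMO_REQ_HARDBARRIER (1UL << 7)

static void demo_barrier_request(unsigned long rw)
{
	printf("queued for ordered handling (rw=%lx)\n", rw);
}

static int demo_make_request(unsigned long rw)
{
	if (rw & DEMO_REQ_HARDBARRIER) {    /* rare case: punt and return */
		demo_barrier_request(rw);
		return 0;
	}
	printf("fast path (rw=%lx)\n", rw); /* common case */
	return 0;
}

int main(void)
{
	demo_make_request(0);
	demo_make_request(DEMO_REQ_HARDBARRIER);
	return 0;
}
```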
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;
 
 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP.  Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE |
-						(do_sync << BIO_RW_SYNCIO);
+					bio->bi_rw = WRITE | do_sync;
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 			       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 			bio->bi_sector = r1_bio->sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_end_io = raid1_end_read_request;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r1_bio;
 			unplug = 1;
 			generic_make_request(bio);
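One subtlety in the raid1 hunks is worth flagging: do_sync keeps its bool type, and in C99 assigning a masked value to a bool collapses it to 0 or 1. The old idiom shifted that 0/1 back into position (do_sync << BIO_RW_SYNCIO), so a bool was fine; with direct masks, an expression like READ | do_sync only lands on the intended flag if that flag happens to occupy bit 0. A sketch of the trap, with an illustrative flag value (not the kernel's):

```c
/* Sketch: why mixing bool with flag masks is subtle. Values made up. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_REQ_SYNC (1UL << 4) /* hypothetical flag, NOT bit 0 */

int main(void)
{
	unsigned long parent = DEMO_REQ_SYNC;

	bool as_bool = parent & DEMO_REQ_SYNC;          /* collapses to 1  */
	unsigned long as_mask = parent & DEMO_REQ_SYNC; /* stays 0x10      */

	/* OR-ing the bool back in sets bit 0, not the sync bit; keeping
	 * the masked value in an unsigned long preserves the flag: */
	printf("bool: %lx  mask: %lx\n",
	       0UL | as_bool,   /* 0x1  -- wrong bit  */
	       0UL | as_mask);  /* 0x10 -- intended   */
	return 0;
}
```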
Some files were not shown because too many files have changed in this diff.