blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path. This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by some
drivers. To make life easier we also switch to dynamic allocation of
->flush_rq for the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and
"blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ce2c350b2c
commit 18741986a4
7 changed files with 76 additions and 117 deletions
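As background for the diff below, here is a minimal, hedged sketch of the idea in simplified C: allocate ->flush_rq once when the queue is set up, and when a flush sequence starts, let it borrow the tag and ->rq_disk of the request that triggered it, so the driver sees a tag from the normal range. The helper names init_flush_rq(), setup_flush_rq() and exit_flush_rq() are hypothetical and the field assignments are illustrative; this is not the actual patch.

/* Hedged sketch of the flush_rq preallocation idea; not the actual patch. */
#include <linux/blkdev.h>
#include <linux/slab.h>

/* hypothetical helper: called once during queue initialization */
static int init_flush_rq(struct request_queue *q)
{
	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

/*
 * hypothetical helper: called when a flush sequence is kicked off.
 * The preallocated request reuses the tag and ->rq_disk of the request
 * that triggered the flush, so the driver sees a tag from the normal
 * (non-reserved) range instead of a specially reserved one.
 */
static void setup_flush_rq(struct request_queue *q, struct request *orig)
{
	struct request *flush_rq = q->flush_rq;

	blk_rq_init(q, flush_rq);
	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH;
	flush_rq->rq_disk = orig->rq_disk;
	flush_rq->tag = orig->tag;
}

/* hypothetical helper: called on queue teardown */
static void exit_flush_rq(struct request_queue *q)
{
	kfree(q->flush_rq);
	q->flush_rq = NULL;
}

Because the same preallocated request is reused for every flush sequence, no tag has to be reserved up front, which is what makes the reserved_tags bookkeeping in the hunk below unnecessary.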
include/linux/blk-mq.h

@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
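For driver code, the second hunk means the reserved flag disappears from the regular allocator; callers that really need a reserved tag keep using blk_mq_alloc_reserved_request(). A minimal, hypothetical usage sketch follows (the example function, its error handling, and the READ direction are illustrative, not taken from any in-tree driver):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* hypothetical example function, not from any in-tree driver */
static struct request *example_alloc(struct request_queue *q, bool need_reserved)
{
	struct request *rq;

	if (need_reserved)
		/* tags set aside at init time, e.g. for internal commands */
		rq = blk_mq_alloc_reserved_request(q, READ, GFP_KERNEL);
	else
		/* normal tag range; no 'reserved' flag any more */
		rq = blk_mq_alloc_request(q, READ, GFP_KERNEL);

	return rq;	/* may be NULL if no tag/request could be set up */
}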