Commit 9d497e2

Ming Lei authored and axboe committed
block: don't protect submit_bio_checks by q_usage_counter
Commit cc9c884 ("block: call submit_bio_checks under q_usage_counter") uses q_usage_counter to protect submit_bio_checks(), to avoid IO being submitted after the disk is deleted by del_gendisk().

It turns out the protection isn't necessary, because once blk_mq_freeze_queue_wait() in del_gendisk() returns:

1) all in-flight IO has completed;

2) all new IO will fail in __bio_queue_enter(), because q_usage_counter is dead and GD_DEAD is set;

3) both the disk and the request queue instance are safe, since the caller of submit_bio() guarantees that the disk can't be closed.

Once submit_bio_checks() no longer needs the protection of q_usage_counter, we can move it ahead of the calls to blk_mq_submit_bio() and ->submit_bio().

With this change, we no longer throttle the queue while holding an allocated request, so a precious driver tag or request isn't wasted on throttling. It also lets us unify the bio checks for bio-based and request-based drivers.

Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 292c33c commit 9d497e2
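For context on point 2) above, here is a condensed sketch of __bio_queue_enter() — not the verbatim kernel source; the REQ_NOWAIT and blk-pm branches are omitted — showing why a bio submitted after del_gendisk() is failed before it can reach the driver:

/*
 * Condensed sketch: once del_gendisk() has frozen the queue and set
 * GD_DEAD, any new bio is failed here, before submit_bio_checks() or
 * the driver ever sees it.
 */
static int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		/* Sleep until the queue is unfrozen or the disk dies. */
		wait_event(q->mq_freeze_wq,
			   !q->mq_freeze_depth ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state)) {
			/* q_usage_counter is dead: fail the bio with -EIO. */
			bio_io_error(bio);
			return -ENODEV;
		}
	}
	return 0;
}

Because every new bio dies here once GD_DEAD is set, submit_bio_checks() can safely run without holding q_usage_counter.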

2 files changed (+22, -31 lines)


block/blk-core.c

Lines changed: 9 additions & 5 deletions
@@ -787,17 +787,21 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 
 static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
 {
-	if (unlikely(bio_queue_enter(bio) != 0))
-		return;
-	if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
-		disk->fops->submit_bio(bio);
-	blk_queue_exit(disk->queue);
+	if (blk_crypto_bio_prep(&bio)) {
+		if (likely(bio_queue_enter(bio) == 0)) {
+			disk->fops->submit_bio(bio);
+			blk_queue_exit(disk->queue);
+		}
+	}
 }
 
 static void __submit_bio(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
+	if (unlikely(!submit_bio_checks(bio)))
+		return;
+
 	if (!disk->fops->submit_bio)
 		blk_mq_submit_bio(bio);
 	else
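
Reassembled from the hunk above (which is cut off at the else; in the tree that branch dispatches to __submit_bio_fops()), the resulting __submit_bio() runs the checks once, up front, for both driver types:

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	/* Checks no longer need to run under q_usage_counter. */
	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);		/* request-based (blk-mq) */
	else
		__submit_bio_fops(disk, bio);	/* bio-based driver */
}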

block/blk-mq.c

Lines changed: 13 additions & 26 deletions
@@ -2714,26 +2714,18 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
-					       struct bio *bio,
-					       unsigned int nsegs)
+					       struct bio *bio)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
 		.nr_tags	= 1,
+		.cmd_flags	= bio->bi_opf,
 	};
 	struct request *rq;
 
 	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
-	if (unlikely(!submit_bio_checks(bio)))
-		goto queue_exit;
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
 
-	rq_qos_throttle(q, bio);
-
-	/* ->bi_opf is finalized after submit_bio_checks() returns */
-	data.cmd_flags	= bio->bi_opf;
 	if (plug) {
 		data.nr_tags	= plug->nr_ios;
 		plug->nr_ios	= 1;
@@ -2746,13 +2738,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
 	blk_queue_exit(q);
 	return NULL;
 }
 
 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+		struct blk_plug *plug, struct bio *bio)
 {
 	struct request *rq;
 
@@ -2762,21 +2753,14 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	if (!rq || rq->q != q)
 		return NULL;
 
-	if (unlikely(!submit_bio_checks(*bio)))
-		return NULL;
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
 		return NULL;
-	}
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
 		return NULL;
 
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	plug->cached_rq = rq_list_next(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
-	rq_qos_throttle(q, *bio);
 	return rq;
 }
 
@@ -2812,11 +2796,14 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+		return;
+
+	rq_qos_throttle(q, bio);
+
+	rq = blk_mq_get_cached_request(q, plug, bio);
 	if (!rq) {
-		if (!bio)
-			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		rq = blk_mq_get_new_requests(q, plug, bio);
 		if (unlikely(!rq))
 			return;
 	}
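
Pieced together from the hunks above, the reworked entry of blk_mq_submit_bio() now orders merge, then throttle, then request lookup, so rq_qos_throttle() runs before any cached or freshly allocated request is held:

	/* Inside blk_mq_submit_bio(), after bio_integrity_prep(): */
	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
		return;			/* merged into an existing request */

	rq_qos_throttle(q, bio);	/* throttle before holding any request */

	rq = blk_mq_get_cached_request(q, plug, bio);
	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio);
		if (unlikely(!rq))
			return;
	}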
