block: remove queue_lockdep_assert_held
author Christoph Hellwig <hch@lst.de>
Wed, 14 Nov 2018 16:02:08 +0000 (17:02 +0100)
committer Jens Axboe <axboe@kernel.dk>
Thu, 15 Nov 2018 19:13:21 +0000 (12:13 -0700)
The only remaining user unconditionally drops and reacquires the lock,
which means we really don't need any additional (conditional) annotation.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-throttle.c
block/blk.h

index 8e6f3c9821c26d395878ad4950c4e28d6e72d89b..a665b0950369f25f09af2ab51ef6403f933708ab 100644 (file)
@@ -2353,7 +2353,6 @@ void blk_throtl_drain(struct request_queue *q)
        struct bio *bio;
        int rw;
 
-       queue_lockdep_assert_held(q);
        rcu_read_lock();
 
        /*
index f2ddc71e93dac8fa43bd879f42c926149c5ce23f..027a0ccc175e261fc995e1f29e51025fbaa6fe35 100644 (file)
@@ -35,19 +35,6 @@ extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
-       if (q->queue_lock)
-               lockdep_assert_held(q->queue_lock);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {