#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

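/*
 * Allocate and free per-hardware-queue scheduler data; the init/exit
 * callbacks are invoked for each hardware queue of the request queue.
 */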
int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
				int (*init)(struct blk_mq_hw_ctx *),
				void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

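/* Allocate and release requests through the attached I/O scheduler. */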
struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

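/* Merge handling and hardware-queue restart helpers for the scheduler path. */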
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
				struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

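/* Insert a single request, or a per-ctx list of requests, via the scheduler. */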
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

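/* Pull requests from the scheduler and move them to the dispatch list. */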
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
			struct list_head *rq_list,
			struct request *(*get_rq)(struct blk_mq_hw_ctx *));

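/* Set up and tear down an elevator (I/O scheduler) on a blk-mq queue. */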
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx);

int blk_mq_sched_init(struct request_queue *q);

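/*
 * Attempt a bio merge through the scheduler. Bail out early when no
 * elevator is attached, merging is disabled on the queue, or the bio
 * itself is not mergeable.
 */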
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

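/*
 * Let the scheduler set up its per-request private data, if it
 * implements the ->get_rq_priv() hook.
 */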
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
					   struct request *rq,
					   struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.get_rq_priv)
		return e->type->ops.mq.get_rq_priv(q, rq, bio);

	return 0;
}

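/* Release per-request scheduler private data via ->put_rq_priv(), if set. */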
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
					    struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.put_rq_priv)
		e->type->ops.mq.put_rq_priv(q, rq);
}

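/*
 * Ask the scheduler whether @bio may be merged into @rq; merging is
 * allowed by default when the scheduler provides no ->allow_merge() hook.
 */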
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

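/*
 * Notify the scheduler that a request completed and return its
 * scheduler tag to the hardware queue's sched_tags set.
 */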
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(hctx, rq);

	BUG_ON(rq->internal_tag == -1);

	blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

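/* Notify the scheduler that a request has been started on the device. */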
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

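/* Notify the scheduler that a request is being requeued. */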
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

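/* Check whether the scheduler still holds requests for this hardware queue. */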
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

/*
 * Mark a hardware queue as needing a restart.
 */
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

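/* Test whether a hardware queue has been marked for a scheduler restart. */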
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif