/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */

#include "pblk.h"

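/* (Re)arm the user-activity timer; while it is pending, user I/O is
 * considered active and GC is kept within its allotted rate.
 */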
static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
        mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

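/* Return whether an explicit space limit is in effect and the write buffer
 * has no remaining budget (rb_space has reached zero).
 */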
int pblk_rl_is_limit(struct pblk_rl *rl)
{
        int rb_space;

        rb_space = atomic_read(&rl->rb_space);

        return (rb_space == 0);
}

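/* Admission control for user writes: fail if an active space limit would be
 * exceeded, requeue if the user share of the write buffer is exhausted.
 */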
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
        int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
        int rb_space = atomic_read(&rl->rb_space);

        if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
                return NVM_IO_ERR;

        if (rb_user_cnt >= rl->rb_user_max)
                return NVM_IO_REQUEUE;

        return NVM_IO_OK;
}

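/* Charge inserted entries against the remaining buffer space. rb_space is
 * negative when no explicit limit is in effect, in which case nothing is
 * accounted.
 */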
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
        int rb_space = atomic_read(&rl->rb_space);

        if (unlikely(rb_space >= 0))
                atomic_sub(nr_entries, &rl->rb_space);
}

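/* Admission control for GC writes: GC is only throttled (by rb_gc_max) while
 * user I/O is active; otherwise it may take over the write buffer.
 */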
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
        int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
        int rb_user_active;

        /* If there is no user I/O let GC take over space on the write buffer */
        rb_user_active = READ_ONCE(rl->rb_user_active);
        return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

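/* Account user entries placed on the write buffer and refresh the
 * user-activity window.
 */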
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
        atomic_add(nr_entries, &rl->rb_user_cnt);

        /* Release store: mark user I/O as active so GC backs off */
        smp_store_release(&rl->rb_user_active, 1);
        pblk_rl_kick_u_timer(rl);
}

void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
        atomic_add(nr_entries, &rl->rb_gc_cnt);
}

void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
        atomic_sub(nr_user, &rl->rb_user_cnt);
        atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
        return atomic_read(&rl->free_blocks);
}

unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
        return atomic_read(&rl->free_user_blocks);
}

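/* Split the write buffer budget between user I/O and GC based on the number
 * of free blocks: above the high watermark the whole budget goes to user
 * writes; below it the user share shrinks roughly in proportion to the free
 * blocks left, and once the reserved block count is reached GC gets the
 * entire budget.
 */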
static void __pblk_rl_update_rates(struct pblk_rl *rl,
                                   unsigned long free_blocks)
{
        struct pblk *pblk = container_of(rl, struct pblk, rl);
        int max = rl->rb_budget;

        if (free_blocks >= rl->high) {
                rl->rb_user_max = max;
                rl->rb_gc_max = 0;
                rl->rb_state = PBLK_RL_HIGH;
        } else if (free_blocks < rl->high) {
                int shift = rl->high_pw - rl->rb_windows_pw;
                int user_windows = free_blocks >> shift;
                int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;

                rl->rb_user_max = user_max;
                rl->rb_gc_max = max - user_max;

                if (free_blocks <= rl->rsv_blocks) {
                        rl->rb_user_max = 0;
                        rl->rb_gc_max = max;
                }

                /* In the worst case, we will need to GC lines in the low list
                 * (high valid sector count). If there are lines to GC on high
                 * or mid lists, these will be prioritized
                 */
                rl->rb_state = PBLK_RL_LOW;
        }

        if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
                pblk_gc_should_start(pblk);
        else
                pblk_gc_should_stop(pblk);
}

void pblk_rl_update_rates(struct pblk_rl *rl)
{
        __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}

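/* A line has been returned to the free list: add its good blocks to the free
 * block counters and recompute the user/GC rates.
 */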
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
        int blk_in_line = atomic_read(&line->blk_in_line);
        int free_blocks;

        atomic_add(blk_in_line, &rl->free_blocks);
        free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

        __pblk_rl_update_rates(rl, free_blocks);
}

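/* A line has been taken off the free list: subtract its good blocks and, if
 * the line is used for data, also charge them against the user free blocks
 * before recomputing the rates.
 */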
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
                            bool used)
{
        int blk_in_line = atomic_read(&line->blk_in_line);
        int free_blocks;

        atomic_sub(blk_in_line, &rl->free_blocks);

        if (used)
                free_blocks = atomic_sub_return(blk_in_line,
                                                        &rl->free_user_blocks);
        else
                free_blocks = atomic_read(&rl->free_user_blocks);

        __pblk_rl_update_rates(rl, free_blocks);
}

int pblk_rl_high_thrs(struct pblk_rl *rl)
{
        return rl->high;
}

int pblk_rl_max_io(struct pblk_rl *rl)
{
        return rl->rb_max_io;
}

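/* User-inactivity timer callback: no user I/O has been admitted for the
 * length of the activity window, so let GC take over the write buffer.
 */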
static void pblk_rl_u_timer(struct timer_list *t)
{
        struct pblk_rl *rl = from_timer(rl, t, u_timer);

        /* Release user I/O state so GC can take over the write buffer */
        smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
        del_timer(&rl->u_timer);
}

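/* Set up the rate limiter: derive the high and reserved block watermarks
 * from the line geometry and hand the whole write buffer budget to user I/O
 * to start with.
 */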
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
        struct pblk *pblk = container_of(rl, struct pblk, rl);
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
        int sec_meta, blk_meta;

        unsigned int rb_windows;

        /* Consider sectors used for metadata */
        sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
        blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);

        rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
        rl->high_pw = get_count_order(rl->high);

        rl->rsv_blocks = min_blocks;

        /* This will always be a power-of-2 */
        rb_windows = budget / PBLK_MAX_REQ_ADDRS;
        rl->rb_windows_pw = get_count_order(rb_windows);

        /* To start with, all buffer is available to user I/O writers */
        rl->rb_budget = budget;
        rl->rb_user_max = budget;
        rl->rb_max_io = budget >> 1;
        rl->rb_gc_max = 0;
        rl->rb_state = PBLK_RL_HIGH;

        atomic_set(&rl->rb_user_cnt, 0);
        atomic_set(&rl->rb_gc_cnt, 0);
        atomic_set(&rl->rb_space, -1);

        timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

        rl->rb_user_active = 0;
        rl->rb_gc_active = 0;
}