lightnvm: pblk: rename sync_point to flush_point
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

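/* Complete the user bios attached to each valid write buffer entry, free any
 * pages that were added as padding, advance the ring buffer sync pointer and
 * release the write request. Returns the new sync position.
 */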
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

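/* Write completions must be processed in the order in which the entries were
 * placed in the write buffer. If this completion is in order, end it along
 * with any queued completions that become in-order as a result; otherwise
 * park it on compl_list until the missing completions arrive.
 */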
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	/* Atomic mempool allocations can fail; complete the write anyway so
	 * that the rest of the pipeline does not stall.
	 */
	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		goto out;
	}

	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

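/* Completion path for user data writes: log and recover from device-reported
 * errors, otherwise complete the request in submission order.
 */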
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

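/* Completion path for end-of-line metadata (emeta) writes. Once all emeta
 * sectors for the line have synced, schedule the work that closes the line.
 */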
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

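/* Fill in the common fields of a write request and allocate the DMA-able
 * buffer that holds the per-sector metadata followed by the PPA list.
 */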
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

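/* Set up a user data write: allocate the LUN bitmap used for write pointer
 * accounting and map the buffer entries to physical addresses. If the line
 * being prepared for erase still has blocks pending, interleave an erase
 * (returned through erase_ppa) with the mapping.
 */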
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

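/* Set up a write request on the recovery path; only the valid buffer entries
 * are mapped.
 */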
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);

	return ret;
}

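/* Decide how many sectors to write in this request, based on how many are
 * available in the buffer and whether a flush is outstanding.
 */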
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

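/* Submit a write for the emeta region of the given line: map the next chunk
 * of the emeta buffer into a bio, allocate pages on the line for it, and
 * remove the line from the emeta list once the whole buffer has been
 * scheduled.
 */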
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* The line was only removed from the emeta list if the whole buffer
	 * had been scheduled; only then does it need to be re-linked.
	 */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but moves the optimum in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

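/* Return the line whose emeta should be written alongside this data request,
 * or NULL if there is no emeta pending or the candidate placement would
 * conflict with the data I/O.
 */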
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0])
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

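/* Submit the set of I/Os derived from one write buffer read: the data write
 * itself, an erase on the next line if one is due, and an emeta write for a
 * previous line if one can be scheduled without conflicts.
 */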
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pr_err("pblk: data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			/* Roll back: mark the erase as pending again so that
			 * it is retried when the line is mapped.
			 */
			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

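/* Read available sectors from the write buffer and submit them to the media.
 * Returns 0 if a write was submitted, 1 if there was nothing to do or the
 * submission failed.
 */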
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be completed by the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		return 1;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}

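/* Main loop of the write thread: drain the write buffer, and sleep until
 * woken when there is nothing to submit.
 */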
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}