/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order
 * to guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                                bio_iter, advanced_bio);
}

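/*
 * Resolve the L2P mapping for a multi-sector read. Sectors that hit the
 * write buffer are copied into the bio right away and marked in
 * read_bitmap; unmapped sectors are only marked; the remaining sectors are
 * gathered in the ppa list to be read from the device.
 */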
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 sector_t blba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *bio = rqd->bio;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                                advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        int err;

        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;

        return NVM_IO_OK;
}

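/*
 * Sanity check: the lba stored in each sector's out-of-band metadata must
 * match the lba that was requested for that sector.
 */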
static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
                           sector_t blba)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                WARN(lba != blba + i, "pblk: corrupted read LBA\n");
        }
}

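/*
 * Drop the references that this read request holds on the lines it
 * targets; the release callback (pblk_line_put_wq) defers the actual line
 * put to a workqueue.
 */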
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int i;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr ppa = ppa_list[i];
                struct pblk_line *line;

                line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
                kref_put(&line->ref, pblk_line_put_wq);
        }
}

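/* Complete the user-facing bio once the internal read has been served */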
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
        bio_put(bio);
}

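/*
 * Common read completion: log device errors, verify the lba metadata
 * returned with the data, release line references and free the request.
 */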
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

        pblk_read_check(pblk, rqd, r_ctx->lba);

        bio_put(bio);
        if (r_ctx->private)
                pblk_end_user_read((struct bio *)r_ctx->private);

        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_read(pblk, rqd, true);
}

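/*
 * Part of the request was served from the write buffer. Build an internal
 * bio for the sectors still missing (the zero bits in read_bitmap), read
 * them synchronously from the device, and copy the data into the matching
 * pages of the original bio before completing it.
 */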
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                      unsigned int bio_init_idx,
                                      unsigned long *read_bitmap)
{
        struct bio *new_bio, *bio = rqd->bio;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        __le64 *lba_list_mem, *lba_list_media;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto err;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto err;
        }

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        if (unlikely(nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: sync read IO submission failed\n");
                goto err;
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
                struct pblk_line *line = &pblk->lines[line_id];

                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* Complete the original bio and associated request */
        bio_endio(bio);
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_OK;

err:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}

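/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one lba and
 * either serve it from the write buffer or set up the device read.
 */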
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *bio = rqd->bio;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, true)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

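/*
 * Entry point for the read path. Depending on how much of the request the
 * write buffer can serve, the bio is completed right away, submitted to
 * the device as a whole, or handled as a partial read.
 */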
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        int ret = NVM_IO_ERR;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
                                        (unsigned long long)blba, nr_secs);
                return NVM_IO_ERR;
        }

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->lba = blba;

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, blba, &read_bitmap);
        }

        bio_get(bio);
        if (bitmap_full(&read_bitmap, nr_secs)) {
                bio_endio(bio);
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_OK;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
                }

                rqd->bio = int_bio;
                r_ctx->private = bio;

                ret = pblk_submit_read_io(pblk, rqd);
                if (ret) {
                        pr_err("pblk: read IO submission failed\n");
                        if (int_bio)
                                bio_put(int_bio);
                        return ret;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
        if (ret) {
                pr_err("pblk: failed to perform partial read\n");
                return ret;
        }

        return NVM_IO_OK;

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
}

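/*
 * Filter the sectors selected for GC: a sector is only read if its current
 * L2P mapping still points to the physical address recorded when the line
 * was picked, i.e. it has not been overwritten in the meantime.
 */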
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

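/* Single-sector counterpart of read_ppalist_rq_gc() */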
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

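/*
 * Read the still-valid sectors of a line on behalf of the garbage
 * collector. The request is built on the stack and submitted
 * synchronously; the data is placed in gc_rq->data.
 */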
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->sec_size;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pr_err("pblk: GC read request failed\n");
                goto err_free_bio;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}