1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016 CNEX Labs
4  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
5  *                  Matias Bjorling <matias@cnexlabs.com>
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version
9  * 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * pblk-core.c - pblk's core functionality
17  *
18  */
19
20 #define CREATE_TRACE_POINTS
21
22 #include "pblk.h"
23 #include "pblk-trace.h"
24
25 static void pblk_line_mark_bb(struct work_struct *work)
26 {
27         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
28                                                                         ws);
29         struct pblk *pblk = line_ws->pblk;
30         struct nvm_tgt_dev *dev = pblk->dev;
31         struct ppa_addr *ppa = line_ws->priv;
32         int ret;
33
34         ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
35         if (ret) {
36                 struct pblk_line *line;
37                 int pos;
38
39                 line = pblk_ppa_to_line(pblk, *ppa);
40                 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
41
42                 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
43                                 line->id, pos);
44         }
45
46         kfree(ppa);
47         mempool_free(line_ws, &pblk->gen_ws_pool);
48 }
49
50 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
51                          struct ppa_addr ppa_addr)
52 {
53         struct nvm_tgt_dev *dev = pblk->dev;
54         struct nvm_geo *geo = &dev->geo;
55         struct ppa_addr *ppa;
56         int pos = pblk_ppa_to_pos(geo, ppa_addr);
57
58         pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
59         atomic_long_inc(&pblk->erase_failed);
60
61         atomic_dec(&line->blk_in_line);
62         if (test_and_set_bit(pos, line->blk_bitmap))
63                 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
64                                                         line->id, pos);
65
66         /* Not necessary to mark bad blocks on the 2.0 spec. */
67         if (geo->version == NVM_OCSSD_SPEC_20)
68                 return;
69
70         ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
71         if (!ppa)
72                 return;
73
74         *ppa = ppa_addr;
75         pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
76                                                 GFP_ATOMIC, pblk->bb_wq);
77 }
78
79 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
80 {
81         struct nvm_tgt_dev *dev = pblk->dev;
82         struct nvm_geo *geo = &dev->geo;
83         struct nvm_chk_meta *chunk;
84         struct pblk_line *line;
85         int pos;
86
87         line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
88         pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
89         chunk = &line->chks[pos];
90
91         atomic_dec(&line->left_seblks);
92
93         if (rqd->error) {
94                 trace_pblk_chunk_reset(pblk_disk_name(pblk),
95                                 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
96
97                 chunk->state = NVM_CHK_ST_OFFLINE;
98                 pblk_mark_bb(pblk, line, rqd->ppa_addr);
99         } else {
100                 trace_pblk_chunk_reset(pblk_disk_name(pblk),
101                                 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
102
103                 chunk->state = NVM_CHK_ST_FREE;
104         }
105
106         trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
107                                 chunk->state);
108
109         atomic_dec(&pblk->inflight_io);
110 }
111
112 /* Erase completion assumes that only one block is erased at a time */
113 static void pblk_end_io_erase(struct nvm_rq *rqd)
114 {
115         struct pblk *pblk = rqd->private;
116
117         __pblk_end_io_erase(pblk, rqd);
118         mempool_free(rqd, &pblk->e_rq_pool);
119 }
120
121 /*
122  * Get information for all chunks from the device.
123  *
124  * The caller is responsible for freeing the returned structure with vfree().
125  */
126 struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
127 {
128         struct nvm_tgt_dev *dev = pblk->dev;
129         struct nvm_geo *geo = &dev->geo;
130         struct nvm_chk_meta *meta;
131         struct ppa_addr ppa;
132         unsigned long len;
133         int ret;
134
135         ppa.ppa = 0;
136
137         len = geo->all_chunks * sizeof(*meta);
138         meta = vzalloc(len);
139         if (!meta)
140                 return ERR_PTR(-ENOMEM);
141
142         ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
143         if (ret) {
144                 vfree(meta);
145                 return ERR_PTR(-EIO);
146         }
147
148         return meta;
149 }
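/*
 * Editor's note: a minimal usage sketch (not part of the original source) for
 * pblk_get_chunk_meta(). The returned buffer is vzalloc()ed, so it must be
 * released with vfree():
 *
 *	struct nvm_chk_meta *meta = pblk_get_chunk_meta(pblk);
 *
 *	if (IS_ERR(meta))
 *		return PTR_ERR(meta);
 *	... inspect per-chunk state ...
 *	vfree(meta);
 */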
150
151 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
152                                               struct nvm_chk_meta *meta,
153                                               struct ppa_addr ppa)
154 {
155         struct nvm_tgt_dev *dev = pblk->dev;
156         struct nvm_geo *geo = &dev->geo;
157         int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
158         int lun_off = ppa.m.pu * geo->num_chk;
159         int chk_off = ppa.m.chk;
160
161         return meta + ch_off + lun_off + chk_off;
162 }
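/*
 * Editor's note: worked example of the flat indexing above, assuming a
 * hypothetical geometry of num_chk = 100 chunks per LUN and num_lun = 4 LUNs
 * per group. For a ppa with grp = 1, pu = 2, chk = 7 the offset into the
 * chunk-meta array is 1 * 100 * 4 + 2 * 100 + 7 = 607.
 */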
163
164 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
165                            u64 paddr)
166 {
167         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
168         struct list_head *move_list = NULL;
169
170         /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
171          * table is modified with reclaimed sectors, a check is done to ensure
172          * that newer updates are not overwritten.
173          */
174         spin_lock(&line->lock);
175         WARN_ON(line->state == PBLK_LINESTATE_FREE);
176
177         if (test_and_set_bit(paddr, line->invalid_bitmap)) {
178                 WARN_ONCE(1, "pblk: double invalidate\n");
179                 spin_unlock(&line->lock);
180                 return;
181         }
182         le32_add_cpu(line->vsc, -1);
183
184         if (line->state == PBLK_LINESTATE_CLOSED)
185                 move_list = pblk_line_gc_list(pblk, line);
186         spin_unlock(&line->lock);
187
188         if (move_list) {
189                 spin_lock(&l_mg->gc_lock);
190                 spin_lock(&line->lock);
191                 /* Prevent moving a line that has just been chosen for GC */
192                 if (line->state == PBLK_LINESTATE_GC) {
193                         spin_unlock(&line->lock);
194                         spin_unlock(&l_mg->gc_lock);
195                         return;
196                 }
197                 spin_unlock(&line->lock);
198
199                 list_move_tail(&line->list, move_list);
200                 spin_unlock(&l_mg->gc_lock);
201         }
202 }
203
204 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
205 {
206         struct pblk_line *line;
207         u64 paddr;
208
209 #ifdef CONFIG_NVM_PBLK_DEBUG
210         /* Callers must ensure that the ppa points to a device address */
211         BUG_ON(pblk_addr_in_cache(ppa));
212         BUG_ON(pblk_ppa_empty(ppa));
213 #endif
214
215         line = pblk_ppa_to_line(pblk, ppa);
216         paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
217
218         __pblk_map_invalidate(pblk, line, paddr);
219 }
220
221 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
222                                   unsigned int nr_secs)
223 {
224         sector_t lba;
225
226         spin_lock(&pblk->trans_lock);
227         for (lba = slba; lba < slba + nr_secs; lba++) {
228                 struct ppa_addr ppa;
229
230                 ppa = pblk_trans_map_get(pblk, lba);
231
232                 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
233                         pblk_map_invalidate(pblk, ppa);
234
235                 pblk_ppa_set_empty(&ppa);
236                 pblk_trans_map_set(pblk, lba, ppa);
237         }
238         spin_unlock(&pblk->trans_lock);
239 }
240
241 int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
242 {
243         struct nvm_tgt_dev *dev = pblk->dev;
244
245         rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
246                                                         &rqd->dma_meta_list);
247         if (!rqd->meta_list)
248                 return -ENOMEM;
249
250         if (rqd->nr_ppas == 1)
251                 return 0;
252
253         rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
254         rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
255
256         return 0;
257 }
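/*
 * Editor's note: layout of the single DMA allocation above, as implied by the
 * offsets used (a sketch, not normative):
 *
 *	rqd->meta_list               rqd->meta_list + pblk_dma_meta_size
 *	v                            v
 *	+--------------------------+------------------------+
 *	|    OOB metadata area     |        PPA list        |
 *	+--------------------------+------------------------+
 *
 * A single-sector request carries its PPA inline in rqd->ppa_addr, so the PPA
 * list is left unset.
 */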
258
259 void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
260 {
261         struct nvm_tgt_dev *dev = pblk->dev;
262
263         if (rqd->meta_list)
264                 nvm_dev_dma_free(dev->parent, rqd->meta_list,
265                                 rqd->dma_meta_list);
266 }
267
268 /* Caller must guarantee that the request is a valid type */
269 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
270 {
271         mempool_t *pool;
272         struct nvm_rq *rqd;
273         int rq_size;
274
275         switch (type) {
276         case PBLK_WRITE:
277         case PBLK_WRITE_INT:
278                 pool = &pblk->w_rq_pool;
279                 rq_size = pblk_w_rq_size;
280                 break;
281         case PBLK_READ:
282                 pool = &pblk->r_rq_pool;
283                 rq_size = pblk_g_rq_size;
284                 break;
285         default:
286                 pool = &pblk->e_rq_pool;
287                 rq_size = pblk_g_rq_size;
288         }
289
290         rqd = mempool_alloc(pool, GFP_KERNEL);
291         memset(rqd, 0, rq_size);
292
293         return rqd;
294 }
295
296 /* Typically used on the completion path. Cannot guarantee request consistency */
297 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
298 {
299         mempool_t *pool;
300
301         switch (type) {
302         case PBLK_WRITE:
303                 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
304                 /* fall through */
305         case PBLK_WRITE_INT:
306                 pool = &pblk->w_rq_pool;
307                 break;
308         case PBLK_READ:
309                 pool = &pblk->r_rq_pool;
310                 break;
311         case PBLK_ERASE:
312                 pool = &pblk->e_rq_pool;
313                 break;
314         default:
315                 pblk_err(pblk, "trying to free unknown rqd type\n");
316                 return;
317         }
318
319         pblk_free_rqd_meta(pblk, rqd);
320         mempool_free(rqd, pool);
321 }
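/*
 * Editor's note: a minimal sketch (hypothetical helper, not part of the
 * original source) showing the expected pblk_alloc_rqd()/pblk_free_rqd()
 * pairing. With GFP_KERNEL the mempool allocation does not fail, so no NULL
 * check is needed on the rqd itself.
 */
static inline int pblk_example_erase_sync(struct pblk *pblk,
					  struct ppa_addr ppa)
{
	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
	int ret;

	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;

	ret = pblk_submit_io_sync(pblk, rqd);
	pblk_free_rqd(pblk, rqd, PBLK_ERASE);

	return ret;
}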
322
323 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
324                          int nr_pages)
325 {
326         struct bio_vec bv;
327         int i;
328
329         WARN_ON(off + nr_pages != bio->bi_vcnt);
330
331         for (i = off; i < nr_pages + off; i++) {
332                 bv = bio->bi_io_vec[i];
333                 mempool_free(bv.bv_page, &pblk->page_bio_pool);
334         }
335 }
336
337 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
338                        int nr_pages)
339 {
340         struct request_queue *q = pblk->dev->q;
341         struct page *page;
342         int i, ret;
343
344         for (i = 0; i < nr_pages; i++) {
345                 page = mempool_alloc(&pblk->page_bio_pool, flags);
346
347                 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
348                 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
349                         pblk_err(pblk, "could not add page to bio\n");
350                         mempool_free(page, &pblk->page_bio_pool);
351                         goto err;
352                 }
353         }
354
355         return 0;
356 err:
357         pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
358         return -1;
359 }
360
361 void pblk_write_kick(struct pblk *pblk)
362 {
363         wake_up_process(pblk->writer_ts);
364         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
365 }
366
367 void pblk_write_timer_fn(struct timer_list *t)
368 {
369         struct pblk *pblk = from_timer(pblk, t, wtimer);
370
371         /* kick the write thread every tick to flush outstanding data */
372         pblk_write_kick(pblk);
373 }
374
375 void pblk_write_should_kick(struct pblk *pblk)
376 {
377         unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
378
379         if (secs_avail >= pblk->min_write_pgs)
380                 pblk_write_kick(pblk);
381 }
382
383 static void pblk_wait_for_meta(struct pblk *pblk)
384 {
385         do {
386                 if (!atomic_read(&pblk->inflight_io))
387                         break;
388
389                 schedule();
390         } while (1);
391 }
392
393 static void pblk_flush_writer(struct pblk *pblk)
394 {
395         pblk_rb_flush(&pblk->rwb);
396         do {
397                 if (!pblk_rb_sync_count(&pblk->rwb))
398                         break;
399
400                 pblk_write_kick(pblk);
401                 schedule();
402         } while (1);
403 }
404
405 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
406 {
407         struct pblk_line_meta *lm = &pblk->lm;
408         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
409         struct list_head *move_list = NULL;
410         int vsc = le32_to_cpu(*line->vsc);
411
412         lockdep_assert_held(&line->lock);
413
414         if (line->w_err_gc->has_write_err) {
415                 if (line->gc_group != PBLK_LINEGC_WERR) {
416                         line->gc_group = PBLK_LINEGC_WERR;
417                         move_list = &l_mg->gc_werr_list;
418                         pblk_rl_werr_line_in(&pblk->rl);
419                 }
420         } else if (!vsc) {
421                 if (line->gc_group != PBLK_LINEGC_FULL) {
422                         line->gc_group = PBLK_LINEGC_FULL;
423                         move_list = &l_mg->gc_full_list;
424                 }
425         } else if (vsc < lm->high_thrs) {
426                 if (line->gc_group != PBLK_LINEGC_HIGH) {
427                         line->gc_group = PBLK_LINEGC_HIGH;
428                         move_list = &l_mg->gc_high_list;
429                 }
430         } else if (vsc < lm->mid_thrs) {
431                 if (line->gc_group != PBLK_LINEGC_MID) {
432                         line->gc_group = PBLK_LINEGC_MID;
433                         move_list = &l_mg->gc_mid_list;
434                 }
435         } else if (vsc < line->sec_in_line) {
436                 if (line->gc_group != PBLK_LINEGC_LOW) {
437                         line->gc_group = PBLK_LINEGC_LOW;
438                         move_list = &l_mg->gc_low_list;
439                 }
440         } else if (vsc == line->sec_in_line) {
441                 if (line->gc_group != PBLK_LINEGC_EMPTY) {
442                         line->gc_group = PBLK_LINEGC_EMPTY;
443                         move_list = &l_mg->gc_empty_list;
444                 }
445         } else {
446                 line->state = PBLK_LINESTATE_CORRUPT;
447                 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
448                                         line->state);
449
450                 line->gc_group = PBLK_LINEGC_NONE;
451                 move_list = &l_mg->corrupt_list;
452                 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
453                                                 line->id, vsc,
454                                                 line->sec_in_line,
455                                                 lm->high_thrs, lm->mid_thrs);
456         }
457
458         return move_list;
459 }
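/*
 * Editor's note: illustrative bucket selection for the function above,
 * assuming hypothetical thresholds high_thrs = 100, mid_thrs = 200 and
 * sec_in_line = 400 (write-error lines always go to gc_werr_list first):
 *
 *	vsc = 0   -> gc_full_list   (no valid data left)
 *	vsc = 50  -> gc_high_list   (mostly invalid, cheap to GC)
 *	vsc = 150 -> gc_mid_list
 *	vsc = 300 -> gc_low_list    (mostly valid, expensive to GC)
 *	vsc = 400 -> gc_empty_list  (nothing invalidated yet)
 *
 * Any vsc above sec_in_line marks the line corrupt.
 */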
460
461 void pblk_discard(struct pblk *pblk, struct bio *bio)
462 {
463         sector_t slba = pblk_get_lba(bio);
464         sector_t nr_secs = pblk_get_secs(bio);
465
466         pblk_invalidate_range(pblk, slba, nr_secs);
467 }
468
469 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
470 {
471         atomic_long_inc(&pblk->write_failed);
472 #ifdef CONFIG_NVM_PBLK_DEBUG
473         pblk_print_failed_rqd(pblk, rqd, rqd->error);
474 #endif
475 }
476
477 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
478 {
479         /* Empty page read is not necessarily an error (e.g., L2P recovery) */
480         if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
481                 atomic_long_inc(&pblk->read_empty);
482                 return;
483         }
484
485         switch (rqd->error) {
486         case NVM_RSP_WARN_HIGHECC:
487                 atomic_long_inc(&pblk->read_high_ecc);
488                 break;
489         case NVM_RSP_ERR_FAILECC:
490         case NVM_RSP_ERR_FAILCRC:
491                 atomic_long_inc(&pblk->read_failed);
492                 break;
493         default:
494                 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
495         }
496 #ifdef CONFIG_NVM_PBLK_DEBUG
497         pblk_print_failed_rqd(pblk, rqd, rqd->error);
498 #endif
499 }
500
501 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
502 {
503         pblk->sec_per_write = sec_per_write;
504 }
505
506 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
507 {
508         struct nvm_tgt_dev *dev = pblk->dev;
509
510         atomic_inc(&pblk->inflight_io);
511
512 #ifdef CONFIG_NVM_PBLK_DEBUG
513         if (pblk_check_io(pblk, rqd))
514                 return NVM_IO_ERR;
515 #endif
516
517         return nvm_submit_io(dev, rqd);
518 }
519
520 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
521 {
522         struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
523
524         int i;
525
526         for (i = 0; i < rqd->nr_ppas; i++) {
527                 struct ppa_addr *ppa = &ppa_list[i];
528                 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
529                 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
530
531                 if (caddr == 0)
532                         trace_pblk_chunk_state(pblk_disk_name(pblk),
533                                                         ppa, NVM_CHK_ST_OPEN);
534                 else if (caddr == (chunk->cnlb - 1))
535                         trace_pblk_chunk_state(pblk_disk_name(pblk),
536                                                         ppa, NVM_CHK_ST_CLOSED);
537         }
538 }
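/*
 * Editor's note: with a hypothetical chunk of cnlb = 4096 sectors, a write
 * that touches chunk address 0 traces the chunk as open, and one that touches
 * address 4095 (cnlb - 1, the last sector) traces it as closed.
 */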
539
540 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
541 {
542         struct nvm_tgt_dev *dev = pblk->dev;
543         int ret;
544
545         atomic_inc(&pblk->inflight_io);
546
547 #ifdef CONFIG_NVM_PBLK_DEBUG
548         if (pblk_check_io(pblk, rqd))
549                 return NVM_IO_ERR;
550 #endif
551
552         ret = nvm_submit_io_sync(dev, rqd);
553
554         if (trace_pblk_chunk_state_enabled() && !ret &&
555             rqd->opcode == NVM_OP_PWRITE)
556                 pblk_check_chunk_state_update(pblk, rqd);
557
558         return ret;
559 }
560
561 int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
562 {
563         struct ppa_addr *ppa_list;
564         int ret;
565
566         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
567
568         pblk_down_chunk(pblk, ppa_list[0]);
569         ret = pblk_submit_io_sync(pblk, rqd);
570         pblk_up_chunk(pblk, ppa_list[0]);
571
572         return ret;
573 }
574
575 static void pblk_bio_map_addr_endio(struct bio *bio)
576 {
577         bio_put(bio);
578 }
579
580 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
581                               unsigned int nr_secs, unsigned int len,
582                               int alloc_type, gfp_t gfp_mask)
583 {
584         struct nvm_tgt_dev *dev = pblk->dev;
585         void *kaddr = data;
586         struct page *page;
587         struct bio *bio;
588         int i, ret;
589
590         if (alloc_type == PBLK_KMALLOC_META)
591                 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
592
593         bio = bio_kmalloc(gfp_mask, nr_secs);
594         if (!bio)
595                 return ERR_PTR(-ENOMEM);
596
597         for (i = 0; i < nr_secs; i++) {
598                 page = vmalloc_to_page(kaddr);
599                 if (!page) {
600                         pblk_err(pblk, "could not map vmalloc bio\n");
601                         bio_put(bio);
602                         bio = ERR_PTR(-ENOMEM);
603                         goto out;
604                 }
605
606                 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
607                 if (ret != PAGE_SIZE) {
608                         pblk_err(pblk, "could not add page to bio\n");
609                         bio_put(bio);
610                         bio = ERR_PTR(-ENOMEM);
611                         goto out;
612                 }
613
614                 kaddr += PAGE_SIZE;
615         }
616
617         bio->bi_end_io = pblk_bio_map_addr_endio;
618 out:
619         return bio;
620 }
621
622 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
623                    unsigned long secs_to_flush)
624 {
625         int max = pblk->sec_per_write;
626         int min = pblk->min_write_pgs;
627         int secs_to_sync = 0;
628
629         if (secs_avail >= max)
630                 secs_to_sync = max;
631         else if (secs_avail >= min)
632                 secs_to_sync = min * (secs_avail / min);
633         else if (secs_to_flush)
634                 secs_to_sync = min;
635
636         return secs_to_sync;
637 }
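/*
 * Editor's note: worked examples for pblk_calc_secs(), assuming hypothetical
 * limits sec_per_write = 64 and min_write_pgs = 8:
 *
 *	secs_avail = 100, secs_to_flush = 0 -> 64 (capped at max)
 *	secs_avail = 20,  secs_to_flush = 0 -> 16 (rounded down to 8 * (20 / 8))
 *	secs_avail = 5,   secs_to_flush = 1 -> 8  (flush forces a minimal write)
 *	secs_avail = 5,   secs_to_flush = 0 -> 0  (wait for more data)
 */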
638
639 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
640 {
641         u64 addr;
642         int i;
643
644         spin_lock(&line->lock);
645         addr = find_next_zero_bit(line->map_bitmap,
646                                         pblk->lm.sec_per_line, line->cur_sec);
647         line->cur_sec = addr - nr_secs;
648
649         for (i = 0; i < nr_secs; i++, line->cur_sec--)
650                 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
651         spin_unlock(&line->lock);
652 }
653
654 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
655 {
656         u64 addr;
657         int i;
658
659         lockdep_assert_held(&line->lock);
660
661         /* Logic error: ppa out of bounds. Prevent generating a bad address. */
662         if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
663                 WARN(1, "pblk: page allocation out of bounds\n");
664                 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
665         }
666
667         line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
668                                         pblk->lm.sec_per_line, line->cur_sec);
669         for (i = 0; i < nr_secs; i++, line->cur_sec++)
670                 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
671
672         return addr;
673 }
674
675 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
676 {
677         u64 addr;
678
679         /* Lock needed in case a write fails and a recovery needs to remap
680          * failed write buffer entries
681          */
682         spin_lock(&line->lock);
683         addr = __pblk_alloc_page(pblk, line, nr_secs);
684         line->left_msecs -= nr_secs;
685         WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
686         spin_unlock(&line->lock);
687
688         return addr;
689 }
690
691 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
692 {
693         u64 paddr;
694
695         spin_lock(&line->lock);
696         paddr = find_next_zero_bit(line->map_bitmap,
697                                         pblk->lm.sec_per_line, line->cur_sec);
698         spin_unlock(&line->lock);
699
700         return paddr;
701 }
702
703 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
704 {
705         struct nvm_tgt_dev *dev = pblk->dev;
706         struct nvm_geo *geo = &dev->geo;
707         struct pblk_line_meta *lm = &pblk->lm;
708         int bit;
709
710         /* This usually only happens on bad lines */
711         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
712         if (bit >= lm->blk_per_line)
713                 return -1;
714
715         return bit * geo->ws_opt;
716 }
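/*
 * Editor's note: worked example, assuming a hypothetical ws_opt = 8. If the
 * first two blocks of the line are bad, the first zero bit in blk_bitmap is
 * at position 2 and smeta starts at paddr 2 * 8 = 16.
 */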
717
718 int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
719 {
720         struct nvm_tgt_dev *dev = pblk->dev;
721         struct pblk_line_meta *lm = &pblk->lm;
722         struct bio *bio;
723         struct nvm_rq rqd;
724         u64 paddr = pblk_line_smeta_start(pblk, line);
725         int i, ret;
726
727         memset(&rqd, 0, sizeof(struct nvm_rq));
728
729         ret = pblk_alloc_rqd_meta(pblk, &rqd);
730         if (ret)
731                 return ret;
732
733         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
734         if (IS_ERR(bio)) {
735                 ret = PTR_ERR(bio);
736                 goto clear_rqd;
737         }
738
739         bio->bi_iter.bi_sector = 0; /* internal bio */
740         bio_set_op_attrs(bio, REQ_OP_READ, 0);
741
742         rqd.bio = bio;
743         rqd.opcode = NVM_OP_PREAD;
744         rqd.nr_ppas = lm->smeta_sec;
745         rqd.is_seq = 1;
746
747         for (i = 0; i < lm->smeta_sec; i++, paddr++)
748                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
749
750         ret = pblk_submit_io_sync(pblk, &rqd);
751         if (ret) {
752                 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
753                 bio_put(bio);
754                 goto clear_rqd;
755         }
756
757         atomic_dec(&pblk->inflight_io);
758
759         if (rqd.error)
760                 pblk_log_read_err(pblk, &rqd);
761
762 clear_rqd:
763         pblk_free_rqd_meta(pblk, &rqd);
764         return ret;
765 }
766
767 static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
768                                  u64 paddr)
769 {
770         struct nvm_tgt_dev *dev = pblk->dev;
771         struct pblk_line_meta *lm = &pblk->lm;
772         struct bio *bio;
773         struct nvm_rq rqd;
774         __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
775         __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
776         int i, ret;
777
778         memset(&rqd, 0, sizeof(struct nvm_rq));
779
780         ret = pblk_alloc_rqd_meta(pblk, &rqd);
781         if (ret)
782                 return ret;
783
784         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
785         if (IS_ERR(bio)) {
786                 ret = PTR_ERR(bio);
787                 goto clear_rqd;
788         }
789
790         bio->bi_iter.bi_sector = 0; /* internal bio */
791         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
792
793         rqd.bio = bio;
794         rqd.opcode = NVM_OP_PWRITE;
795         rqd.nr_ppas = lm->smeta_sec;
796         rqd.is_seq = 1;
797
798         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
799                 struct pblk_sec_meta *meta_list = rqd.meta_list;
800
801                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
802                 meta_list[i].lba = lba_list[paddr] = addr_empty;
803         }
804
805         ret = pblk_submit_io_sync_sem(pblk, &rqd);
806         if (ret) {
807                 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
808                 bio_put(bio);
809                 goto clear_rqd;
810         }
811
812         atomic_dec(&pblk->inflight_io);
813
814         if (rqd.error) {
815                 pblk_log_write_err(pblk, &rqd);
816                 ret = -EIO;
817         }
818
819 clear_rqd:
820         pblk_free_rqd_meta(pblk, &rqd);
821         return ret;
822 }
823
824 int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
825                          void *emeta_buf)
826 {
827         struct nvm_tgt_dev *dev = pblk->dev;
828         struct nvm_geo *geo = &dev->geo;
829         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
830         struct pblk_line_meta *lm = &pblk->lm;
831         void *ppa_list, *meta_list;
832         struct bio *bio;
833         struct nvm_rq rqd;
834         u64 paddr = line->emeta_ssec;
835         dma_addr_t dma_ppa_list, dma_meta_list;
836         int min = pblk->min_write_pgs;
837         int left_ppas = lm->emeta_sec[0];
838         int line_id = line->id;
839         int rq_ppas, rq_len;
840         int i, j;
841         int ret;
842
843         meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
844                                                         &dma_meta_list);
845         if (!meta_list)
846                 return -ENOMEM;
847
848         ppa_list = meta_list + pblk_dma_meta_size;
849         dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
850
851 next_rq:
852         memset(&rqd, 0, sizeof(struct nvm_rq));
853
854         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
855         rq_len = rq_ppas * geo->csecs;
856
857         bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
858                                         l_mg->emeta_alloc_type, GFP_KERNEL);
859         if (IS_ERR(bio)) {
860                 ret = PTR_ERR(bio);
861                 goto free_rqd_dma;
862         }
863
864         bio->bi_iter.bi_sector = 0; /* internal bio */
865         bio_set_op_attrs(bio, REQ_OP_READ, 0);
866
867         rqd.bio = bio;
868         rqd.meta_list = meta_list;
869         rqd.ppa_list = ppa_list;
870         rqd.dma_meta_list = dma_meta_list;
871         rqd.dma_ppa_list = dma_ppa_list;
872         rqd.opcode = NVM_OP_PREAD;
873         rqd.nr_ppas = rq_ppas;
874
875         for (i = 0; i < rqd.nr_ppas; ) {
876                 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
877                 int pos = pblk_ppa_to_pos(geo, ppa);
878
879                 if (pblk_io_aligned(pblk, rq_ppas))
880                         rqd.is_seq = 1;
881
882                 while (test_bit(pos, line->blk_bitmap)) {
883                         paddr += min;
884                         if (pblk_boundary_paddr_checks(pblk, paddr)) {
885                                 bio_put(bio);
886                                 ret = -EINTR;
887                                 goto free_rqd_dma;
888                         }
889
890                         ppa = addr_to_gen_ppa(pblk, paddr, line_id);
891                         pos = pblk_ppa_to_pos(geo, ppa);
892                 }
893
894                 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
895                         bio_put(bio);
896                         ret = -EINTR;
897                         goto free_rqd_dma;
898                 }
899
900                 for (j = 0; j < min; j++, i++, paddr++)
901                         rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
902         }
903
904         ret = pblk_submit_io_sync(pblk, &rqd);
905         if (ret) {
906                 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
907                 bio_put(bio);
908                 goto free_rqd_dma;
909         }
910
911         atomic_dec(&pblk->inflight_io);
912
913         if (rqd.error)
914                 pblk_log_read_err(pblk, &rqd);
915
916         emeta_buf += rq_len;
917         left_ppas -= rq_ppas;
918         if (left_ppas)
919                 goto next_rq;
920
921 free_rqd_dma:
922         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
923         return ret;
924 }
925
926 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
927                             struct ppa_addr ppa)
928 {
929         rqd->opcode = NVM_OP_ERASE;
930         rqd->ppa_addr = ppa;
931         rqd->nr_ppas = 1;
932         rqd->is_seq = 1;
933         rqd->bio = NULL;
934 }
935
936 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
937 {
938         struct nvm_rq rqd = {NULL};
939         int ret;
940
941         trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
942                                 PBLK_CHUNK_RESET_START);
943
944         pblk_setup_e_rq(pblk, &rqd, ppa);
945
946         /* The write thread schedules erases so as to minimize disturbance
947          * to writes. Thus, there is no need to take the LUN semaphore.
948          */
949         ret = pblk_submit_io_sync(pblk, &rqd);
950         rqd.private = pblk;
951         __pblk_end_io_erase(pblk, &rqd);
952
953         return ret;
954 }
955
956 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
957 {
958         struct pblk_line_meta *lm = &pblk->lm;
959         struct ppa_addr ppa;
960         int ret, bit = -1;
961
962         /* Erase only good blocks, one at a time */
963         do {
964                 spin_lock(&line->lock);
965                 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
966                                                                 bit + 1);
967                 if (bit >= lm->blk_per_line) {
968                         spin_unlock(&line->lock);
969                         break;
970                 }
971
972                 ppa = pblk->luns[bit].bppa; /* set ch and lun */
973                 ppa.a.blk = line->id;
974
975                 atomic_dec(&line->left_eblks);
976                 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
977                 spin_unlock(&line->lock);
978
979                 ret = pblk_blk_erase_sync(pblk, ppa);
980                 if (ret) {
981                         pblk_err(pblk, "failed to erase line %d\n", line->id);
982                         return ret;
983                 }
984         } while (1);
985
986         return 0;
987 }
988
989 static void pblk_line_setup_metadata(struct pblk_line *line,
990                                      struct pblk_line_mgmt *l_mg,
991                                      struct pblk_line_meta *lm)
992 {
993         int meta_line;
994
995         lockdep_assert_held(&l_mg->free_lock);
996
997 retry_meta:
998         meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
999         if (meta_line == PBLK_DATA_LINES) {
1000                 spin_unlock(&l_mg->free_lock);
1001                 io_schedule();
1002                 spin_lock(&l_mg->free_lock);
1003                 goto retry_meta;
1004         }
1005
1006         set_bit(meta_line, &l_mg->meta_bitmap);
1007         line->meta_line = meta_line;
1008
1009         line->smeta = l_mg->sline_meta[meta_line];
1010         line->emeta = l_mg->eline_meta[meta_line];
1011
1012         memset(line->smeta, 0, lm->smeta_len);
1013         memset(line->emeta->buf, 0, lm->emeta_len[0]);
1014
1015         line->emeta->mem = 0;
1016         atomic_set(&line->emeta->sync, 0);
1017 }
1018
1019 /* For now, lines are always assumed to be full lines. Thus, the smeta
1020  * former and current lun bitmaps are omitted.
1021  */
1022 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
1023                                   struct pblk_line *cur)
1024 {
1025         struct nvm_tgt_dev *dev = pblk->dev;
1026         struct nvm_geo *geo = &dev->geo;
1027         struct pblk_line_meta *lm = &pblk->lm;
1028         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1029         struct pblk_emeta *emeta = line->emeta;
1030         struct line_emeta *emeta_buf = emeta->buf;
1031         struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
1032         int nr_blk_line;
1033
1034         /* After erasing the line, new bad blocks might appear and we risk
1035          * having an invalid line
1036          */
1037         nr_blk_line = lm->blk_per_line -
1038                         bitmap_weight(line->blk_bitmap, lm->blk_per_line);
1039         if (nr_blk_line < lm->min_blk_line) {
1040                 spin_lock(&l_mg->free_lock);
1041                 spin_lock(&line->lock);
1042                 line->state = PBLK_LINESTATE_BAD;
1043                 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1044                                         line->state);
1045                 spin_unlock(&line->lock);
1046
1047                 list_add_tail(&line->list, &l_mg->bad_list);
1048                 spin_unlock(&l_mg->free_lock);
1049
1050                 pblk_debug(pblk, "line %d is bad\n", line->id);
1051
1052                 return 0;
1053         }
1054
1055         /* Run-time metadata */
1056         line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1057
1058         /* Mark LUNs allocated in this line (all for now) */
1059         bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1060
1061         smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1062         memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1063         smeta_buf->header.id = cpu_to_le32(line->id);
1064         smeta_buf->header.type = cpu_to_le16(line->type);
1065         smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1066         smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1067
1068         /* Start metadata */
1069         smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1070         smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1071
1072         /* Fill metadata among lines */
1073         if (cur) {
1074                 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1075                 smeta_buf->prev_id = cpu_to_le32(cur->id);
1076                 cur->emeta->buf->next_id = cpu_to_le32(line->id);
1077         } else {
1078                 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1079         }
1080
1081         /* All smeta must be set at this point */
1082         smeta_buf->header.crc = cpu_to_le32(
1083                         pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1084         smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1085
1086         /* End metadata */
1087         memcpy(&emeta_buf->header, &smeta_buf->header,
1088                                                 sizeof(struct line_header));
1089
1090         emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1091         emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1092         emeta_buf->header.crc = cpu_to_le32(
1093                         pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1094
1095         emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1096         emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1097         emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1098         emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1099         emeta_buf->crc = cpu_to_le32(0);
1100         emeta_buf->prev_id = smeta_buf->prev_id;
1101
1102         return 1;
1103 }
1104
1105 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1106 {
1107         struct pblk_line_meta *lm = &pblk->lm;
1108         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1109
1110         line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1111         if (!line->map_bitmap)
1112                 return -ENOMEM;
1113
1114         memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1115
1116         /* will be initialized using bb info from map_bitmap */
1117         line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1118         if (!line->invalid_bitmap) {
1119                 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1120                 line->map_bitmap = NULL;
1121                 return -ENOMEM;
1122         }
1123
1124         return 0;
1125 }
1126
1127 /* Capture bad-block information on the line mapping bitmaps and reserve
1128  * the smeta and emeta regions. Lines are still always assumed to be full.
1129  */
1130 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1131                              int init)
1132 {
1133         struct nvm_tgt_dev *dev = pblk->dev;
1134         struct nvm_geo *geo = &dev->geo;
1135         struct pblk_line_meta *lm = &pblk->lm;
1136         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1137         u64 off;
1138         int bit = -1;
1139         int emeta_secs;
1140
1141         line->sec_in_line = lm->sec_per_line;
1142
1143         /* Capture bad block information on line mapping bitmaps */
1144         while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1145                                         bit + 1)) < lm->blk_per_line) {
1146                 off = bit * geo->ws_opt;
1147                 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1148                                                         lm->sec_per_line);
1149                 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1150                                                         lm->sec_per_line);
1151                 line->sec_in_line -= geo->clba;
1152         }
1153
1154         /* Mark smeta metadata sectors as bad sectors */
1155         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1156         off = bit * geo->ws_opt;
1157         bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1158         line->sec_in_line -= lm->smeta_sec;
1159         line->smeta_ssec = off;
1160         line->cur_sec = off + lm->smeta_sec;
1161
1162         if (init && pblk_line_smeta_write(pblk, line, off)) {
1163                 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1164                 return 0;
1165         }
1166
1167         bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1168
1169         /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1170          * blocks to make sure that there are enough sectors to store emeta.
1171          */
1172         emeta_secs = lm->emeta_sec[0];
1173         off = lm->sec_per_line;
1174         while (emeta_secs) {
1175                 off -= geo->ws_opt;
1176                 if (!test_bit(off, line->invalid_bitmap)) {
1177                         bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1178                         emeta_secs -= geo->ws_opt;
1179                 }
1180         }
1181
1182         line->emeta_ssec = off;
1183         line->sec_in_line -= lm->emeta_sec[0];
1184         line->nr_valid_lbas = 0;
1185         line->left_msecs = line->sec_in_line;
1186         *line->vsc = cpu_to_le32(line->sec_in_line);
1187
1188         if (lm->sec_per_line - line->sec_in_line !=
1189                 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1190                 spin_lock(&line->lock);
1191                 line->state = PBLK_LINESTATE_BAD;
1192                 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1193                                         line->state);
1194                 spin_unlock(&line->lock);
1195
1196                 list_add_tail(&line->list, &l_mg->bad_list);
1197                 pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1198
1199                 return 0;
1200         }
1201
1202         return 1;
1203 }
1204
1205 static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1206 {
1207         struct pblk_line_meta *lm = &pblk->lm;
1208         struct nvm_tgt_dev *dev = pblk->dev;
1209         struct nvm_geo *geo = &dev->geo;
1210         int blk_to_erase = atomic_read(&line->blk_in_line);
1211         int i;
1212
1213         for (i = 0; i < lm->blk_per_line; i++) {
1214                 struct pblk_lun *rlun = &pblk->luns[i];
1215                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1216                 int state = line->chks[pos].state;
1217
1218                 /* Free chunks should not be erased */
1219                 if (state & NVM_CHK_ST_FREE) {
1220                         set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1221                                                         line->erase_bitmap);
1222                         blk_to_erase--;
1223                 }
1224         }
1225
1226         return blk_to_erase;
1227 }
1228
1229 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1230 {
1231         struct pblk_line_meta *lm = &pblk->lm;
1232         int blk_in_line = atomic_read(&line->blk_in_line);
1233         int blk_to_erase;
1234
1235         /* Bad blocks do not need to be erased */
1236         bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1237
1238         spin_lock(&line->lock);
1239
1240         /* If we have not written to this line, we need to mark free chunks
1241          * as already erased
1242          */
1243         if (line->state == PBLK_LINESTATE_NEW) {
1244                 blk_to_erase = pblk_prepare_new_line(pblk, line);
1245                 line->state = PBLK_LINESTATE_FREE;
1246                 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1247                                         line->state);
1248         } else {
1249                 blk_to_erase = blk_in_line;
1250         }
1251
1252         if (blk_in_line < lm->min_blk_line) {
1253                 spin_unlock(&line->lock);
1254                 return -EAGAIN;
1255         }
1256
1257         if (line->state != PBLK_LINESTATE_FREE) {
1258                 WARN(1, "pblk: corrupted line %d, state %d\n",
1259                                                         line->id, line->state);
1260                 spin_unlock(&line->lock);
1261                 return -EINTR;
1262         }
1263
1264         line->state = PBLK_LINESTATE_OPEN;
1265         trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1266                                 line->state);
1267
1268         atomic_set(&line->left_eblks, blk_to_erase);
1269         atomic_set(&line->left_seblks, blk_to_erase);
1270
1271         line->meta_distance = lm->meta_distance;
1272         spin_unlock(&line->lock);
1273
1274         kref_init(&line->ref);
1275
1276         return 0;
1277 }
1278
1279 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1280 {
1281         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1282         int ret;
1283
1284         spin_lock(&l_mg->free_lock);
1285         l_mg->data_line = line;
1286         list_del(&line->list);
1287
1288         ret = pblk_line_prepare(pblk, line);
1289         if (ret) {
1290                 list_add(&line->list, &l_mg->free_list);
1291                 spin_unlock(&l_mg->free_lock);
1292                 return ret;
1293         }
1294         spin_unlock(&l_mg->free_lock);
1295
1296         ret = pblk_line_alloc_bitmaps(pblk, line);
1297         if (ret)
1298                 return ret;
1299
1300         if (!pblk_line_init_bb(pblk, line, 0)) {
1301                 list_add(&line->list, &l_mg->free_list);
1302                 return -EINTR;
1303         }
1304
1305         pblk_rl_free_lines_dec(&pblk->rl, line, true);
1306         return 0;
1307 }
1308
1309 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1310 {
1311         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1312
1313         mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1314         line->map_bitmap = NULL;
1315         line->smeta = NULL;
1316         line->emeta = NULL;
1317 }
1318
1319 static void pblk_line_reinit(struct pblk_line *line)
1320 {
1321         *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1322
1323         line->map_bitmap = NULL;
1324         line->invalid_bitmap = NULL;
1325         line->smeta = NULL;
1326         line->emeta = NULL;
1327 }
1328
1329 void pblk_line_free(struct pblk_line *line)
1330 {
1331         struct pblk *pblk = line->pblk;
1332         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1333
1334         mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1335         mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
1336
1337         pblk_line_reinit(line);
1338 }
1339
1340 struct pblk_line *pblk_line_get(struct pblk *pblk)
1341 {
1342         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1343         struct pblk_line_meta *lm = &pblk->lm;
1344         struct pblk_line *line;
1345         int ret, bit;
1346
1347         lockdep_assert_held(&l_mg->free_lock);
1348
1349 retry:
1350         if (list_empty(&l_mg->free_list)) {
1351                 pblk_err(pblk, "no free lines\n");
1352                 return NULL;
1353         }
1354
1355         line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1356         list_del(&line->list);
1357         l_mg->nr_free_lines--;
1358
1359         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1360         if (unlikely(bit >= lm->blk_per_line)) {
1361                 spin_lock(&line->lock);
1362                 line->state = PBLK_LINESTATE_BAD;
1363                 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1364                                         line->state);
1365                 spin_unlock(&line->lock);
1366
1367                 list_add_tail(&line->list, &l_mg->bad_list);
1368
1369                 pblk_debug(pblk, "line %d is bad\n", line->id);
1370                 goto retry;
1371         }
1372
1373         ret = pblk_line_prepare(pblk, line);
1374         if (ret) {
1375                 switch (ret) {
1376                 case -EAGAIN:
1377                         list_add(&line->list, &l_mg->bad_list);
1378                         goto retry;
1379                 case -EINTR:
1380                         list_add(&line->list, &l_mg->corrupt_list);
1381                         goto retry;
1382                 default:
1383                         pblk_err(pblk, "failed to prepare line %d\n", line->id);
1384                         list_add(&line->list, &l_mg->free_list);
1385                         l_mg->nr_free_lines++;
1386                         return NULL;
1387                 }
1388         }
1389
1390         return line;
1391 }
1392
1393 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1394                                          struct pblk_line *line)
1395 {
1396         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1397         struct pblk_line *retry_line;
1398
1399 retry:
1400         spin_lock(&l_mg->free_lock);
1401         retry_line = pblk_line_get(pblk);
1402         if (!retry_line) {
1403                 l_mg->data_line = NULL;
1404                 spin_unlock(&l_mg->free_lock);
1405                 return NULL;
1406         }
1407
1408         retry_line->map_bitmap = line->map_bitmap;
1409         retry_line->invalid_bitmap = line->invalid_bitmap;
1410         retry_line->smeta = line->smeta;
1411         retry_line->emeta = line->emeta;
1412         retry_line->meta_line = line->meta_line;
1413
1414         pblk_line_reinit(line);
1415
1416         l_mg->data_line = retry_line;
1417         spin_unlock(&l_mg->free_lock);
1418
1419         pblk_rl_free_lines_dec(&pblk->rl, line, false);
1420
1421         if (pblk_line_erase(pblk, retry_line))
1422                 goto retry;
1423
1424         return retry_line;
1425 }
1426
1427 static void pblk_set_space_limit(struct pblk *pblk)
1428 {
1429         struct pblk_rl *rl = &pblk->rl;
1430
1431         atomic_set(&rl->rb_space, 0);
1432 }
1433
1434 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1435 {
1436         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1437         struct pblk_line *line;
1438
1439         spin_lock(&l_mg->free_lock);
1440         line = pblk_line_get(pblk);
1441         if (!line) {
1442                 spin_unlock(&l_mg->free_lock);
1443                 return NULL;
1444         }
1445
1446         line->seq_nr = l_mg->d_seq_nr++;
1447         line->type = PBLK_LINETYPE_DATA;
1448         l_mg->data_line = line;
1449
1450         pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1451
1452         /* Allocate next line for preparation */
1453         l_mg->data_next = pblk_line_get(pblk);
1454         if (!l_mg->data_next) {
1455                 /* If we cannot get a new line, we need to stop the pipeline.
1456                  * Only allow as many writes in as we can store safely and then
1457                  * fail gracefully
1458                  */
1459                 pblk_set_space_limit(pblk);
1460
1461                 l_mg->data_next = NULL;
1462         } else {
1463                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1464                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1465         }
1466         spin_unlock(&l_mg->free_lock);
1467
1468         if (pblk_line_alloc_bitmaps(pblk, line))
1469                 return NULL;
1470
1471         if (pblk_line_erase(pblk, line)) {
1472                 line = pblk_line_retry(pblk, line);
1473                 if (!line)
1474                         return NULL;
1475         }
1476
1477 retry_setup:
1478         if (!pblk_line_init_metadata(pblk, line, NULL)) {
1479                 line = pblk_line_retry(pblk, line);
1480                 if (!line)
1481                         return NULL;
1482
1483                 goto retry_setup;
1484         }
1485
1486         if (!pblk_line_init_bb(pblk, line, 1)) {
1487                 line = pblk_line_retry(pblk, line);
1488                 if (!line)
1489                         return NULL;
1490
1491                 goto retry_setup;
1492         }
1493
1494         pblk_rl_free_lines_dec(&pblk->rl, line, true);
1495
1496         return line;
1497 }
1498
1499 void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1500 {
1501         struct pblk_line *line;
1502
1503         line = pblk_ppa_to_line(pblk, ppa);
1504         kref_put(&line->ref, pblk_line_put_wq);
1505 }
1506
1507 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1508 {
1509         struct ppa_addr *ppa_list;
1510         int i;
1511
1512         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1513
1514         for (i = 0; i < rqd->nr_ppas; i++)
1515                 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1516 }
1517
1518 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1519 {
1520         lockdep_assert_held(&pblk->l_mg.free_lock);
1521
1522         pblk_set_space_limit(pblk);
1523         pblk->state = PBLK_STATE_STOPPING;
1524         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1525 }
1526
1527 static void pblk_line_close_meta_sync(struct pblk *pblk)
1528 {
1529         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1530         struct pblk_line_meta *lm = &pblk->lm;
1531         struct pblk_line *line, *tline;
1532         LIST_HEAD(list);
1533
1534         spin_lock(&l_mg->close_lock);
1535         if (list_empty(&l_mg->emeta_list)) {
1536                 spin_unlock(&l_mg->close_lock);
1537                 return;
1538         }
1539
1540         list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1541         spin_unlock(&l_mg->close_lock);
1542
1543         list_for_each_entry_safe(line, tline, &list, list) {
1544                 struct pblk_emeta *emeta = line->emeta;
1545
1546                 while (emeta->mem < lm->emeta_len[0]) {
1547                         int ret;
1548
1549                         ret = pblk_submit_meta_io(pblk, line);
1550                         if (ret) {
1551                                 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1552                                                         line->id, ret);
1553                                 return;
1554                         }
1555                 }
1556         }
1557
1558         pblk_wait_for_meta(pblk);
1559         flush_workqueue(pblk->close_wq);
1560 }
1561
1562 void __pblk_pipeline_flush(struct pblk *pblk)
1563 {
1564         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1565         int ret;
1566
1567         spin_lock(&l_mg->free_lock);
1568         if (pblk->state == PBLK_STATE_RECOVERING ||
1569                                         pblk->state == PBLK_STATE_STOPPED) {
1570                 spin_unlock(&l_mg->free_lock);
1571                 return;
1572         }
1573         pblk->state = PBLK_STATE_RECOVERING;
1574         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1575         spin_unlock(&l_mg->free_lock);
1576
1577         pblk_flush_writer(pblk);
1578         pblk_wait_for_meta(pblk);
1579
1580         ret = pblk_recov_pad(pblk);
1581         if (ret) {
1582                 pblk_err(pblk, "could not close data on teardown (%d)\n", ret);
1583                 return;
1584         }
1585
1586         flush_workqueue(pblk->bb_wq);
1587         pblk_line_close_meta_sync(pblk);
1588 }
1589
1590 void __pblk_pipeline_stop(struct pblk *pblk)
1591 {
1592         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1593
1594         spin_lock(&l_mg->free_lock);
1595         pblk->state = PBLK_STATE_STOPPED;
1596         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1597         l_mg->data_line = NULL;
1598         l_mg->data_next = NULL;
1599         spin_unlock(&l_mg->free_lock);
1600 }
1601
1602 void pblk_pipeline_stop(struct pblk *pblk)
1603 {
1604         __pblk_pipeline_flush(pblk);
1605         __pblk_pipeline_stop(pblk);
1606 }
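
/*
 * Teardown ordering sketch: drain first, stop second. A hypothetical
 * caller doing a partial teardown could call the two helpers separately,
 * but must preserve the order that pblk_pipeline_stop() encodes:
 *
 *	__pblk_pipeline_flush(pblk);	// pad and close open data/metadata
 *	__pblk_pipeline_stop(pblk);	// mark STOPPED, clear current lines
 */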
1607
1608 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1609 {
1610         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1611         struct pblk_line *cur, *new = NULL;
1612         unsigned int left_seblks;
1613
1614         new = l_mg->data_next;
1615         if (!new)
1616                 goto out;
1617
1618         spin_lock(&l_mg->free_lock);
1619         cur = l_mg->data_line;
1620         l_mg->data_line = new;
1621
1622         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1623         spin_unlock(&l_mg->free_lock);
1624
1625 retry_erase:
1626         left_seblks = atomic_read(&new->left_seblks);
1627         if (left_seblks) {
1628                 /* If line is not fully erased, erase it */
1629                 if (atomic_read(&new->left_eblks)) {
1630                         if (pblk_line_erase(pblk, new))
1631                                 goto out;
1632                 } else {
1633                         io_schedule();
1634                 }
1635                 goto retry_erase;
1636         }
1637
1638         if (pblk_line_alloc_bitmaps(pblk, new))
1639                 return NULL;
1640
1641 retry_setup:
1642         if (!pblk_line_init_metadata(pblk, new, cur)) {
1643                 new = pblk_line_retry(pblk, new);
1644                 if (!new)
1645                         goto out;
1646
1647                 goto retry_setup;
1648         }
1649
1650         if (!pblk_line_init_bb(pblk, new, 1)) {
1651                 new = pblk_line_retry(pblk, new);
1652                 if (!new)
1653                         goto out;
1654
1655                 goto retry_setup;
1656         }
1657
1658         pblk_rl_free_lines_dec(&pblk->rl, new, true);
1659
1660         /* Allocate next line for preparation */
1661         spin_lock(&l_mg->free_lock);
1662         l_mg->data_next = pblk_line_get(pblk);
1663         if (!l_mg->data_next) {
1664                 /* If we cannot get a new line, we need to stop the pipeline.
1665                  * Only allow as many writes in as we can store safely and then
1666                  * fail gracefully
1667                  */
1668                 pblk_stop_writes(pblk, new);
1669                 l_mg->data_next = NULL;
1670         } else {
1671                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1672                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1673         }
1674         spin_unlock(&l_mg->free_lock);
1675
1676 out:
1677         return new;
1678 }
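
/*
 * Usage sketch (hypothetical caller): the write path installs the next
 * data line once the current one fills up. A NULL return means no new
 * line could be installed:
 *
 *	if (pblk_line_is_full(cur)) {
 *		struct pblk_line *new = pblk_line_replace_data(pblk);
 *
 *		if (!new)
 *			return -ENOSPC;	// pipeline is winding down
 *	}
 */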
1679
1680 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1681 {
1682         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1683         struct pblk_gc *gc = &pblk->gc;
1684
1685         spin_lock(&line->lock);
1686         WARN_ON(line->state != PBLK_LINESTATE_GC);
1687         line->state = PBLK_LINESTATE_FREE;
1688         trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1689                                         line->state);
1690         line->gc_group = PBLK_LINEGC_NONE;
1691         pblk_line_free(line);
1692
1693         if (line->w_err_gc->has_write_err) {
1694                 pblk_rl_werr_line_out(&pblk->rl);
1695                 line->w_err_gc->has_write_err = 0;
1696         }
1697
1698         spin_unlock(&line->lock);
1699         atomic_dec(&gc->pipeline_gc);
1700
1701         spin_lock(&l_mg->free_lock);
1702         list_add_tail(&line->list, &l_mg->free_list);
1703         l_mg->nr_free_lines++;
1704         spin_unlock(&l_mg->free_lock);
1705
1706         pblk_rl_free_lines_inc(&pblk->rl, line);
1707 }
1708
1709 static void pblk_line_put_ws(struct work_struct *work)
1710 {
1711         struct pblk_line_ws *line_put_ws = container_of(work,
1712                                                 struct pblk_line_ws, ws);
1713         struct pblk *pblk = line_put_ws->pblk;
1714         struct pblk_line *line = line_put_ws->line;
1715
1716         __pblk_line_put(pblk, line);
1717         mempool_free(line_put_ws, &pblk->gen_ws_pool);
1718 }
1719
1720 void pblk_line_put(struct kref *ref)
1721 {
1722         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1723         struct pblk *pblk = line->pblk;
1724
1725         __pblk_line_put(pblk, line);
1726 }
1727
1728 void pblk_line_put_wq(struct kref *ref)
1729 {
1730         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1731         struct pblk *pblk = line->pblk;
1732         struct pblk_line_ws *line_put_ws;
1733
1734         line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1735         if (!line_put_ws)
1736                 return;
1737
1738         line_put_ws->pblk = pblk;
1739         line_put_ws->line = line;
1740         line_put_ws->priv = NULL;
1741
1742         INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1743         queue_work(pblk->r_end_wq, &line_put_ws->ws);
1744 }
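
/*
 * Two release flavors: pblk_line_put() runs __pblk_line_put() inline,
 * while pblk_line_put_wq() defers it to r_end_wq so that reference drops
 * from I/O completion context do not perform the full line teardown there.
 * Sketch of the two call-site shapes:
 *
 *	kref_put(&line->ref, pblk_line_put);	// process context
 *	kref_put(&line->ref, pblk_line_put_wq);	// completion/atomic context
 */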
1745
1746 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1747 {
1748         struct nvm_rq *rqd;
1749         int err;
1750
1751         rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1752
1753         pblk_setup_e_rq(pblk, rqd, ppa);
1754
1755         rqd->end_io = pblk_end_io_erase;
1756         rqd->private = pblk;
1757
1758         trace_pblk_chunk_reset(pblk_disk_name(pblk),
1759                                 &ppa, PBLK_CHUNK_RESET_START);
1760
1761         /* The write thread schedules erases so that they minimally disturb
1762          * writes. Thus, there is no need to take the LUN semaphore.
1763          */
1764         err = pblk_submit_io(pblk, rqd);
1765         if (err) {
1766                 struct nvm_tgt_dev *dev = pblk->dev;
1767                 struct nvm_geo *geo = &dev->geo;
1768
1769                 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1770                                         pblk_ppa_to_line_id(ppa),
1771                                         pblk_ppa_to_pos(geo, ppa));
1772         }
1773
1774         return err;
1775 }
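
/*
 * Completion sketch: rqd->end_io is set to pblk_end_io_erase above, so the
 * erase is fire-and-forget from the caller's perspective; chunk-reset
 * failures are accounted when the completion runs. A hypothetical caller
 * only needs to handle submission errors:
 *
 *	if (pblk_blk_erase_async(pblk, ppa))
 *		;	// submission failed, completion will not run
 */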
1776
1777 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1778 {
1779         return pblk->l_mg.data_line;
1780 }
1781
1782 /* For now, always erase next line */
1783 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1784 {
1785         return pblk->l_mg.data_next;
1786 }
1787
1788 int pblk_line_is_full(struct pblk_line *line)
1789 {
1790         return (line->left_msecs == 0);
1791 }
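
/*
 * Hedged usage sketch: a line is full when all mappable sectors have been
 * consumed, at which point the writer swaps lines:
 *
 *	if (pblk_line_is_full(line))
 *		line = pblk_line_replace_data(pblk);
 */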
1792
1793 static void pblk_line_should_sync_meta(struct pblk *pblk)
1794 {
1795         if (pblk_rl_is_limit(&pblk->rl))
1796                 pblk_line_close_meta_sync(pblk);
1797 }
1798
1799 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1800 {
1801         struct nvm_tgt_dev *dev = pblk->dev;
1802         struct nvm_geo *geo = &dev->geo;
1803         struct pblk_line_meta *lm = &pblk->lm;
1804         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1805         struct list_head *move_list;
1806         int i;
1807
1808 #ifdef CONFIG_NVM_PBLK_DEBUG
1809         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1810                                 "pblk: corrupt closed line %d\n", line->id);
1811 #endif
1812
1813         spin_lock(&l_mg->free_lock);
1814         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1815         spin_unlock(&l_mg->free_lock);
1816
1817         spin_lock(&l_mg->gc_lock);
1818         spin_lock(&line->lock);
1819         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1820         line->state = PBLK_LINESTATE_CLOSED;
1821         move_list = pblk_line_gc_list(pblk, line);
1822         list_add_tail(&line->list, move_list);
1823
1824         mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1825         line->map_bitmap = NULL;
1826         line->smeta = NULL;
1827         line->emeta = NULL;
1828
1829         for (i = 0; i < lm->blk_per_line; i++) {
1830                 struct pblk_lun *rlun = &pblk->luns[i];
1831                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1832                 struct nvm_chk_meta *chunk = &line->chks[pos];
1833
1834                 if (!(chunk->state & NVM_CHK_ST_OFFLINE))
1835                         chunk->state = NVM_CHK_ST_CLOSED;
1836         }
1837
1838         spin_unlock(&line->lock);
1839         spin_unlock(&l_mg->gc_lock);
1840
1841         trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1842                                         line->state);
1843 }
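
/*
 * Locking-order note for pblk_line_close(): l_mg->gc_lock nests outside
 * line->lock. Any new code moving a line between GC lists should follow
 * the same order:
 *
 *	spin_lock(&l_mg->gc_lock);
 *	spin_lock(&line->lock);
 *	...update line->state and list membership...
 *	spin_unlock(&line->lock);
 *	spin_unlock(&l_mg->gc_lock);
 */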
1844
1845 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1846 {
1847         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1848         struct pblk_line_meta *lm = &pblk->lm;
1849         struct pblk_emeta *emeta = line->emeta;
1850         struct line_emeta *emeta_buf = emeta->buf;
1851         struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1852
1853         /* No need for an exact vsc value; avoid the big line lock, use an approximation */
1854         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1855         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1856
1857         wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1858         wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1859         wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1860
1861         if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1862                 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1863                 memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
1864                 emeta_buf->header.id = cpu_to_le32(line->id);
1865                 emeta_buf->header.type = cpu_to_le16(line->type);
1866                 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1867                 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1868                 emeta_buf->header.crc = cpu_to_le32(
1869                         pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1870         }
1871
1872         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1873         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1874
1875         spin_lock(&l_mg->close_lock);
1876         spin_lock(&line->lock);
1877
1878         /* Update the in-memory start address for emeta, in case it has
1879          * shifted due to write errors
1880          */
1881         if (line->emeta_ssec != line->cur_sec)
1882                 line->emeta_ssec = line->cur_sec;
1883
1884         list_add_tail(&line->list, &l_mg->emeta_list);
1885         spin_unlock(&line->lock);
1886         spin_unlock(&l_mg->close_lock);
1887
1888         pblk_line_should_sync_meta(pblk);
1889 }
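
/*
 * The header CRC above covers only the emeta header, while emeta_buf->crc
 * covers the whole emeta buffer. Recovery is expected to validate both; a
 * minimal check, assuming the same helper used above:
 *
 *	if (le32_to_cpu(emeta_buf->crc) !=
 *				pblk_calc_emeta_crc(pblk, emeta_buf))
 *		;	// emeta not trustworthy, fall back to scanning
 */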
1890
1891 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1892 {
1893         struct pblk_line_meta *lm = &pblk->lm;
1894         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1895         unsigned int lba_list_size = lm->emeta_len[2];
1896         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1897         struct pblk_emeta *emeta = line->emeta;
1898
1899         w_err_gc->lba_list = pblk_malloc(lba_list_size,
1900                                          l_mg->emeta_alloc_type, GFP_KERNEL);
             if (!w_err_gc->lba_list)
                     return;
1901         memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1902                                 lba_list_size);
1903 }
1904
1905 void pblk_line_close_ws(struct work_struct *work)
1906 {
1907         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1908                                                                         ws);
1909         struct pblk *pblk = line_ws->pblk;
1910         struct pblk_line *line = line_ws->line;
1911         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1912
1913         /* Write errors make the emeta start address stored in smeta invalid,
1914          * so keep a copy of the lba list until we've gc'd the line
1915          */
1916         if (w_err_gc->has_write_err)
1917                 pblk_save_lba_list(pblk, line);
1918
1919         pblk_line_close(pblk, line);
1920         mempool_free(line_ws, &pblk->gen_ws_pool);
1921 }
1922
1923 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1924                       void (*work)(struct work_struct *), gfp_t gfp_mask,
1925                       struct workqueue_struct *wq)
1926 {
1927         struct pblk_line_ws *line_ws;
1928
1929         line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
             if (!line_ws)
                     return;
1930
1931         line_ws->pblk = pblk;
1932         line_ws->line = line;
1933         line_ws->priv = priv;
1934
1935         INIT_WORK(&line_ws->ws, work);
1936         queue_work(wq, &line_ws->ws);
1937 }
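
/*
 * Usage sketch (hypothetical worker): pblk_gen_run_ws() packages the
 * (pblk, line, priv) tuple into a pblk_line_ws from gen_ws_pool; the
 * worker owns the item and must return it to the pool when done:
 *
 *	static void example_ws(struct work_struct *work)
 *	{
 *		struct pblk_line_ws *ws = container_of(work,
 *					struct pblk_line_ws, ws);
 *
 *		// ...use ws->pblk, ws->line and ws->priv...
 *		mempool_free(ws, &ws->pblk->gen_ws_pool);
 *	}
 *
 *	pblk_gen_run_ws(pblk, line, NULL, example_ws, GFP_KERNEL, wq);
 */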
1938
1939 static void __pblk_down_chunk(struct pblk *pblk, int pos)
1940 {
1941         struct pblk_lun *rlun = &pblk->luns[pos];
1942         int ret;
1943
1944         /*
1945          * Only send one inflight I/O per LUN. Since we map at page
1946          * granularity, all ppas in the I/O will map to the same LUN.
1947          */
1948
1949         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1950         if (ret == -ETIME || ret == -EINTR)
1951                 pblk_err(pblk, "taking lun semaphore failed: err %d\n",
1952                                 -ret);
1953 }
1954
1955 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1956 {
1957         struct nvm_tgt_dev *dev = pblk->dev;
1958         struct nvm_geo *geo = &dev->geo;
1959         int pos = pblk_ppa_to_pos(geo, ppa);
1960
1961         __pblk_down_chunk(pblk, pos);
1962 }
1963
1964 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1965                   unsigned long *lun_bitmap)
1966 {
1967         struct nvm_tgt_dev *dev = pblk->dev;
1968         struct nvm_geo *geo = &dev->geo;
1969         int pos = pblk_ppa_to_pos(geo, ppa);
1970
1971         /* If the LUN has been locked for this same request, do not attempt
1972          * to lock it again
1973          */
1974         if (test_and_set_bit(pos, lun_bitmap))
1975                 return;
1976
1977         __pblk_down_chunk(pblk, pos);
1978 }
1979
1980 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1981 {
1982         struct nvm_tgt_dev *dev = pblk->dev;
1983         struct nvm_geo *geo = &dev->geo;
1984         struct pblk_lun *rlun;
1985         int pos = pblk_ppa_to_pos(geo, ppa);
1986
1987         rlun = &pblk->luns[pos];
1988         up(&rlun->wr_sem);
1989 }
1990
1991 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
1992 {
1993         struct nvm_tgt_dev *dev = pblk->dev;
1994         struct nvm_geo *geo = &dev->geo;
1995         struct pblk_lun *rlun;
1996         int num_lun = geo->all_luns;
1997         int bit = -1;
1998
1999         while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
2000                 rlun = &pblk->luns[bit];
2001                 up(&rlun->wr_sem);
2002         }
2003 }
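
/*
 * Pairing sketch (pseudo-code; the real loop lives in the mapping path):
 * the write path takes each LUN semaphore at most once per request via
 * lun_bitmap and releases the same set afterwards:
 *
 *	for each ppa in request:
 *		pblk_down_rq(pblk, ppa, lun_bitmap);	// no-op if bit set
 *	...submit and complete the I/O...
 *	pblk_up_rq(pblk, lun_bitmap);	// ups every LUN that was taken
 */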
2004
2005 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2006 {
2007         struct ppa_addr ppa_l2p;
2008
2009         /* logic error: lba out-of-bounds. Ignore update */
2010         if (!(lba < pblk->rl.nr_secs)) {
2011                 WARN(1, "pblk: corrupted L2P map request\n");
2012                 return;
2013         }
2014
2015         spin_lock(&pblk->trans_lock);
2016         ppa_l2p = pblk_trans_map_get(pblk, lba);
2017
2018         if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2019                 pblk_map_invalidate(pblk, ppa_l2p);
2020
2021         pblk_trans_map_set(pblk, lba, ppa);
2022         spin_unlock(&pblk->trans_lock);
2023 }
2024
2025 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2026 {
2028 #ifdef CONFIG_NVM_PBLK_DEBUG
2029         /* Callers must ensure that the ppa points to a cache address */
2030         BUG_ON(!pblk_addr_in_cache(ppa));
2031         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
2032 #endif
2033
2034         pblk_update_map(pblk, lba, ppa);
2035 }
2036
2037 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
2038                        struct pblk_line *gc_line, u64 paddr_gc)
2039 {
2040         struct ppa_addr ppa_l2p, ppa_gc;
2041         int ret = 1;
2042
2043 #ifdef CONFIG_NVM_PBLK_DEBUG
2044         /* Callers must ensure that the ppa points to a cache address */
2045         BUG_ON(!pblk_addr_in_cache(ppa_new));
2046         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
2047 #endif
2048
2049         /* logic error: lba out-of-bounds. Ignore update */
2050         if (!(lba < pblk->rl.nr_secs)) {
2051                 WARN(1, "pblk: corrupted L2P map request\n");
2052                 return 0;
2053         }
2054
2055         spin_lock(&pblk->trans_lock);
2056         ppa_l2p = pblk_trans_map_get(pblk, lba);
2057         ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2058
2059         if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2060                 spin_lock(&gc_line->lock);
2061                 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2062                                                 "pblk: corrupted GC update\n");
2063                 spin_unlock(&gc_line->lock);
2064
2065                 ret = 0;
2066                 goto out;
2067         }
2068
2069         pblk_trans_map_set(pblk, lba, ppa_new);
2070 out:
2071         spin_unlock(&pblk->trans_lock);
2072         return ret;
2073 }
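
/*
 * pblk_update_map_gc() is effectively a compare-and-set on the L2P entry:
 * the GC'd mapping is installed only if the entry still points to the
 * sector being collected; otherwise user I/O won the race. Hypothetical
 * caller sketch:
 *
 *	if (!pblk_update_map_gc(pblk, lba, cache_ppa, gc_line, paddr_gc))
 *		;	// stale GC data, do not account the sector as moved
 */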
2074
2075 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2076                          struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2077 {
2078         struct ppa_addr ppa_l2p;
2079
2080 #ifdef CONFIG_NVM_PBLK_DEBUG
2081         /* Callers must ensure that the ppa points to a device address */
2082         BUG_ON(pblk_addr_in_cache(ppa_mapped));
2083 #endif
2084         /* Invalidate and discard padded entries */
2085         if (lba == ADDR_EMPTY) {
2086                 atomic64_inc(&pblk->pad_wa);
2087 #ifdef CONFIG_NVM_PBLK_DEBUG
2088                 atomic_long_inc(&pblk->padded_wb);
2089 #endif
2090                 if (!pblk_ppa_empty(ppa_mapped))
2091                         pblk_map_invalidate(pblk, ppa_mapped);
2092                 return;
2093         }
2094
2095         /* logic error: lba out-of-bounds. Ignore update */
2096         if (!(lba < pblk->rl.nr_secs)) {
2097                 WARN(1, "pblk: corrupted L2P map request\n");
2098                 return;
2099         }
2100
2101         spin_lock(&pblk->trans_lock);
2102         ppa_l2p = pblk_trans_map_get(pblk, lba);
2103
2104         /* Do not update L2P if the cacheline has been updated. In this case,
2105          * the mapped ppa must be invalidated
2106          */
2107         if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2108                 if (!pblk_ppa_empty(ppa_mapped))
2109                         pblk_map_invalidate(pblk, ppa_mapped);
2110                 goto out;
2111         }
2112
2113 #ifdef CONFIG_NVM_PBLK_DEBUG
2114         WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2115 #endif
2116
2117         pblk_trans_map_set(pblk, lba, ppa_mapped);
2118 out:
2119         spin_unlock(&pblk->trans_lock);
2120 }
2121
2122 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2123                          sector_t blba, int nr_secs)
2124 {
2125         int i;
2126
2127         spin_lock(&pblk->trans_lock);
2128         for (i = 0; i < nr_secs; i++) {
2129                 struct ppa_addr ppa;
2130
2131                 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2132
2133                 /* If the L2P entry maps to a line, take a reference on it */
2134                 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2135                         struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2136
2137                         kref_get(&line->ref);
2138                 }
2139         }
2140         spin_unlock(&pblk->trans_lock);
2141 }
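
/*
 * Reference discipline: each device-resident ppa returned here carries a
 * line reference taken under trans_lock, so the line cannot be freed while
 * the read is in flight. The matching per-ppa drop happens on completion,
 * e.g. via pblk_rq_to_line_put() above.
 */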
2142
2143 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2144                           u64 *lba_list, int nr_secs)
2145 {
2146         u64 lba;
2147         int i;
2148
2149         spin_lock(&pblk->trans_lock);
2150         for (i = 0; i < nr_secs; i++) {
2151                 lba = lba_list[i];
2152                 if (lba != ADDR_EMPTY) {
2153                         /* logic error: lba out-of-bounds. Ignore update */
2154                         if (!(lba < pblk->rl.nr_secs)) {
2155                                 WARN(1, "pblk: corrupted L2P map request\n");
2156                                 continue;
2157                         }
2158                         ppas[i] = pblk_trans_map_get(pblk, lba);
2159                 }
2160         }
2161         spin_unlock(&pblk->trans_lock);
2162 }