Merge branch 'for-4.6/drivers' of git://git.kernel.dk/linux-block

diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22defbcbddcfdc3d63cfe86bda43fa0c..82343783aa47239b427fee1bbc7e066e5160b630 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 
        spin_lock(&rblk->lock);
 
-       div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+       div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
        WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
        rblk->nr_invalid_pages++;
 
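Note on the theme of this patch: rrpc previously did its per-block bookkeeping at flash-page granularity (pgs_per_blk, nr_pages). A flash page can hold several sectors, and the map works at 4KB sector granularity (see the comment in rrpc_read_ppalist_rq below), so the accounting moves to sectors (sec_per_blk, nr_sects). A user-space sketch of the geometry relation, with assumed values (on real devices the plane count can factor in as well):

	#include <stdio.h>

	int main(void)
	{
		int pgs_per_blk = 128;	/* assumed pages per block */
		int sec_per_pg = 4;	/* assumed sectors per flash page */
		int sec_per_blk = pgs_per_blk * sec_per_pg;	/* mapping slots per block */

		printf("sec_per_blk=%d\n", sec_per_blk);	/* 512 */
		return 0;
	}
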
@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
 
 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-       return (rblk->next_page == rrpc->dev->pgs_per_blk);
+       return (rblk->next_page == rrpc->dev->sec_per_blk);
 }
 
+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       struct nvm_block *blk = rblk->parent;
+       int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+       return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
        struct nvm_block *blk = rblk->parent;
 
-       return blk->id * rrpc->dev->pgs_per_blk;
+       return blk->id * rrpc->dev->sec_per_blk;
 }
 
 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
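The added block_to_rel_addr() differs from the global block_to_addr() in that it first wraps the block id into the span of blocks owned by this rrpc instance (nr_luns * blks_per_lun), so the result can index per-target tables. A user-space sketch of the two calculations, using a made-up toy struct rather than the kernel's:

	#include <stdio.h>

	struct toy_geo {
		int blks_per_lun, sec_per_blk, nr_luns;
	};

	/* Global address: device-wide block id scaled by sectors per block. */
	static unsigned long long toy_block_to_addr(struct toy_geo *g, int blk_id)
	{
		return (unsigned long long)blk_id * g->sec_per_blk;
	}

	/* Relative address: wrap the id to this instance's LUN span first. */
	static unsigned long long toy_block_to_rel_addr(struct toy_geo *g, int blk_id)
	{
		int lun_blk = blk_id % (g->blks_per_lun * g->nr_luns);

		return (unsigned long long)lun_blk * g->sec_per_blk;
	}

	int main(void)
	{
		struct toy_geo g = { .blks_per_lun = 1024, .sec_per_blk = 256, .nr_luns = 2 };

		/* Block id 2050 lies past this instance's 2048-block span. */
		printf("global %llu, relative %llu\n",
		       toy_block_to_addr(&g, 2050),		/* 524800 */
		       toy_block_to_rel_addr(&g, 2050));	/* 512 */
		return 0;
	}
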
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
        l.g.sec = secs;
 
        sector_div(ppa, dev->sec_per_pg);
-       div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+       div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
        l.g.pg = pgs;
 
        sector_div(ppa, dev->pgs_per_blk);
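This hunk fixes the page field of the generic address: after the sector_div() by sec_per_pg the address is already in page units, so the in-block page index is the remainder modulo pgs_per_blk; the old remainder modulo sec_per_blk was wrong. A runnable sketch of the successive div/mod decomposition, with illustrative geometry:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long ppa = 123456;
		int sec_per_pg = 4, pgs_per_blk = 128, blks_per_lun = 1024;
		int sec, pg, blk;

		sec = ppa % sec_per_pg;  ppa /= sec_per_pg;	/* sector within page */
		pg  = ppa % pgs_per_blk; ppa /= pgs_per_blk;	/* page within block  */
		blk = ppa % blks_per_lun;			/* block within LUN   */

		printf("sec=%d pg=%d blk=%d\n", sec, pg, blk);	/* sec=0 pg=16 blk=241 */
		return 0;
	}
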
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                return NULL;
        }
 
-       rblk = &rlun->blocks[blk->id];
+       rblk = rrpc_get_rblk(rlun, blk->id);
        list_add_tail(&rblk->list, &rlun->open_list);
        spin_unlock(&lun->lock);
 
        blk->priv = rblk;
-       bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+       bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
        atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
        struct bio *bio;
        struct page *page;
        int slot;
-       int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+       int nr_sec_per_blk = rrpc->dev->sec_per_blk;
        u64 phys_addr;
        DECLARE_COMPLETION_ONSTACK(wait);
 
-       if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+       if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
                return 0;
 
        bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
        }
 
        while ((slot = find_first_zero_bit(rblk->invalid_pages,
-                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
+                                           nr_sec_per_blk)) < nr_sec_per_blk) {
 
                /* Lock laddr */
-               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+               phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
 
 try:
                spin_lock(&rrpc->rev_lock);
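For reference, the surrounding loop walks the invalid_pages bitmap and relocates every sector whose bit is still zero, i.e. still valid. A toy user-space version of that scan, using ffs() on the complement instead of the kernel's find_first_zero_bit():

	#include <stdio.h>
	#include <strings.h>

	int main(void)
	{
		unsigned int invalid = 0xfffffff5;	/* bits 1 and 3 clear: two valid sectors */
		unsigned int valid = ~invalid;

		while (valid) {
			int slot = ffs(valid) - 1;	/* lowest zero bit of the invalid mask */

			printf("move sector at slot %d\n", slot);
			valid &= valid - 1;		/* clear the bit we just handled */
		}
		return 0;
	}
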
@@ -381,7 +391,7 @@ finished:
        mempool_free(page, rrpc->page_pool);
        bio_put(bio);
 
-       if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+       if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
                pr_err("nvm: failed to garbage collect block\n");
                return -EIO;
        }
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct nvm_lun *lun = rblk->parent->lun;
+       struct nvm_block *blk = rblk->parent;
        struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);
 
+       spin_lock(&lun->lock);
+       lun->nr_open_blocks--;
+       lun->nr_closed_blocks++;
+       blk->state &= ~NVM_BLK_ST_OPEN;
+       blk->state |= NVM_BLK_ST_CLOSED;
+       list_move_tail(&rblk->list, &rlun->closed_list);
+       spin_unlock(&lun->lock);
+
        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
                                                        rblk->parent->id);
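This hunk moves the open-to-closed transition out of the write-completion path (see the rrpc_end_io_write hunk below) and into the GC work item, keeping the counters, the state flags, and the list move together under lun->lock. The flag update itself is plain bit surgery; a minimal sketch, with flag values invented for illustration (the kernel defines its own NVM_BLK_ST_* constants):

	#include <stdio.h>

	#define TOY_BLK_ST_OPEN		(1 << 1)	/* illustrative values only */
	#define TOY_BLK_ST_CLOSED	(1 << 2)

	static unsigned int toy_close_block(unsigned int state)
	{
		state &= ~TOY_BLK_ST_OPEN;	/* leave the open set */
		state |= TOY_BLK_ST_CLOSED;	/* enter the closed set */
		return state;
	}

	int main(void)
	{
		printf("state=0x%x\n", toy_close_block(TOY_BLK_ST_OPEN));	/* 0x4 */
		return 0;
	}
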
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
        struct rrpc_addr *gp;
        struct rrpc_rev_addr *rev;
 
-       BUG_ON(laddr >= rrpc->nr_pages);
+       BUG_ON(laddr >= rrpc->nr_sects);
 
        gp = &rrpc->trans_map[laddr];
        spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
                lun = rblk->parent->lun;
 
                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-               if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-                       struct nvm_block *blk = rblk->parent;
-                       struct rrpc_lun *rlun = rblk->rlun;
-
-                       spin_lock(&lun->lock);
-                       lun->nr_open_blocks--;
-                       lun->nr_closed_blocks++;
-                       blk->state &= ~NVM_BLK_ST_OPEN;
-                       blk->state |= NVM_BLK_ST_CLOSED;
-                       list_move_tail(&rblk->list, &rlun->closed_list);
-                       spin_unlock(&lun->lock);
-
+               if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
                        rrpc_run_gc(rrpc, rblk);
-               }
        }
 }
 
@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 
        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
-               BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+               BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
                gp = &rrpc->trans_map[laddr + i];
 
                if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;
 
-       BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+       BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
        gp = &rrpc->trans_map[laddr];
 
        if (gp->rblk) {
@@ -1007,21 +1014,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
        struct nvm_dev *dev = rrpc->dev;
        struct rrpc_addr *addr = rrpc->trans_map + slba;
        struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
-       sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
        u64 elba = slba + nlb;
        u64 i;
 
-       if (unlikely(elba > dev->total_pages)) {
+       if (unlikely(elba > dev->total_secs)) {
                pr_err("nvm: L2P data from device is out of bounds!\n");
                return -EINVAL;
        }
 
        for (i = 0; i < nlb; i++) {
                u64 pba = le64_to_cpu(entries[i]);
+               unsigned int mod;
                /* LNVM treats address-spaces as silos, LBA and PBA are
                 * equally large and zero-indexed.
                 */
-               if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+               if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("nvm: L2P data entry is out of bounds!\n");
                        return -EINVAL;
                }
@@ -1033,8 +1040,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
                if (!pba)
                        continue;
 
+               div_u64_rem(pba, rrpc->nr_sects, &mod);
+
                addr[i].addr = pba;
-               raddr[pba].addr = slba + i;
+               raddr[mod].addr = slba + i;
        }
 
        return 0;
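The pba reported by the device is global across the whole device, while the reverse translation map holds only nr_sects entries per target, so the index is folded into range with div_u64_rem() before the raddr[] store. A small sketch of that fold, with an assumed table size:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long nr_sects = 4096;	/* sectors owned by this target (assumed) */
		unsigned long long pba = 10000;		/* device-global physical sector address */
		unsigned int mod = pba % nr_sects;	/* index into this target's reverse map */

		printf("rev_trans_map[%u] <- lba for pba %llu\n", mod, pba);	/* mod=1808 */
		return 0;
	}
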
@@ -1046,16 +1055,16 @@ static int rrpc_map_init(struct rrpc *rrpc)
        sector_t i;
        int ret;
 
-       rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+       rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
        if (!rrpc->trans_map)
                return -ENOMEM;
 
        rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
-                                                       * rrpc->nr_pages);
+                                                       * rrpc->nr_sects);
        if (!rrpc->rev_trans_map)
                return -ENOMEM;
 
-       for (i = 0; i < rrpc->nr_pages; i++) {
+       for (i = 0; i < rrpc->nr_sects; i++) {
                struct rrpc_addr *p = &rrpc->trans_map[i];
                struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
 
@@ -1067,8 +1076,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
                return 0;
 
        /* Bring up the mapping table from device */
-       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
-                                                       rrpc_l2p_update, rrpc);
+       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
+                                                                       rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not read L2P table.\n");
                return -EINVAL;
@@ -1141,7 +1150,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
        struct rrpc_lun *rlun;
        int i, j;
 
-       if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+       if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                pr_err("rrpc: number of pages per block too high.");
                return -EINVAL;
        }
@@ -1168,7 +1177,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
                spin_lock_init(&rlun->lock);
 
                rrpc->total_blocks += dev->blks_per_lun;
-               rrpc->nr_pages += dev->sec_per_lun;
+               rrpc->nr_sects += dev->sec_per_lun;
 
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                rrpc->dev->blks_per_lun);
@@ -1221,9 +1230,9 @@ static sector_t rrpc_capacity(void *private)
 
        /* cur, gc, and two emergency blocks for each lun */
        reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
-       provisioned = rrpc->nr_pages - reserved;
+       provisioned = rrpc->nr_sects - reserved;
 
-       if (reserved > rrpc->nr_pages) {
+       if (reserved > rrpc->nr_sects) {
                pr_err("rrpc: not enough space available to expose storage.\n");
                return 0;
        }
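The reservation above follows the comment: per LUN, one current write block, one GC block, and two emergency blocks are withheld from the exposed capacity. Worked through with assumed numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long nr_luns = 8, max_pages_per_blk = 256;
		unsigned long long nr_sects = 2097152;	/* assumed target size */
		unsigned long long reserved = nr_luns * max_pages_per_blk * 4;
		unsigned long long provisioned = nr_sects - reserved;

		printf("reserved=%llu provisioned=%llu\n", reserved, provisioned);
		/* reserved=8192 provisioned=2088960 */
		return 0;
	}
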
@@ -1242,10 +1251,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
        struct nvm_dev *dev = rrpc->dev;
        int offset;
        struct rrpc_addr *laddr;
-       u64 paddr, pladdr;
+       u64 bpaddr, paddr, pladdr;
 
-       for (offset = 0; offset < dev->pgs_per_blk; offset++) {
-               paddr = block_to_addr(rrpc, rblk) + offset;
+       bpaddr = block_to_rel_addr(rrpc, rblk);
+       for (offset = 0; offset < dev->sec_per_blk; offset++) {
+               paddr = bpaddr + offset;
 
                pladdr = rrpc->rev_trans_map[paddr].addr;
                if (pladdr == ADDR_EMPTY)
@@ -1386,7 +1396,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
 
        pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
-                       rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+                       rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
 
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));