drivers/s390/block/scm_blk.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

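/*
 * Descriptive note (not in the original source): scm_request structures are
 * preallocated at module load time and recycled via the inactive_requests
 * list.  Each one owns a zeroed AOB page (GFP_DMA) and an array of up to
 * nr_requests_per_io block-layer request pointers, so a single AOB can carry
 * several requests.
 */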
static void __scm_free_rq(struct scm_request *scmrq)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);

        free_page((unsigned long) scmrq->aob);
        kfree(scmrq->request);
        kfree(aobrq);
}

static void scm_free_rqs(void)
{
        struct list_head *iter, *safe;
        struct scm_request *scmrq;

        spin_lock_irq(&list_lock);
        list_for_each_safe(iter, safe, &inactive_requests) {
                scmrq = list_entry(iter, struct scm_request, list);
                list_del(&scmrq->list);
                __scm_free_rq(scmrq);
        }
        spin_unlock_irq(&list_lock);

        mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
        struct aob_rq_header *aobrq;
        struct scm_request *scmrq;

        aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
        if (!aobrq)
                return -ENOMEM;

        scmrq = (void *) aobrq->data;
        scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
        if (!scmrq->aob)
                goto free;

        scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
                                 GFP_KERNEL);
        if (!scmrq->request)
                goto free;

        INIT_LIST_HEAD(&scmrq->list);
        spin_lock_irq(&list_lock);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irq(&list_lock);

        return 0;
free:
        __scm_free_rq(scmrq);
        return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
        int ret = 0;

        aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
        if (!aidaw_pool)
                return -ENOMEM;

        while (nrqs-- && !ret)
                ret = __scm_alloc_rq();

        return ret;
}

static struct scm_request *scm_request_fetch(void)
{
        struct scm_request *scmrq = NULL;

        spin_lock_irq(&list_lock);
        if (list_empty(&inactive_requests))
                goto out;
        scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
        list_del(&scmrq->list);
out:
        spin_unlock_irq(&list_lock);
        return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
        unsigned long flags;
        struct msb *msb;
        u64 aidaw;
        int i;

        for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
                msb = &scmrq->aob->msb[i];
                aidaw = msb->data_addr;

                if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
                    IS_ALIGNED(aidaw, PAGE_SIZE))
                        mempool_free(virt_to_page(aidaw), aidaw_pool);
        }

        spin_lock_irqsave(&list_lock, flags);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
        return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
        struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

        return page ? page_address(page) : NULL;
}

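/*
 * Descriptive note (not in the original source): scm_aidaw_bytes() returns
 * how many payload bytes can still be described by the aidaw slots that fit
 * between *aidaw and the end of its page; the arithmetic assumes each slot
 * covers PAGE_SIZE bytes of data, matching how scm_request_prepare() fills
 * one aidaw entry per page-sized segment.
 */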
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
        unsigned long _aidaw = (unsigned long) aidaw;
        unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

        return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
        struct aidaw *aidaw;

        if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
                return scmrq->next_aidaw;

        aidaw = scm_aidaw_alloc();
        if (aidaw)
                memset(aidaw, 0, PAGE_SIZE);
        return aidaw;
}

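/*
 * Descriptive note (not in the original source): fill in the next msb of the
 * AOB for one block-layer request, with one aidaw entry per request segment
 * pointing at the segment's page.  Returns -ENOMEM if no aidaw space can be
 * obtained, in which case the AOB is left untouched.
 */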
static int scm_request_prepare(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        int pos = scmrq->aob->request.msb_count;
        struct msb *msb = &scmrq->aob->msb[pos];
        struct request *req = scmrq->request[pos];
        struct req_iterator iter;
        struct aidaw *aidaw;
        struct bio_vec bv;

        aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
        if (!aidaw)
                return -ENOMEM;

        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count++;
        msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
        msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
        msb->flags |= MSB_FLAG_IDA;
        msb->data_addr = (u64) aidaw;

        rq_for_each_segment(bv, req, iter) {
                WARN_ON(bv.bv_offset);
                msb->blk_count += bv.bv_len >> 12;
                aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }

        scmrq->next_aidaw = aidaw;
        return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
                                   struct request *req)
{
        scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
                                    struct scm_request *scmrq)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);
        struct aob *aob = scmrq->aob;

        memset(scmrq->request, 0,
               nr_requests_per_io * sizeof(scmrq->request[0]));
        memset(aob, 0, sizeof(*aob));
        aobrq->scmdev = bdev->scmdev;
        aob->request.cmd_code = ARQB_CMD_MOVE;
        aob->request.data = (u64) aobrq;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
        scmrq->error = BLK_STS_OK;
        /* We don't use all msbs - place aidaws at the end of the aob page. */
        scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

static void scm_request_requeue(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        int i;

        for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
                blk_mq_requeue_request(scmrq->request[i], false);

        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
        blk_mq_kick_requeue_list(bdev->rq);
}

static void scm_request_finish(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        blk_status_t *error;
        int i;

        for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
                error = blk_mq_rq_to_pdu(scmrq->request[i]);
                *error = scmrq->error;
                blk_mq_complete_request(scmrq->request[i]);
        }

        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        atomic_inc(&bdev->queued_reqs);
        if (eadm_start_aob(scmrq->aob)) {
                SCM_LOG(5, "no subchannel");
                scm_request_requeue(scmrq);
        }
}

struct scm_queue {
        struct scm_request *scmrq;
        spinlock_t lock;
};

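/*
 * Descriptive note (not in the original source): blk-mq ->queue_rq handler.
 * Each hardware context keeps a partially filled scm_request in sq->scmrq
 * and packs incoming requests into it, one msb per request; the AOB is
 * started once qd->last is set or nr_requests_per_io msbs have been filled.
 */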
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *qd)
{
        struct scm_device *scmdev = hctx->queue->queuedata;
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
        struct scm_queue *sq = hctx->driver_data;
        struct request *req = qd->rq;
        struct scm_request *scmrq;

        spin_lock(&sq->lock);
        if (!scm_permit_request(bdev, req)) {
                spin_unlock(&sq->lock);
                return BLK_STS_RESOURCE;
        }

        scmrq = sq->scmrq;
        if (!scmrq) {
                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
                        spin_unlock(&sq->lock);
                        return BLK_STS_RESOURCE;
                }
                scm_request_init(bdev, scmrq);
                sq->scmrq = scmrq;
        }
        scm_request_set(scmrq, req);

        if (scm_request_prepare(scmrq)) {
                SCM_LOG(5, "aidaw alloc failed");
                scm_request_set(scmrq, NULL);

                if (scmrq->aob->request.msb_count)
                        scm_request_start(scmrq);

                sq->scmrq = NULL;
                spin_unlock(&sq->lock);
                return BLK_STS_RESOURCE;
        }
        blk_mq_start_request(req);

        if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
                scm_request_start(scmrq);
                sq->scmrq = NULL;
        }
        spin_unlock(&sq->lock);
        return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                             unsigned int idx)
{
        struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

        if (!qd)
                return -ENOMEM;

        spin_lock_init(&qd->lock);
        hctx->driver_data = qd;

        return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
        struct scm_queue *qd = hctx->driver_data;

        WARN_ON(qd->scmrq);
        kfree(hctx->driver_data);
        hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
        struct aob *aob = scmrq->aob;

        if (scmrq->error == BLK_STS_TIMEOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
                SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
        }
        if (scmrq->retries)
                SCM_LOG(1, "Retry request");
        else
                pr_err("An I/O operation to SCM failed with rc=%d\n",
                       scmrq->error);
}

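/*
 * Descriptive note (not in the original source): called for failed AOBs
 * while retries remain.  For BLK_STS_IOERR the response block is valid and
 * EQC_WR_PROHIBIT marks the device write-protected and requeues the
 * requests; any other failure is simply restarted, and the requests are
 * requeued if the restart itself fails.
 */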
static void scm_blk_handle_error(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (scmrq->error != BLK_STS_IOERR)
                goto restart;

        /* For -EIO the response block is valid. */
        switch (scmrq->aob->response.eqc) {
        case EQC_WR_PROHIBIT:
                spin_lock_irqsave(&bdev->lock, flags);
                if (bdev->state != SCM_WR_PROHIBIT)
                        pr_info("%lx: Write access to the SCM increment is suspended\n",
                                (unsigned long) bdev->scmdev->address);
                bdev->state = SCM_WR_PROHIBIT;
                spin_unlock_irqrestore(&bdev->lock, flags);
                goto requeue;
        default:
                break;
        }

restart:
        if (!eadm_start_aob(scmrq->aob))
                return;

requeue:
        scm_request_requeue(scmrq);
}

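/*
 * Descriptive note (not in the original source): completion callback for an
 * AOB, invoked by the SCM/eadm layer.  Failed AOBs are retried up to
 * scmrq->retries times before the error is propagated to the block layer.
 */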
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
        struct scm_request *scmrq = data;

        scmrq->error = error;
        if (error) {
                __scmrq_log_error(scmrq);
                if (scmrq->retries-- > 0) {
                        scm_blk_handle_error(scmrq);
                        return;
                }
        }

        scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
        blk_status_t *error = blk_mq_rq_to_pdu(req);

        blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
        .owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
        .queue_rq = scm_blk_request,
        .complete = scm_blk_request_done,
        .init_hctx = scm_blk_init_hctx,
        .exit_hctx = scm_blk_exit_hctx,
};

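/*
 * Descriptive note (not in the original source): set up the blk-mq tag set,
 * request queue and gendisk for one SCM increment.  Disks are named
 * scma..scmz, then scmaa..scmzz, limiting the driver to 702 devices; the
 * logical block size is 4K while capacity is reported in 512-byte sectors.
 */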
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
        unsigned int devindex, nr_max_blk;
        struct request_queue *rq;
        int len, ret;

        devindex = atomic_inc_return(&nr_devices) - 1;
        /* scma..scmz + scmaa..scmzz */
        if (devindex > 701) {
                ret = -ENODEV;
                goto out;
        }

        bdev->scmdev = scmdev;
        bdev->state = SCM_OPER;
        spin_lock_init(&bdev->lock);
        atomic_set(&bdev->queued_reqs, 0);

        bdev->tag_set.ops = &scm_mq_ops;
        bdev->tag_set.cmd_size = sizeof(blk_status_t);
        bdev->tag_set.nr_hw_queues = nr_requests;
        bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
        bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

        ret = blk_mq_alloc_tag_set(&bdev->tag_set);
        if (ret)
                goto out;

        rq = blk_mq_init_queue(&bdev->tag_set);
        if (IS_ERR(rq)) {
                ret = PTR_ERR(rq);
                goto out_tag;
        }
        bdev->rq = rq;
        nr_max_blk = min(scmdev->nr_max_block,
                         (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

        blk_queue_logical_block_size(rq, 1 << 12);
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
        if (!bdev->gendisk) {
                ret = -ENOMEM;
                goto out_queue;
        }
        rq->queuedata = scmdev;
        bdev->gendisk->private_data = scmdev;
        bdev->gendisk->fops = &scm_blk_devops;
        bdev->gendisk->queue = rq;
        bdev->gendisk->major = scm_major;
        bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

        len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
        if (devindex > 25) {
                len += snprintf(bdev->gendisk->disk_name + len,
                                DISK_NAME_LEN - len, "%c",
                                'a' + (devindex / 26) - 1);
                devindex = devindex % 26;
        }
        snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
                 'a' + devindex);

        /* 512 byte sectors */
        set_capacity(bdev->gendisk, scmdev->size >> 9);
        device_add_disk(&scmdev->dev, bdev->gendisk);
        return 0;

out_queue:
        blk_cleanup_queue(rq);
out_tag:
        blk_mq_free_tag_set(&bdev->tag_set);
out:
        atomic_dec(&nr_devices);
        return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
        del_gendisk(bdev->gendisk);
        blk_cleanup_queue(bdev->gendisk->queue);
        blk_mq_free_tag_set(&bdev->tag_set);
        put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
        unsigned long flags;

        spin_lock_irqsave(&bdev->lock, flags);
        if (bdev->state == SCM_WR_PROHIBIT)
                pr_info("%lx: Write access to the SCM increment is restored\n",
                        (unsigned long) bdev->scmdev->address);
        bdev->state = SCM_OPER;
        spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
        if (!nr_requests_per_io || nr_requests_per_io > 64)
                return false;

        return true;
}

static int __init scm_blk_init(void)
{
        int ret = -EINVAL;

        if (!scm_blk_params_valid())
                goto out;

        ret = register_blkdev(0, "scm");
        if (ret < 0)
                goto out;

        scm_major = ret;
        ret = scm_alloc_rqs(nr_requests);
        if (ret)
                goto out_free;

        scm_debug = debug_register("scm_log", 16, 1, 16);
        if (!scm_debug) {
                ret = -ENOMEM;
                goto out_free;
        }

        debug_register_view(scm_debug, &debug_hex_ascii_view);
        debug_set_level(scm_debug, 2);

        ret = scm_drv_init();
        if (ret)
                goto out_dbf;

        return ret;

out_dbf:
        debug_unregister(scm_debug);
out_free:
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
out:
        return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
        scm_drv_cleanup();
        debug_unregister(scm_debug);
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);