// SPDX-License-Identifier: GPL-2.0-only
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 * 0          | --- | --- | --- | +----+
 * 8          | -W- | -W- | --- | | pp |   data_sector = 8
 * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0); they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, and the header holds a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently of the others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write; header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * one can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous one has completed (PPL and stripe
 * data+parity are written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */
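
/*
 * Illustrative sketch only; ppl_example_pp_sector() is a hypothetical helper,
 * not used by the driver. It maps an entry index to the on-disk start sector
 * of that entry's partial parity, following the layout described above:
 * partial parity follows the header in entry order, so entry n starts after
 * the header plus the pp_size of all preceding entries.
 */
static sector_t __maybe_unused ppl_example_pp_sector(struct ppl_header *pplhdr,
                                                     sector_t ppl_start, int n)
{
        sector_t sector = ppl_start + (PPL_HEADER_SIZE >> 9);
        int i;

        for (i = 0; i < n; i++)
                sector += le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;

        return sector;
}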

#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
        struct mddev *mddev;

        /* array of child logs, one for each raid disk */
        struct ppl_log *child_logs;
        int count;

        int block_size;         /* the logical block size used for data_sector
                                 * in ppl_header_entry */
        u32 signature;          /* raid array identifier */
        atomic64_t seq;         /* current log write sequence number */

        struct kmem_cache *io_kc;
        mempool_t io_pool;
        struct bio_set bs;
        struct bio_set flush_bs;

        /* used only for recovery */
        int recovered_entries;
        int mismatch_count;

        /* stripes to retry if failed to allocate io_unit */
        struct list_head no_mem_stripes;
        spinlock_t no_mem_stripes_lock;

        unsigned short write_hint;
};

struct ppl_log {
        struct ppl_conf *ppl_conf;      /* shared between all log instances */

        struct md_rdev *rdev;           /* array member disk associated with
                                         * this log instance */
        struct mutex io_mutex;
        struct ppl_io_unit *current_io; /* current io_unit accepting new data
                                         * always at the end of io_list */
        spinlock_t io_list_lock;
        struct list_head io_list;       /* all io_units of this log */

        sector_t next_io_sector;
        unsigned int entry_space;
        bool use_multippl;
        bool wb_cache_on;
        unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
        struct ppl_log *log;

        struct page *header_page;       /* for ppl_header */

        unsigned int entries_count;     /* number of entries in ppl_header */
        unsigned int pp_size;           /* current total size of partial parity */

        u64 seq;                        /* sequence number of this log write */
        struct list_head log_sibling;   /* log->io_list */

        struct list_head stripe_list;   /* stripes added to the io_unit */
        atomic_t pending_stripes;       /* how many stripes not written to raid */
        atomic_t pending_flushes;       /* how many disk flushes are in progress */

        bool submitted;                 /* true if write to log started */

        /* inline bio and its biovec for submitting the iounit */
        struct bio bio;
        struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
                       struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        /*
         * Partial parity is the XOR of stripe data chunks that are not changed
         * during the write request. Depending on available data
         * (read-modify-write vs. reconstruct-write case) we calculate it
         * differently.
         */
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                /*
                 * rmw: xor old data and parity from updated disks
                 * This is calculated earlier by ops_run_prexor5() so just copy
                 * the parity dev page.
                 */
                srcs[count++] = sh->dev[pd_idx].page;
        } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
                /* rcw: xor data from all not updated disks */
                for (i = disks; i--;) {
                        struct r5dev *dev = &sh->dev[i];
                        if (test_bit(R5_UPTODATE, &dev->flags))
                                srcs[count++] = dev->page;
                }
        } else {
                return tx;
        }

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
                          NULL, sh, (void *) (srcs + sh->disks + 2));

        if (count == 1)
                tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
                                  &submit);
        else
                tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
                               &submit);

        return tx;
}
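
/*
 * Illustrative sketch only; ppl_example_xor_pages() is a hypothetical helper,
 * not used by the driver. The async operations above produce the byte-wise
 * XOR of the selected source pages into sh->ppl_page; written synchronously
 * here for clarity.
 */
static void __maybe_unused ppl_example_xor_pages(struct page *dst,
                                                 struct page **srcs, int count)
{
        u32 *d = page_address(dst);
        size_t j;
        int i;

        memset(d, 0, PAGE_SIZE);
        for (i = 0; i < count; i++) {
                u32 *s = page_address(srcs[i]);

                for (j = 0; j < PAGE_SIZE / sizeof(u32); j++)
                        d[j] ^= s[j];
        }
}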

static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io;

        io = kmem_cache_alloc(kc, gfp_mask);
        if (!io)
                return NULL;

        io->header_page = alloc_page(gfp_mask);
        if (!io->header_page) {
                kmem_cache_free(kc, io);
                return NULL;
        }

        return io;
}

static void ppl_io_pool_free(void *element, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io = element;

        __free_page(io->header_page);
        kmem_cache_free(kc, io);
}

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
                                          struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_io_unit *io;
        struct ppl_header *pplhdr;
        struct page *header_page;

        io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
        if (!io)
                return NULL;

        header_page = io->header_page;
        memset(io, 0, sizeof(*io));
        io->header_page = header_page;

        io->log = log;
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        atomic_set(&io->pending_stripes, 0);
        atomic_set(&io->pending_flushes, 0);
        bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
                 REQ_OP_WRITE | REQ_FUA);

        pplhdr = page_address(io->header_page);
        clear_page(pplhdr);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(ppl_conf->signature);

        io->seq = atomic64_add_return(1, &ppl_conf->seq);
        pplhdr->generation = cpu_to_le64(io->seq);

        return io;
}

static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
        struct ppl_io_unit *io = log->current_io;
        struct ppl_header_entry *e = NULL;
        struct ppl_header *pplhdr;
        int i;
        sector_t data_sector = 0;
        int data_disks = 0;
        struct r5conf *conf = sh->raid_conf;

        pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

        /* check if current io_unit is full */
        if (io && (io->pp_size == log->entry_space ||
                   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
                pr_debug("%s: add io_unit blocked by seq: %llu\n",
                         __func__, io->seq);
                io = NULL;
        }

        /* add a new unit if there is none or the current is full */
        if (!io) {
                io = ppl_new_iounit(log, sh);
                if (!io)
                        return -ENOMEM;
                spin_lock_irq(&log->io_list_lock);
                list_add_tail(&io->log_sibling, &log->io_list);
                spin_unlock_irq(&log->io_list_lock);

                log->current_io = io;
        }

        for (i = 0; i < sh->disks; i++) {
                struct r5dev *dev = &sh->dev[i];

                if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
                        if (!data_disks || dev->sector < data_sector)
                                data_sector = dev->sector;
                        data_disks++;
                }
        }
        BUG_ON(!data_disks);

        pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
                 io->seq, (unsigned long long)data_sector, data_disks);

        pplhdr = page_address(io->header_page);

        if (io->entries_count > 0) {
                struct ppl_header_entry *last =
                                &pplhdr->entries[io->entries_count - 1];
                struct stripe_head *sh_last = list_last_entry(
                                &io->stripe_list, struct stripe_head, log_list);
                u64 data_sector_last = le64_to_cpu(last->data_sector);
                u32 data_size_last = le32_to_cpu(last->data_size);

                /*
                 * Check if we can append the stripe to the last entry. It must
                 * be just after the last logged stripe and write to the same
                 * disks. Use bit shift and logarithm to avoid 64-bit division
                 * (see the illustrative predicate after this function).
                 */
                if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
                    (data_sector >> ilog2(conf->chunk_sectors) ==
                     data_sector_last >> ilog2(conf->chunk_sectors)) &&
                    ((data_sector - data_sector_last) * data_disks ==
                     data_size_last >> 9))
                        e = last;
        }

        if (!e) {
                e = &pplhdr->entries[io->entries_count++];
                e->data_sector = cpu_to_le64(data_sector);
                e->parity_disk = cpu_to_le32(sh->pd_idx);
                e->checksum = cpu_to_le32(~0);
        }

        le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

        /* don't write any PP if full stripe write */
        if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
                le32_add_cpu(&e->pp_size, PAGE_SIZE);
                io->pp_size += PAGE_SIZE;
                e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
                                                    page_address(sh->ppl_page),
                                                    PAGE_SIZE));
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripes);
        sh->ppl_io = io;

        return 0;
}
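
/*
 * Illustrative sketch only; ppl_example_can_append() is a hypothetical helper,
 * not used by the driver. It restates the append check above as a standalone
 * predicate: a stripe can extend the previous entry when it directly follows
 * the last logged stripe, starts in the same chunk, and the sectors between
 * the two entry starts match the data size already logged.
 */
static bool __maybe_unused
ppl_example_can_append(struct r5conf *conf, sector_t sh_sector,
                       sector_t sh_sector_last, sector_t data_sector,
                       sector_t data_sector_last, int data_disks,
                       u32 data_size_last)
{
        return sh_sector == sh_sector_last + RAID5_STRIPE_SECTORS(conf) &&
               (data_sector >> ilog2(conf->chunk_sectors)) ==
               (data_sector_last >> ilog2(conf->chunk_sectors)) &&
               (data_sector - data_sector_last) * data_disks ==
               data_size_last >> 9;
}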

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_io_unit *io = sh->ppl_io;
        struct ppl_log *log;

        if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
            !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        log = &ppl_conf->child_logs[sh->pd_idx];

        mutex_lock(&log->io_mutex);

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                mutex_unlock(&log->io_mutex);
                return -EAGAIN;
        }

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        if (ppl_log_stripe(log, sh)) {
                spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
                list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
                spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
        }

        mutex_unlock(&log->io_mutex);

        return 0;
}

static void ppl_log_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct stripe_head *sh, *next;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

        if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);

                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}

static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
        pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n",
                 __func__, io->seq, bio->bi_iter.bi_size,
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio->bi_bdev);

        submit_bio(bio);
}

static void ppl_submit_iounit(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_header *pplhdr = page_address(io->header_page);
        struct bio *bio = &io->bio;
        struct stripe_head *sh;
        int i;

        bio->bi_private = io;

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                ppl_log_endio(bio);
                return;
        }

        for (i = 0; i < io->entries_count; i++) {
                struct ppl_header_entry *e = &pplhdr->entries[i];

                pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
                         __func__, io->seq, i, le64_to_cpu(e->data_sector),
                         le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

                e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
                                             ilog2(ppl_conf->block_size >> 9));
                e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
        }

        pplhdr->entries_count = cpu_to_le32(io->entries_count);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

        /* Rewind the buffer if the current PPL is larger than the remaining space */
        if (log->use_multippl &&
            log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
            (PPL_HEADER_SIZE + io->pp_size) >> 9)
                log->next_io_sector = log->rdev->ppl.sector;

        bio->bi_end_io = ppl_log_endio;
        bio->bi_iter.bi_sector = log->next_io_sector;
        __bio_add_page(bio, io->header_page, PAGE_SIZE, 0);

        pr_debug("%s: log->current_io_sector: %llu\n", __func__,
            (unsigned long long)log->next_io_sector);

        if (log->use_multippl)
                log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

        WARN_ON(log->disk_flush_bitmap != 0);

        list_for_each_entry(sh, &io->stripe_list, log_list) {
                for (i = 0; i < sh->disks; i++) {
                        struct r5dev *dev = &sh->dev[i];

                        if ((ppl_conf->child_logs[i].wb_cache_on) &&
                            (test_bit(R5_Wantwrite, &dev->flags))) {
                                set_bit(i, &log->disk_flush_bitmap);
                        }
                }

                /* entries for full stripe writes have no partial parity */
                if (test_bit(STRIPE_FULL_WRITE, &sh->state))
                        continue;

                if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
                        struct bio *prev = bio;

                        bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
                                               prev->bi_opf, GFP_NOIO,
                                               &ppl_conf->bs);
                        bio->bi_iter.bi_sector = bio_end_sector(prev);
                        __bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

                        bio_chain(bio, prev);
                        ppl_submit_iounit_bio(io, prev);
                }
        }

        ppl_submit_iounit_bio(io, bio);
}
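
/*
 * Illustrative sketch only; ppl_example_next_sector() is a hypothetical
 * helper, not used by the driver. It restates the wrap-around rule above:
 * with multiple PPLs the write position advances through the PPL area and
 * rewinds to its start when the next write would not fit in the remaining
 * space.
 */
static sector_t __maybe_unused
ppl_example_next_sector(sector_t ppl_start, sector_t ppl_size,
                        sector_t next_io_sector, unsigned int write_bytes)
{
        if (ppl_start + ppl_size - next_io_sector < (write_bytes >> 9))
                return ppl_start;
        return next_io_sector;
}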

static void ppl_submit_current_io(struct ppl_log *log)
{
        struct ppl_io_unit *io;

        spin_lock_irq(&log->io_list_lock);

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);
        if (io && io->submitted)
                io = NULL;

        spin_unlock_irq(&log->io_list_lock);

        if (io) {
                io->submitted = true;

                if (io == log->current_io)
                        log->current_io = NULL;

                ppl_submit_iounit(io);
        }
}

void ppl_write_stripe_run(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                log = &ppl_conf->child_logs[i];

                mutex_lock(&log->io_mutex);
                ppl_submit_current_io(log);
                mutex_unlock(&log->io_mutex);
        }
}

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        unsigned long flags;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

        local_irq_save(flags);

        spin_lock(&log->io_list_lock);
        list_del(&io->log_sibling);
        spin_unlock(&log->io_list_lock);

        mempool_free(io, &ppl_conf->io_pool);

        spin_lock(&ppl_conf->no_mem_stripes_lock);
        if (!list_empty(&ppl_conf->no_mem_stripes)) {
                struct stripe_head *sh;

                sh = list_first_entry(&ppl_conf->no_mem_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&ppl_conf->no_mem_stripes_lock);

        local_irq_restore(flags);

        wake_up(&conf->wait_for_quiescent);
}

static void ppl_flush_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;

        pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);

        if (bio->bi_status) {
                struct md_rdev *rdev;

                rcu_read_lock();
                rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
                if (rdev)
                        md_error(rdev->mddev, rdev);
                rcu_read_unlock();
        }

        bio_put(bio);

        if (atomic_dec_and_test(&io->pending_flushes)) {
                ppl_io_unit_finished(io);
                md_wakeup_thread(conf->mddev->thread);
        }
}

static void ppl_do_flush(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        int raid_disks = conf->raid_disks;
        int flushed_disks = 0;
        int i;

        atomic_set(&io->pending_flushes, raid_disks);

        for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
                struct md_rdev *rdev;
                struct block_device *bdev = NULL;

                rdev = conf->disks[i].rdev;
                if (rdev && !test_bit(Faulty, &rdev->flags))
                        bdev = rdev->bdev;

                if (bdev) {
                        struct bio *bio;

                        bio = bio_alloc_bioset(bdev, 0,
                                               REQ_OP_WRITE | REQ_PREFLUSH,
                                               GFP_NOIO, &ppl_conf->flush_bs);
                        bio->bi_private = io;
                        bio->bi_end_io = ppl_flush_endio;

                        pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);

                        submit_bio(bio);
                        flushed_disks++;
                }
        }

        log->disk_flush_bitmap = 0;

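        /*
         * pending_flushes was primed with raid_disks above; drop the
         * references for disks that did not receive a flush bio so the
         * io_unit can complete once the submitted flushes return.
         */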
        for (i = flushed_disks; i < raid_disks; i++) {
                if (atomic_dec_and_test(&io->pending_flushes))
                        ppl_io_unit_finished(io);
        }
}

static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
                                            struct ppl_log *log)
{
        struct ppl_io_unit *io;

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);

        return !io || !io->submitted;
}

void ppl_quiesce(struct r5conf *conf, int quiesce)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        int i;

        if (quiesce) {
                for (i = 0; i < ppl_conf->count; i++) {
                        struct ppl_log *log = &ppl_conf->child_logs[i];

                        spin_lock_irq(&log->io_list_lock);
                        wait_event_lock_irq(conf->wait_for_quiescent,
                                            ppl_no_io_unit_submitted(conf, log),
                                            log->io_list_lock);
                        spin_unlock_irq(&log->io_list_lock);
                }
        }
}

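/*
 * Flush handling sketch (illustrative reading of the function below, not
 * authoritative): an empty flush bio is completed immediately. For flush
 * requests that carry data, REQ_PREFLUSH is stripped and -EAGAIN tells the
 * caller to handle the bio as a regular write; this is presumably safe
 * because an io_unit does not complete until member disks with write-back
 * cache have been flushed (see ppl_do_flush()).
 */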
int ppl_handle_flush_request(struct bio *bio)
{
        if (bio->bi_iter.bi_size == 0) {
                bio_endio(bio);
                return 0;
        }
        bio->bi_opf &= ~REQ_PREFLUSH;
        return -EAGAIN;
}

void ppl_stripe_write_finished(struct stripe_head *sh)
{
        struct ppl_io_unit *io;

        io = sh->ppl_io;
        sh->ppl_io = NULL;

        if (io && atomic_dec_and_test(&io->pending_stripes)) {
                if (io->log->disk_flush_bitmap)
                        ppl_do_flush(io);
                else
                        ppl_io_unit_finished(io);
        }
}

static void ppl_xor(int size, struct page *page1, struct page *page2)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        struct page *xor_srcs[] = { page1, page2 };

        init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
                          NULL, NULL, NULL, NULL);
        tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

        async_tx_quiesce(&tx);
}

/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and reading from all of them succeeds.
 *
 * A PPL entry applies to a stripe; the partial parity size for an entry is at
 * most the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = 0
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
                             sector_t ppl_sector)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct mddev *mddev = ppl_conf->mddev;
        struct r5conf *conf = mddev->private;
        int block_size = ppl_conf->block_size;
        struct page *page1;
        struct page *page2;
        sector_t r_sector_first;
        sector_t r_sector_last;
        int strip_sectors;
        int data_disks;
        int i;
        int ret = 0;
        unsigned int pp_size = le32_to_cpu(e->pp_size);
        unsigned int data_size = le32_to_cpu(e->data_size);

        page1 = alloc_page(GFP_KERNEL);
        page2 = alloc_page(GFP_KERNEL);

        if (!page1 || !page2) {
                ret = -ENOMEM;
                goto out;
        }

        r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

        if ((pp_size >> 9) < conf->chunk_sectors) {
                if (pp_size > 0) {
                        data_disks = data_size / pp_size;
                        strip_sectors = pp_size >> 9;
                } else {
                        data_disks = conf->raid_disks - conf->max_degraded;
                        strip_sectors = (data_size >> 9) / data_disks;
                }
                r_sector_last = r_sector_first +
                                (data_disks - 1) * conf->chunk_sectors +
                                strip_sectors;
        } else {
                data_disks = conf->raid_disks - conf->max_degraded;
                strip_sectors = conf->chunk_sectors;
                r_sector_last = r_sector_first + (data_size >> 9);
        }

        pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
                 (unsigned long long)r_sector_first,
                 (unsigned long long)r_sector_last);

        /* if start and end are 4k aligned, use a 4k block */
        if (block_size == 512 &&
            (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
            (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
                block_size = RAID5_STRIPE_SIZE(conf);

        /* iterate through blocks in strip */
        for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
                bool update_parity = false;
                sector_t parity_sector;
                struct md_rdev *parity_rdev;
                struct stripe_head sh;
                int disk;
                int indent = 0;

                pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
                indent += 2;

                memset(page_address(page1), 0, PAGE_SIZE);

                /* iterate through data member disks */
                for (disk = 0; disk < data_disks; disk++) {
                        int dd_idx;
                        struct md_rdev *rdev;
                        sector_t sector;
                        sector_t r_sector = r_sector_first + i +
                                            (disk * conf->chunk_sectors);

                        pr_debug("%s:%*s data member disk %d start\n",
                                 __func__, indent, "", disk);
                        indent += 2;

                        if (r_sector >= r_sector_last) {
                                pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
                                         __func__, indent, "",
                                         (unsigned long long)r_sector);
                                indent -= 2;
                                continue;
                        }

                        update_parity = true;

                        /* map raid sector to member disk */
                        sector = raid5_compute_sector(conf, r_sector, 0,
                                                      &dd_idx, NULL);
                        pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)r_sector, dd_idx,
                                 (unsigned long long)sector);

                        rdev = conf->disks[dd_idx].rdev;
                        if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
                                      sector >= rdev->recovery_offset)) {
                                pr_debug("%s:%*s data member disk %d missing\n",
                                         __func__, indent, "", dd_idx);
                                update_parity = false;
                                break;
                        }

                        pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
                                 __func__, indent, "", rdev->bdev,
                                 (unsigned long long)sector);
                        if (!sync_page_io(rdev, sector, block_size, page2,
                                        REQ_OP_READ, false)) {
                                md_error(mddev, rdev);
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);

                        indent -= 2;
                }

                if (!update_parity)
                        continue;

                if (pp_size > 0) {
                        pr_debug("%s:%*s reading pp disk sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)(ppl_sector + i));
                        if (!sync_page_io(log->rdev,
                                        ppl_sector - log->rdev->data_offset + i,
                                        block_size, page2, REQ_OP_READ,
                                        false)) {
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                md_error(mddev, log->rdev);
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);
                }

                /* map raid sector to parity disk */
                parity_sector = raid5_compute_sector(conf, r_sector_first + i,
                                0, &disk, &sh);
                BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));

                parity_rdev = conf->disks[sh.pd_idx].rdev;

                BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
                pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
                         __func__, indent, "",
                         (unsigned long long)parity_sector,
                         parity_rdev->bdev);
                if (!sync_page_io(parity_rdev, parity_sector, block_size,
                                  page1, REQ_OP_WRITE, false)) {
                        pr_debug("%s:%*s parity write error!\n", __func__,
                                 indent, "");
                        md_error(mddev, parity_rdev);
                        ret = -EIO;
                        goto out;
                }
        }
out:
        if (page1)
                __free_page(page1);
        if (page2)
                __free_page(page2);
        return ret;
}
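
/*
 * Illustrative sketch only; ppl_example_entry_geometry() is a hypothetical
 * helper, not used by the driver. It restates the entry geometry derivation
 * from ppl_recover_entry() above: the number of modified data disks and the
 * per-disk strip length follow from pp_size and data_size (entries with
 * pp_size == 0 describe full stripe writes; entries with a full chunk of
 * partial parity correspond to cases 3 and 4).
 */
static void __maybe_unused
ppl_example_entry_geometry(struct r5conf *conf, unsigned int pp_size,
                           unsigned int data_size, int *data_disks,
                           int *strip_sectors)
{
        if ((pp_size >> 9) < conf->chunk_sectors) {
                if (pp_size > 0) {
                        *data_disks = data_size / pp_size;
                        *strip_sectors = pp_size >> 9;
                } else {
                        *data_disks = conf->raid_disks - conf->max_degraded;
                        *strip_sectors = (data_size >> 9) / *data_disks;
                }
        } else {
                *data_disks = conf->raid_disks - conf->max_degraded;
                *strip_sectors = conf->chunk_sectors;
        }
}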

static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
                       sector_t offset)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct md_rdev *rdev = log->rdev;
        struct mddev *mddev = rdev->mddev;
        sector_t ppl_sector = rdev->ppl.sector + offset +
                              (PPL_HEADER_SIZE >> 9);
        struct page *page;
        int i;
        int ret = 0;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        /* iterate through all PPL entries saved */
        for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
                struct ppl_header_entry *e = &pplhdr->entries[i];
                u32 pp_size = le32_to_cpu(e->pp_size);
                sector_t sector = ppl_sector;
                int ppl_entry_sectors = pp_size >> 9;
                u32 crc, crc_stored;

                pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
                         __func__, rdev->raid_disk, i,
                         (unsigned long long)ppl_sector, pp_size);

                crc = ~0;
                crc_stored = le32_to_cpu(e->checksum);

                /* read partial parity for this entry and calculate its checksum */
                while (pp_size) {
                        int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;

                        if (!sync_page_io(rdev, sector - rdev->data_offset,
                                        s, page, REQ_OP_READ, false)) {
                                md_error(mddev, rdev);
                                ret = -EIO;
                                goto out;
                        }

                        crc = crc32c_le(crc, page_address(page), s);

                        pp_size -= s;
                        sector += s >> 9;
                }

                crc = ~crc;

                if (crc != crc_stored) {
                        /*
                         * Don't recover this entry if the checksum does not
                         * match, but keep going and try to recover other
                         * entries.
                         */
                        pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
                                 __func__, crc_stored, crc);
                        ppl_conf->mismatch_count++;
                } else {
                        ret = ppl_recover_entry(log, e, ppl_sector);
                        if (ret)
                                goto out;
                        ppl_conf->recovered_entries++;
                }

                ppl_sector += ppl_entry_sectors;
        }

        /* flush the disk cache after recovery if necessary */
        ret = blkdev_issue_flush(rdev->bdev);
out:
        __free_page(page);
        return ret;
}
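
/*
 * Illustrative sketch only; ppl_example_checksum() is a hypothetical helper,
 * not used by the driver. It captures the checksum convention used throughout
 * this file: CRCs are seeded with ~0 and stored inverted, so a buffer of
 * zeros does not produce a zero checksum. For a contiguous buffer this
 * reduces to:
 */
static u32 __maybe_unused ppl_example_checksum(const void *buf, size_t len)
{
        return ~crc32c_le(~0, buf, len);
}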

static int ppl_write_empty_header(struct ppl_log *log)
{
        struct page *page;
        struct ppl_header *pplhdr;
        struct md_rdev *rdev = log->rdev;
        int ret = 0;

        pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
                 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

        page = alloc_page(GFP_NOIO | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        pplhdr = page_address(page);
        /* zero out PPL space to avoid collision with old PPLs */
        blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
                            log->rdev->ppl.size, GFP_NOIO, 0);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
                          PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
                          REQ_FUA, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }

        __free_page(page);
        return ret;
}

static int ppl_load_distributed(struct ppl_log *log)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct md_rdev *rdev = log->rdev;
        struct mddev *mddev = rdev->mddev;
        struct page *page, *page2;
        struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
        u32 crc, crc_stored;
        u32 signature;
        int ret = 0, i;
        sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

        pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
        /* read PPL headers, find the recent one */
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        page2 = alloc_page(GFP_KERNEL);
        if (!page2) {
                __free_page(page);
                return -ENOMEM;
        }

        /* search the PPL area for the latest PPL */
        while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
                if (!sync_page_io(rdev,
                                  rdev->ppl.sector - rdev->data_offset +
                                  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
                                  false)) {
                        md_error(mddev, rdev);
                        ret = -EIO;
                        /* if not able to read - don't recover any PPL */
                        pplhdr = NULL;
                        break;
                }
                pplhdr = page_address(page);

                /* check header validity */
                crc_stored = le32_to_cpu(pplhdr->checksum);
                pplhdr->checksum = 0;
                crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

                if (crc_stored != crc) {
                        pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
                                 __func__, crc_stored, crc,
                                 (unsigned long long)pplhdr_offset);
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                signature = le32_to_cpu(pplhdr->signature);

                if (mddev->external) {
                        /*
                         * For external metadata the header signature is set and
                         * validated in userspace.
                         */
                        ppl_conf->signature = signature;
                } else if (ppl_conf->signature != signature) {
                        pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
                                 __func__, signature, ppl_conf->signature,
                                 (unsigned long long)pplhdr_offset);
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
                    le64_to_cpu(pplhdr->generation)) {
                        /* previous was newest */
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                prev_pplhdr_offset = pplhdr_offset;
                prev_pplhdr = pplhdr;

                swap(page, page2);

                /* calculate next potential ppl offset */
                for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
                        pplhdr_offset +=
                            le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
                pplhdr_offset += PPL_HEADER_SIZE >> 9;
        }

        /* no valid ppl found */
        if (!pplhdr)
                ppl_conf->mismatch_count++;
        else
                pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
                    __func__, (unsigned long long)pplhdr_offset,
                    le64_to_cpu(pplhdr->generation));

        /* attempt to recover from log if we are starting a dirty array */
        if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
                ret = ppl_recover(log, pplhdr, pplhdr_offset);

        /* write empty header if we are starting the array */
        if (!ret && !mddev->pers)
                ret = ppl_write_empty_header(log);

        __free_page(page);
        __free_page(page2);

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}

static int ppl_load(struct ppl_conf *ppl_conf)
{
        int ret = 0;
        u32 signature = 0;
        bool signature_set = false;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                /* skip missing drive */
                if (!log->rdev)
                        continue;

                ret = ppl_load_distributed(log);
                if (ret)
                        break;

                /*
                 * For external metadata we can't check if the signature is
                 * correct on a single drive, but we can check if it is the same
                 * on all drives.
                 */
                if (ppl_conf->mddev->external) {
                        if (!signature_set) {
                                signature = ppl_conf->signature;
                                signature_set = true;
                        } else if (signature != ppl_conf->signature) {
                                pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
                                        mdname(ppl_conf->mddev));
                                ret = -EINVAL;
                                break;
                        }
                }
        }

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}

static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
        clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
        clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

        kfree(ppl_conf->child_logs);

        bioset_exit(&ppl_conf->bs);
        bioset_exit(&ppl_conf->flush_bs);
        mempool_exit(&ppl_conf->io_pool);
        kmem_cache_destroy(ppl_conf->io_kc);

        kfree(ppl_conf);
}

void ppl_exit_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;

        if (ppl_conf) {
                __ppl_exit_log(ppl_conf);
                conf->log_private = NULL;
        }
}

static int ppl_validate_rdev(struct md_rdev *rdev)
{
        int ppl_data_sectors;
        int ppl_size_new;

        /*
         * The configured PPL size must be enough to store
         * the header and (at the very least) partial parity
         * for one stripe. Round it down to ensure the data
         * space is cleanly divisible by stripe size.
         */
        ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

        if (ppl_data_sectors > 0)
                ppl_data_sectors = rounddown(ppl_data_sectors,
                                RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));

        if (ppl_data_sectors <= 0) {
                pr_warn("md/raid:%s: PPL space too small on %pg\n",
                        mdname(rdev->mddev), rdev->bdev);
                return -ENOSPC;
        }

        ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

        if ((rdev->ppl.sector < rdev->data_offset &&
             rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
            (rdev->ppl.sector >= rdev->data_offset &&
             rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
                pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
                        mdname(rdev->mddev), rdev->bdev);
                return -EINVAL;
        }

        if (!rdev->mddev->external &&
            ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
             (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
                pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
                        mdname(rdev->mddev), rdev->bdev);
                return -EINVAL;
        }

        rdev->ppl.size = ppl_size_new;

        return 0;
}
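
/*
 * A worked example of the size check above (illustrative): with the default
 * 4k stripe size (8 sectors) and rdev->ppl.size of 20 sectors, the 8 header
 * sectors leave 12 data sectors, which round down to 8, so rdev->ppl.size
 * is trimmed to 8 + 8 = 16 sectors.
 */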

static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
        if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
                                      PPL_HEADER_SIZE) * 2) {
                log->use_multippl = true;
                set_bit(MD_HAS_MULTIPLE_PPLS,
                        &log->ppl_conf->mddev->flags);
                log->entry_space = PPL_SPACE_SIZE;
        } else {
                log->use_multippl = false;
                log->entry_space = (log->rdev->ppl.size << 9) -
                                   PPL_HEADER_SIZE;
        }
        log->next_io_sector = rdev->ppl.sector;

        if (bdev_write_cache(rdev->bdev))
                log->wb_cache_on = true;
}
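
/*
 * A worked example of the threshold above (illustrative): PPL_SPACE_SIZE
 * (128k) plus PPL_HEADER_SIZE (4k) is 132k, so a PPL area of at least 264k
 * (528 sectors) enables multiple PPLs, each io_unit then logging up to
 * PPL_SPACE_SIZE of partial parity. Smaller areas hold a single PPL whose
 * entry space is the area minus the header.
 */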

int ppl_init_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf;
        struct mddev *mddev = conf->mddev;
        int ret = 0;
        int max_disks;
        int i;

        pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
                 mdname(conf->mddev));

        if (PAGE_SIZE != 4096)
                return -EINVAL;

        if (mddev->level != 5) {
                pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
                        mdname(mddev), mddev->level);
                return -EINVAL;
        }

        if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
                pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
                        mdname(mddev));
                return -EINVAL;
        }

        if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
                pr_warn("md/raid:%s PPL is not compatible with journal\n",
                        mdname(mddev));
                return -EINVAL;
        }

        max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
                BITS_PER_BYTE;
        if (conf->raid_disks > max_disks) {
                pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
                        mdname(mddev), max_disks);
                return -EINVAL;
        }

        ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
        if (!ppl_conf)
                return -ENOMEM;

        ppl_conf->mddev = mddev;

        ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
        if (!ppl_conf->io_kc) {
                ret = -ENOMEM;
                goto err;
        }

        ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
                           ppl_io_pool_free, ppl_conf->io_kc);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
        if (ret)
                goto err;

        ppl_conf->count = conf->raid_disks;
        ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
                                       GFP_KERNEL);
        if (!ppl_conf->child_logs) {
                ret = -ENOMEM;
                goto err;
        }

        atomic64_set(&ppl_conf->seq, 0);
        INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
        spin_lock_init(&ppl_conf->no_mem_stripes_lock);

        if (!mddev->external) {
                ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
                ppl_conf->block_size = 512;
        } else {
                ppl_conf->block_size = queue_logical_block_size(mddev->queue);
        }

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];
                struct md_rdev *rdev = conf->disks[i].rdev;

                mutex_init(&log->io_mutex);
                spin_lock_init(&log->io_list_lock);
                INIT_LIST_HEAD(&log->io_list);

                log->ppl_conf = ppl_conf;
                log->rdev = rdev;

                if (rdev) {
                        ret = ppl_validate_rdev(rdev);
                        if (ret)
                                goto err;

                        ppl_init_child_log(log, rdev);
                }
        }

        /* load and possibly recover the logs from the member disks */
        ret = ppl_load(ppl_conf);

        if (ret) {
                goto err;
        } else if (!mddev->pers && mddev->recovery_cp == 0 &&
                   ppl_conf->recovered_entries > 0 &&
                   ppl_conf->mismatch_count == 0) {
                /*
                 * If we are starting a dirty array and the recovery succeeds
                 * without any issues, set the array as clean.
                 */
                mddev->recovery_cp = MaxSector;
                set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
        } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
                /* no mismatch allowed when enabling PPL for a running array */
                ret = -EINVAL;
                goto err;
        }

        conf->log_private = ppl_conf;
        set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

        return 0;
err:
        __ppl_exit_log(ppl_conf);
        return ret;
}

int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int ret = 0;

        if (!rdev)
                return -EINVAL;

        pr_debug("%s: disk: %d operation: %s dev: %pg\n",
                 __func__, rdev->raid_disk, add ? "add" : "remove",
                 rdev->bdev);

        if (rdev->raid_disk < 0)
                return 0;

        if (rdev->raid_disk >= ppl_conf->count)
                return -ENODEV;

        log = &ppl_conf->child_logs[rdev->raid_disk];

        mutex_lock(&log->io_mutex);
        if (add) {
                ret = ppl_validate_rdev(rdev);
                if (!ret) {
                        log->rdev = rdev;
                        ret = ppl_write_empty_header(log);
                        ppl_init_child_log(log, rdev);
                }
        } else {
                log->rdev = NULL;
        }
        mutex_unlock(&log->io_mutex);

        return ret;
}

static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
        return sprintf(buf, "%d\n", 0);
}

static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
        struct r5conf *conf;
        int err = 0;
        unsigned short new;

        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (kstrtou16(page, 10, &new))
                return -EINVAL;

        err = mddev_lock(mddev);
        if (err)
                return err;

        conf = mddev->private;
        if (!conf)
                err = -ENODEV;
        else if (!raid5_has_ppl(conf) || !conf->log_private)
                err = -EINVAL;

        mddev_unlock(mddev);

        return err ?: len;
}

struct md_sysfs_entry
ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
                        ppl_write_hint_show,
                        ppl_write_hint_store);