/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include "page.h"
#include "segbuf.h"
#include "seglist.h"


static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
        memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
        nilfs_segbuf_cachep =
                kmem_cache_create("nilfs2_segbuf_cache",
                                  sizeof(struct nilfs_segment_buffer),
                                  0, SLAB_RECLAIM_ACCOUNT,
                                  nilfs_segbuf_init_once);

        return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
        kmem_cache_destroy(nilfs_segbuf_cachep);
}

struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
        struct nilfs_segment_buffer *segbuf;

        segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
        if (unlikely(!segbuf))
                return NULL;

        segbuf->sb_super = sb;
        INIT_LIST_HEAD(&segbuf->sb_list);
        INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
        INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
        return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
        kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

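/**
 * nilfs_segbuf_map - map a segment buffer onto a physical segment
 * @segbuf: segment buffer
 * @segnum: segment number to be mapped
 * @offset: block offset of the partial segment within the full segment
 * @nilfs: nilfs object
 *
 * Records the disk block range of the full segment and sets the start
 * block number and remaining block count of the partial segment
 * described by @segbuf.
 */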
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
                      unsigned long offset, struct the_nilfs *nilfs)
{
        segbuf->sb_segnum = segnum;
        nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
                                &segbuf->sb_fseg_end);

        segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
                                  __u64 nextnum, struct the_nilfs *nilfs)
{
        segbuf->sb_nextnum = nextnum;
        segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

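/**
 * nilfs_segbuf_extend_segsum - allocate an additional segment summary block
 * @segbuf: segment buffer
 *
 * Gets a buffer head for the next segment summary block of @segbuf and
 * appends it to the segment summary buffer list.  Returns 0 on success
 * or -ENOMEM if the buffer could not be obtained.
 */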
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_segsum_buffer(segbuf, bh);
        return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
                                struct buffer_head **bhp)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_payload_buffer(segbuf, bh);
        *bhp = bh;
        return 0;
}

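/**
 * nilfs_segbuf_reset - initialize a segment buffer for a new partial segment
 * @segbuf: segment buffer
 * @flags: flags to be stored in the segment summary
 * @ctime: creation time to be stored in the segment summary
 *
 * Clears the block counters of @segbuf, allocates the first segment
 * summary block, and initializes the in-core summary information.
 */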
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
                       time_t ctime)
{
        int err;

        segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
        err = nilfs_segbuf_extend_segsum(segbuf);
        if (unlikely(err))
                return err;

        segbuf->sb_sum.flags = flags;
        segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
        segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
        segbuf->sb_sum.ctime = ctime;

        segbuf->sb_io_error = 0;
        return 0;
}

/*
 * Set up segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct nilfs_segment_summary *raw_sum;
        struct buffer_head *bh_sum;

        bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
                            struct buffer_head, b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
        raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
        raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
        raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
        raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
        raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
        raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
        raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
        raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
        raw_sum->ss_pad      = 0;
}

/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
                                     u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        unsigned long size, bytes = segbuf->sb_sum.sumbytes;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);

        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        size = min_t(unsigned long, bytes, bh->b_size);
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum +
                       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
                       size - (sizeof(raw_sum->ss_datasum) +
                               sizeof(raw_sum->ss_sumsum)));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                bytes -= size;
                size = min_t(unsigned long, bytes, bh->b_size);
                crc = crc32_le(crc, bh->b_data, size);
        }
        raw_sum->ss_sumsum = cpu_to_le32(crc);
}

void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
                                   u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        void *kaddr;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
                       bh->b_size - sizeof(raw_sum->ss_datasum));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                crc = crc32_le(crc, bh->b_data, bh->b_size);
        }
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
                kunmap_atomic(kaddr, KM_USER0);
        }
        raw_sum->ss_datasum = cpu_to_le32(crc);
}

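/**
 * nilfs_release_buffers - release buffer heads collected for a write
 * @list: list of buffer heads chained through b_assoc_buffers
 *
 * Detaches each buffer head from @list and drops its reference.  For
 * buffers allocated by nilfs on clone pages, the page reference taken
 * per buffer is also dropped, and the private page is freed once its
 * use count indicates no other users remain.
 */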
void nilfs_release_buffers(struct list_head *list)
{
        struct buffer_head *bh, *n;

        list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                list_del_init(&bh->b_assoc_buffers);
                if (buffer_nilfs_allocated(bh)) {
                        struct page *clone_page = bh->b_page;

                        /* remove clone page */
                        brelse(bh);
                        page_cache_release(clone_page); /* for each bh */
                        if (page_count(clone_page) <= 2) {
                                lock_page(clone_page);
                                nilfs_free_private_page(clone_page);
                        }
                        continue;
                }
                brelse(bh);
        }
}

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nilfs_write_info *wi = bio->bi_private;

        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                bio_put(bio);
                /* to be detected by submit_seg_bio() */
        }

        if (!uptodate)
                atomic_inc(&wi->err);

        bio_put(bio);
        complete(&wi->bio_event);
}

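/**
 * nilfs_submit_seg_bio - submit the bio currently built for a segment write
 * @wi: nilfs_write_info
 * @mode: I/O mode to be passed to submit_bio()
 *
 * If the backing device is congested and earlier bios are still in
 * flight, waits for one of them to complete before submitting.  On
 * success the write window kept in @wi is advanced to the next range of
 * blocks; on failure the current bio is dropped and a negative error
 * code is returned.
 */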
static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
{
        struct bio *bio = wi->bio;
        int err;

        if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
                wait_for_completion(&wi->bio_event);
                wi->nbio--;
                if (unlikely(atomic_read(&wi->err))) {
                        bio_put(bio);
                        err = -EIO;
                        goto failed;
                }
        }

        bio->bi_end_io = nilfs_end_bio_write;
        bio->bi_private = wi;
        bio_get(bio);
        submit_bio(mode, bio);
        if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                bio_put(bio);
                err = -EOPNOTSUPP;
                goto failed;
        }
        wi->nbio++;
        bio_put(bio);

        wi->bio = NULL;
        wi->rest_blocks -= wi->end - wi->start;
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end;
        return 0;

 failed:
        wi->bio = NULL;
        return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a bio for writing a segment
 * @sb: super block
 * @start: beginning disk block number of this BIO.
 * @nr_vecs: requested size of the page vector.
 *
 * nilfs_alloc_seg_bio() allocates a new BIO structure and initializes it.
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
                                       int nr_vecs)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOWAIT, nr_vecs);
        if (bio == NULL) {
                while (!bio && (nr_vecs >>= 1))
                        bio = bio_alloc(GFP_NOWAIT, nr_vecs);
        }
        if (likely(bio)) {
                bio->bi_bdev = sb->s_bdev;
                bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
        }
        return bio;
}

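/**
 * nilfs_segbuf_prepare_write - initialize the write descriptor for a segment buffer
 * @segbuf: segment buffer to be written
 * @wi: nilfs_write_info
 *
 * Resets the bio construction state in @wi: the number of remaining
 * blocks, the maximum number of page vectors per bio, the starting disk
 * block number, and the completion and error counters used while
 * waiting for the submitted bios.
 */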
void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
                                struct nilfs_write_info *wi)
{
        wi->bio = NULL;
        wi->rest_blocks = segbuf->sb_sum.nblocks;
        wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end = 0;
        wi->nbio = 0;
        wi->blocknr = segbuf->sb_pseg_start;

        atomic_set(&wi->err, 0);
        init_completion(&wi->bio_event);
}

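/**
 * nilfs_submit_bh - add a buffer head to the current bio
 * @wi: nilfs_write_info
 * @bh: buffer head to be written
 * @mode: I/O mode to be passed to submit_bio()
 *
 * Allocates a bio on demand and adds the page of @bh to it.  If the bio
 * is already full, it is submitted and the buffer is retried with a
 * fresh bio.  Returns 0 on success or a negative error code on failure.
 */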
static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
                           int mode)
{
        int len, err;

        BUG_ON(wi->nr_vecs <= 0);
 repeat:
        if (!wi->bio) {
                wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
                                              wi->nr_vecs);
                if (unlikely(!wi->bio))
                        return -ENOMEM;
        }

        len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (len == bh->b_size) {
                wi->end++;
                return 0;
        }
        /* bio is FULL */
        err = nilfs_submit_seg_bio(wi, mode);
        /* never submit current bh */
        if (likely(!err))
                goto repeat;
        return err;
}

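/**
 * nilfs_segbuf_write - submit write requests for a segment buffer
 * @segbuf: segment buffer describing the partial segment
 * @wi: nilfs_write_info
 *
 * Submits the segment summary buffers followed by the payload buffers.
 * The last bio is submitted with the synchronous I/O flag set.  On
 * failure, the error counter in @wi is incremented and the error code
 * is returned.
 */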
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                       struct nilfs_write_info *wi)
{
        struct buffer_head *bh;
        int res, rw = WRITE;

        list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
                res = nilfs_submit_bh(wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                res = nilfs_submit_bh(wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        if (wi->bio) {
                /*
                 * Last BIO is always sent through the following
                 * submission.
                 */
                rw |= (1 << BIO_RW_SYNCIO);
                res = nilfs_submit_seg_bio(wi, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        res = 0;
 out:
        return res;

 failed_bio:
        atomic_inc(&wi->err);
        goto out;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 * @wi: nilfs_write_info
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
                      struct nilfs_write_info *wi)
{
        int err = 0;

        if (!wi->nbio)
                return 0;

        do {
                wait_for_completion(&wi->bio_event);
        } while (--wi->nbio > 0);

        if (unlikely(atomic_read(&wi->err) > 0)) {
                printk(KERN_ERR "NILFS: IO error writing segment\n");
                err = -EIO;
                segbuf->sb_io_error = 1;
        }
        return err;
}