1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Buffer cache handling
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
27 #include <linux/types.h>
28 #include <linux/highmem.h>
29 #include <linux/bio.h>
31 #include <cluster/masklog.h>
39 #include "buffer_head_io.h"
40 #include "ocfs2_trace.h"
43 * Bits on bh->b_state used by ocfs2.
45 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
47 enum ocfs2_state_bits {
	/* Set on a buffer submitted for read when the caller supplied a
	 * validate() callback; checked and cleared after I/O completes so
	 * each freshly-read block is validated exactly once. */
48 BH_NeedsValidate = BH_JBDPrivateStart,
51 /* Expand the magic b_state functions */
/* Generates set_buffer_needs_validate()/clear_buffer_needs_validate()/
 * buffer_needs_validate() accessors for the bit above. */
52 BUFFER_FNS(NeedsValidate, needs_validate);
/*
 * ocfs2_write_block() - synchronously write one non-journalled block.
 *
 * Only used for blocks that bypass the journal; takes the metadata
 * cache io lock so the clustered uptodate cache stays consistent with
 * what lands on disk.  Returns 0 on success or a negative errno.
 */
54 int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
55 struct ocfs2_caching_info *ci)
59 trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
/* Never write below the superblock, and never touch a buffer that
 * jbd2 currently owns. */
61 BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
62 BUG_ON(buffer_jbd(bh));
64 /* No need to check for a soft readonly file system here. non
65 * journalled writes are only ever done on system files which
66 * can get modified during recovery even if read-only. */
67 if (ocfs2_is_hard_readonly(osb)) {
73 ocfs2_metadata_cache_io_lock(ci);
76 set_buffer_uptodate(bh);
78 /* remove from dirty list before I/O. */
79 clear_buffer_dirty(bh);
81 get_bh(bh); /* for end_buffer_write_sync() */
82 bh->b_end_io = end_buffer_write_sync;
83 submit_bh(REQ_OP_WRITE, 0, bh);
/* buffer_uptodate() after I/O completion tells us whether the write
 * succeeded; mirror the result into the clustered uptodate cache. */
87 if (buffer_uptodate(bh)) {
88 ocfs2_set_buffer_uptodate(ci, bh);
90 /* We don't need to remove the clustered uptodate
91 * information for this bh as it's not marked locally
97 ocfs2_metadata_cache_io_unlock(ci);
102 /* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
103 * will be easier to handle read failure.
/*
 * ocfs2_read_blocks_sync() - read @nr contiguous blocks starting at
 * @block, bypassing the clustered uptodate cache.
 *
 * bhs[] entries are allocated with sb_getblk() when the caller passed
 * them in as NULL; otherwise the caller's buffer heads are reused.
 * Returns 0 on success or a negative errno.
 */
105 int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
106 unsigned int nr, struct buffer_head *bhs[])
110 struct buffer_head *bh;
113 trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
118 /* Don't put buffer head and re-assign it to NULL if it is allocated
119 * outside since the caller can't be aware of this alteration!
121 new_bh = (bhs[0] == NULL);
/* Submission pass: allocate buffer heads as needed and queue a read
 * for each block that actually requires I/O. */
123 for (i = 0 ; i < nr ; i++) {
124 if (bhs[i] == NULL) {
125 bhs[i] = sb_getblk(osb->sb, block++);
126 if (bhs[i] == NULL) {
/* A journal-owned buffer is current by definition; don't re-read it
 * from disk underneath jbd2. */
134 if (buffer_jbd(bh)) {
135 trace_ocfs2_read_blocks_sync_jbd(
136 (unsigned long long)bh->b_blocknr);
140 if (buffer_dirty(bh)) {
141 /* This should probably be a BUG, or
142 * at least return an error. */
144 "trying to sync read a dirty "
145 "buffer! (blocknr = %llu), skipping\n",
146 (unsigned long long)bh->b_blocknr);
/* Re-check under the buffer lock: the JBD bit may have been set
 * while we slept waiting for the lock. */
151 if (buffer_jbd(bh)) {
152 #ifdef CATCH_BH_JBD_RACES
154 "block %llu had the JBD bit set "
155 "while I was in lock_buffer!",
156 (unsigned long long)bh->b_blocknr);
164 clear_buffer_uptodate(bh);
165 get_bh(bh); /* for end_buffer_read_sync() */
166 bh->b_end_io = end_buffer_read_sync;
167 submit_bh(REQ_OP_READ, 0, bh);
/* Completion pass: walk the array backwards, waiting for each read
 * and recording the first failure. */
171 for (i = nr; i > 0; i--) {
174 if (unlikely(status)) {
176 /* If middle bh fails, let previous bh
177 * finish its read and then put it to
184 } else if (bh && buffer_uptodate(bh)) {
185 clear_buffer_uptodate(bh);
190 /* No need to wait on the buffer if it's managed by JBD. */
194 if (!buffer_uptodate(bh)) {
195 /* Status won't be cleared from here on out,
196 * so we can safely record this and loop back
197 * to cleanup the other buffers. */
207 /* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
208 * will be easier to handle read failure.
/*
 * ocfs2_read_blocks() - cached read of @nr blocks starting at @block.
 *
 * Honors the clustered uptodate cache (unless OCFS2_BH_IGNORE_CACHE is
 * set), supports OCFS2_BH_READAHEAD for asynchronous prefetch, and runs
 * the optional @validate callback once per freshly-read block.
 * Returns 0 on success or a negative errno.
 */
210 int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
211 struct buffer_head *bhs[], int flags,
212 int (*validate)(struct super_block *sb,
213 struct buffer_head *bh))
216 int i, ignore_cache = 0;
217 struct buffer_head *bh;
218 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
221 trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
/* Readahead is by definition a caching operation, so these two flags
 * are mutually exclusive. */
224 BUG_ON((flags & OCFS2_BH_READAHEAD) &&
225 (flags & OCFS2_BH_IGNORE_CACHE));
234 mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
245 /* Don't put buffer head and re-assign it to NULL if it is allocated
246 * outside since the caller can't be aware of this alteration!
248 new_bh = (bhs[0] == NULL);
/* Submission pass, done under the cache io lock so cache state and
 * in-flight I/O stay coherent. */
250 ocfs2_metadata_cache_io_lock(ci);
251 for (i = 0 ; i < nr ; i++) {
252 if (bhs[i] == NULL) {
253 bhs[i] = sb_getblk(sb, block++);
254 if (bhs[i] == NULL) {
255 ocfs2_metadata_cache_io_unlock(ci);
258 /* Don't forget to put previous bh! */
263 ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);
265 /* There are three read-ahead cases here which we need to
266 * be concerned with. All three assume a buffer has
267 * previously been submitted with OCFS2_BH_READAHEAD
268 * and it hasn't yet completed I/O.
270 * 1) The current request is sync to disk. This rarely
271 * happens these days, and never when performance
272 * matters - the code can just wait on the buffer
273 * lock and re-submit.
275 * 2) The current request is cached, but not
276 * readahead. ocfs2_buffer_uptodate() will return
277 * false anyway, so we'll wind up waiting on the
278 * buffer lock to do I/O. We re-check the request
279 * again after getting the lock to avoid a re-submit.
281 * 3) The current request is readahead (and so must
282 * also be a caching one). We short circuit if the
283 * buffer is locked (under I/O) and if it's in the
284 * uptodate cache. The re-check from #2 catches the
285 * case that the previous read-ahead completes just
286 * before our is-it-in-flight check.
289 if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
290 trace_ocfs2_read_blocks_from_disk(
291 (unsigned long long)bh->b_blocknr,
292 (unsigned long long)ocfs2_metadata_cache_owner(ci));
293 /* We're using ignore_cache here to say
298 trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
299 ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
/* As in the sync path: journal-owned buffers are current; never
 * re-read them from disk underneath jbd2. */
301 if (buffer_jbd(bh)) {
306 if (buffer_dirty(bh)) {
307 /* This should probably be a BUG, or
308 * at least return an error. */
312 /* A read-ahead request was made - if the
313 * buffer is already under read-ahead from a
314 * previously submitted request then we are
316 if ((flags & OCFS2_BH_READAHEAD)
317 && ocfs2_buffer_read_ahead(ci, bh))
321 if (buffer_jbd(bh)) {
322 #ifdef CATCH_BH_JBD_RACES
323 mlog(ML_ERROR, "block %llu had the JBD bit set "
324 "while I was in lock_buffer!",
325 (unsigned long long)bh->b_blocknr);
333 /* Re-check ocfs2_buffer_uptodate() as a
334 * previously read-ahead buffer may have
335 * completed I/O while we were waiting for the
337 if (!(flags & OCFS2_BH_IGNORE_CACHE)
338 && !(flags & OCFS2_BH_READAHEAD)
339 && ocfs2_buffer_uptodate(ci, bh)) {
344 clear_buffer_uptodate(bh);
345 get_bh(bh); /* for end_buffer_read_sync() */
/* Mark the buffer so the completion pass below knows to run the
 * caller's validate() callback on it. */
347 set_buffer_needs_validate(bh);
348 bh->b_end_io = end_buffer_read_sync;
349 submit_bh(REQ_OP_READ, 0, bh);
/* Completion pass: wait for non-readahead I/O, validate freshly-read
 * blocks, and populate the clustered uptodate cache. */
355 for (i = (nr - 1); i >= 0; i--) {
358 if (!(flags & OCFS2_BH_READAHEAD)) {
359 if (unlikely(status)) {
360 /* Clear the buffers on error including those
361 * ever succeeded in reading
364 /* If middle bh fails, let previous bh
365 * finish its read and then put it to
372 } else if (bh && buffer_uptodate(bh)) {
373 clear_buffer_uptodate(bh);
377 /* We know this can't have changed as we hold the
378 * owner sem. Avoid doing any work on the bh if the
383 if (!buffer_uptodate(bh)) {
384 /* Status won't be cleared from here on out,
385 * so we can safely record this and loop back
386 * to cleanup the other buffers. Don't need to
387 * remove the clustered uptodate information
388 * for this bh as it's not marked locally
391 clear_buffer_needs_validate(bh);
395 if (buffer_needs_validate(bh)) {
396 /* We never set NeedsValidate if the
397 * buffer was held by the journal, so
398 * that better not have changed */
399 BUG_ON(buffer_jbd(bh));
400 clear_buffer_needs_validate(bh);
401 status = validate(sb, bh);
407 /* Always set the buffer in the cache, even if it was
408 * a forced read, or read-ahead which hasn't yet
410 ocfs2_set_buffer_uptodate(ci, bh);
412 ocfs2_metadata_cache_io_unlock(ci);
414 trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
415 flags, ignore_cache);
422 /* Check whether the blkno is the super block or one of the backups. */
/* NOTE(review): what happens when @blkno matches neither the superblock
 * nor any backup location is not visible here — confirm against the
 * full source before relying on this as a validity assertion. */
423 static void ocfs2_check_super_or_backup(struct super_block *sb,
429 if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
/* Compare against each possible backup-superblock location in turn. */
432 for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
433 backup_blkno = ocfs2_backup_super_blkno(sb, i);
434 if (backup_blkno == blkno)
442 * Write super block and backups doesn't need to collaborate with journal,
443 * so we don't need to lock ip_io_mutex and ci doesn't need to be passed
444 * into this function.
446 int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
447 struct buffer_head *bh)
450 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
/* Superblock writes never go through jbd2, and only superblock or
 * backup-superblock locations may be written here. */
452 BUG_ON(buffer_jbd(bh));
453 ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
/* Unlike ocfs2_write_block(), refuse on soft read-only too — the
 * superblock is not a system file modified during recovery. */
455 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
462 set_buffer_uptodate(bh);
464 /* remove from dirty list before I/O. */
465 clear_buffer_dirty(bh);
467 get_bh(bh); /* for end_buffer_write_sync() */
468 bh->b_end_io = end_buffer_write_sync;
/* Recompute the metadata ECC over the dinode just before it hits disk. */
469 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
470 submit_bh(REQ_OP_WRITE, 0, bh);
474 if (!buffer_uptodate(bh)) {