#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
+#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
+ * ->zone.lock
*
* ->i_mutex
* ->i_mmap_lock (truncate->unmap_mapping_range)
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
BUG_ON(page_mapped(page));
+
+ /*
+ * Some filesystems seem to re-dirty the page even after
+ * the VM has canceled the dirty bit (e.g. ext3 journaling).
+ *
+ * Fix it up by doing a final dirty accounting check after
+ * having removed the page entirely.
+ */
+ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+ dec_zone_page_state(page, NR_FILE_DIRTY);
+ dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ }
}
void remove_from_page_cache(struct page *page)
return 0;
}
+static int sync_page_killable(void *word)
+{
+ sync_page(word);
+ return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
}
EXPORT_SYMBOL(__lock_page);
+int fastcall __lock_page_killable(struct page *page)
+{
+ DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+ return __wait_on_bit_lock(page_waitqueue(page), &wait,
+ sync_page_killable, TASK_KILLABLE);
+}
+
/*
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
/**
* do_generic_mapping_read - generic file read routine
* @mapping: address_space to be read
- * @_ra: file's readahead state
+ * @ra: file's readahead state
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
page_not_up_to_date:
/* Get exclusive access to the page ... */
- lock_page(page);
+ error = lock_page_killable(page);
+ if (unlikely(error))
+ goto readpage_error;
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
}
if (!PageUptodate(page)) {
- lock_page(page);
+ error = lock_page_killable(page);
+ if (unlikely(error))
+ goto readpage_error;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
goto find_page;
}
unlock_page(page);
- error = -EIO;
shrink_readahead_size_eio(filp, ra);
- goto readpage_error;
+ goto readpage_eio;
}
unlock_page(page);
}
goto page_ok;
+readpage_eio:
+ error = -EIO;
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (vmf->pgoff >= size)
- goto outside_data_content;
+ return VM_FAULT_SIGBUS;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
if (unlikely(vmf->pgoff >= size)) {
unlock_page(page);
page_cache_release(page);
- goto outside_data_content;
+ return VM_FAULT_SIGBUS;
}
/*
vmf->page = page;
return ret | VM_FAULT_LOCKED;
-outside_data_content:
- /*
- * An external ptracer can access pages that normally aren't
- * accessible..
- */
- if (vma->vm_mm == current->mm)
- return VM_FAULT_SIGBUS;
-
- /* Fall through to the non-read-ahead case */
no_cached_page:
/*
* We're only likely to ever get here if MADV_RANDOM is in
int remove_suid(struct dentry *dentry)
{
- int kill = should_remove_suid(dentry);
+ int killsuid = should_remove_suid(dentry);
+ int killpriv = security_inode_need_killpriv(dentry);
+ int error = 0;
- if (unlikely(kill))
- return __remove_suid(dentry, kill);
+ if (killpriv < 0)
+ return killpriv;
+ if (killpriv)
+ error = security_inode_killpriv(dentry);
+ if (!error && killsuid)
+ error = __remove_suid(dentry, killsuid);
- return 0;
+ return error;
}
EXPORT_SYMBOL(remove_suid);
return copied;
}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
/*
 * This has the same side effects and return value as
kunmap(page);
return copied;
}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
{
__iov_iter_advance_iov(i, bytes);
i->count -= bytes;
}
+EXPORT_SYMBOL(iov_iter_advance);
/*
* Fault in the first iovec of the given iov_iter, to a maximum length
bytes = min(bytes, i->iov->iov_len - i->iov_offset);
return fault_in_pages_readable(buf, bytes);
}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
/*
* Return the count of just the current iov_iter segment.
else
return min(i->count, iov->iov_len - i->iov_offset);
}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
/*
* Performs necessary checks before doing a write
ret = aops->prepare_write(file, page, offset, offset+len);
if (ret) {
- if (ret != AOP_TRUNCATED_PAGE)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
vmtruncate(inode, inode->i_size);
- if (ret == AOP_TRUNCATED_PAGE)
- goto again;
}
return ret;
}
unlock_page(page);
mark_page_accessed(page);
page_cache_release(page);
- BUG_ON(ret == AOP_TRUNCATED_PAGE); /* can't deal with */
if (ret < 0) {
if (pos + len > inode->i_size)
* cannot take a pagefault with the destination page locked.
* So pin the source page to copy it.
*/
- if (!PageUptodate(page)) {
+ if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
unlock_page(page);
src_page = alloc_page(GFP_KERNEL);
flush_dcache_page(page);
status = a_ops->commit_write(file, page, offset, offset+bytes);
- if (unlikely(status < 0 || status == AOP_TRUNCATED_PAGE))
+ if (unlikely(status < 0))
goto fs_write_aop_error;
if (unlikely(status > 0)) /* filesystem did partial write */
copied = min_t(size_t, copied, status);
continue;
fs_write_aop_error:
- if (status != AOP_TRUNCATED_PAGE)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
if (src_page)
page_cache_release(src_page);
*/
if (pos + bytes > inode->i_size)
vmtruncate(inode, inode->i_size);
- if (status == AOP_TRUNCATED_PAGE)
- continue;
- else
- break;
+ break;
} while (iov_iter_count(i));
return written ? written : status;
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
+ unsigned int flags = 0;
+
+ /*
+ * Copies from kernel address space cannot fail (NFSD is a big user).
+ */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ flags |= AOP_FLAG_UNINTERRUPTIBLE;
do {
struct page *page;
break;
}
- status = a_ops->write_begin(file, mapping, pos, bytes, 0,
+ status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status))
break;
}
retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
- if (retval)
- goto out;
/*
* Finally, try again to invalidate clean pages which might have been
- * faulted in by get_user_pages() if the source of the write was an
- * mmap()ed region of the file we're writing. That's a pretty crazy
- * thing to do, so we don't support it 100%. If this invalidation
- * fails and we have -EIOCBQUEUED we ignore the failure.
+ * cached by non-direct readahead, or faulted in by get_user_pages()
+ * if the source of the write was an mmap'ed region of the file
+ * we're writing. Either one is a pretty crazy thing to do,
+ * so we don't support it 100%. If this invalidation
+ * fails, tough, the write still worked...
*/
if (rw == WRITE && mapping->nrpages) {
- int err = invalidate_inode_pages2_range(mapping,
- offset >> PAGE_CACHE_SHIFT, end);
- if (err && retval >= 0)
- retval = err;
+ invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
}
out:
return retval;