struct address_space *mapping, gfp_t mask)
{
tree->state.rb_node = NULL;
+ tree->buffer.rb_node = NULL;
tree->ops = NULL;
tree->dirty_bytes = 0;
spin_lock_init(&tree->lock);
- spin_lock_init(&tree->lru_lock);
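+	/* buffer_lock guards the tree->buffer rbtree */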
+ spin_lock_init(&tree->buffer_lock);
tree->mapping = mapping;
- INIT_LIST_HEAD(&tree->buffer_lru);
- tree->lru_size = 0;
tree->last = NULL;
}
EXPORT_SYMBOL(extent_io_tree_init);
-void extent_io_tree_empty_lru(struct extent_io_tree *tree)
-{
- struct extent_buffer *eb;
- while(!list_empty(&tree->buffer_lru)) {
- eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
- lru);
- list_del_init(&eb->lru);
- free_extent_buffer(eb);
- }
-}
-EXPORT_SYMBOL(extent_io_tree_empty_lru);
-
struct extent_state *alloc_extent_state(gfp_t mask)
{
struct extent_state *state;
return ret;
}
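+/*
+ * link @node into tree->buffer, the rbtree of extent buffers indexed
+ * by start offset.  Returns NULL on success, or the buffer that
+ * already occupies @offset (leaving @node unlinked).  The caller must
+ * hold tree->buffer_lock.
+ */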
+static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
+ u64 offset, struct rb_node *node)
+{
+ struct rb_root *root = &tree->buffer;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_buffer *eb;
+
+	while (*p) {
+ parent = *p;
+ eb = rb_entry(parent, struct extent_buffer, rb_node);
+
+ if (offset < eb->start)
+ p = &(*p)->rb_left;
+ else if (offset > eb->start)
+ p = &(*p)->rb_right;
+ else
+ return eb;
+ }
+
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
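+/*
+ * find the extent buffer that starts at @offset in tree->buffer, or
+ * NULL if none is indexed there.  The caller must hold
+ * tree->buffer_lock.
+ */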
+static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
+ u64 offset)
+{
+ struct rb_root *root = &tree->buffer;
+	struct rb_node *n = root->rb_node;
+	struct extent_buffer *eb;
+
+	while (n) {
+ eb = rb_entry(n, struct extent_buffer, rb_node);
+ if (offset < eb->start)
+ n = n->rb_left;
+ else if (offset > eb->start)
+ n = n->rb_right;
+ else
+ return eb;
+ }
+ return NULL;
+}
+
/*
* utility function to look for merge candidates inside a given range.
* Any extents with matching state are merged together into a single
{
if (!PagePrivate(page)) {
SetPagePrivate(page);
- WARN_ON(!page->mapping->a_ops->invalidatepage);
- set_page_private(page, EXTENT_PAGE_PRIVATE);
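+		/* hold a page reference while EXTENT_PAGE_PRIVATE is set */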
page_cache_get(page);
+ set_page_private(page, EXTENT_PAGE_PRIVATE);
}
}
struct block_device *bdev;
int ret;
int nr = 0;
- size_t page_offset = 0;
+ size_t pg_offset = 0;
size_t blocksize;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
u64 delalloc_end;
WARN_ON(!PageLocked(page));
- page_offset = i_size & (PAGE_CACHE_SIZE - 1);
+ pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
- (page->index == end_index && !page_offset)) {
+ (page->index == end_index && !pg_offset)) {
page->mapping->a_ops->invalidatepage(page, 0);
unlock_page(page);
return 0;
char *userpage;
userpage = kmap_atomic(page, KM_USER0);
- memset(userpage + page_offset, 0,
- PAGE_CACHE_SIZE - page_offset);
+ memset(userpage + pg_offset, 0,
+ PAGE_CACHE_SIZE - pg_offset);
kunmap_atomic(userpage, KM_USER0);
flush_dcache_page(page);
}
- page_offset = 0;
+ pg_offset = 0;
set_page_extent_mapped(page);
unlock_start = page_end + 1;
break;
}
- em = epd->get_extent(inode, page, page_offset, cur,
+ em = epd->get_extent(inode, page, pg_offset, cur,
end - cur + 1, 1);
if (IS_ERR(em) || !em) {
SetPageError(page);
unlock_extent(tree, unlock_start, cur + iosize -1,
GFP_NOFS);
+
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, cur,
cur + iosize - 1,
NULL, 1);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
unlock_start = cur;
continue;
}
if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
EXTENT_DIRTY, 0)) {
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
SetPageError(page);
} else {
unsigned long max_nr = end_index + 1;
+
set_range_writeback(tree, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
printk("warning page %lu not writeback, "
}
ret = submit_extent_page(WRITE, tree, page, sector,
- iosize, page_offset, bdev,
+ iosize, pg_offset, bdev,
&epd->bio, max_nr,
end_bio_extent_writepage, 0);
if (ret)
SetPageError(page);
}
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
nr++;
}
done:
spin_unlock(&map->lock);
break;
}
- if (em->start != start) {
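+		/* leave pinned extents in the map */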
+ if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
+ em->start != start) {
spin_unlock(&map->lock);
free_extent_map(em);
break;
return sector;
}
-static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
-{
- if (list_empty(&eb->lru)) {
- extent_buffer_get(eb);
- list_add(&eb->lru, &tree->buffer_lru);
- tree->lru_size++;
- if (tree->lru_size >= BUFFER_LRU_MAX) {
- struct extent_buffer *rm;
- rm = list_entry(tree->buffer_lru.prev,
- struct extent_buffer, lru);
- tree->lru_size--;
- list_del_init(&rm->lru);
- free_extent_buffer(rm);
- }
- } else
- list_move(&eb->lru, &tree->buffer_lru);
- return 0;
-}
-static struct extent_buffer *find_lru(struct extent_io_tree *tree,
- u64 start, unsigned long len)
-{
- struct list_head *lru = &tree->buffer_lru;
- struct list_head *cur = lru->next;
- struct extent_buffer *eb;
-
- if (list_empty(lru))
- return NULL;
-
- do {
- eb = list_entry(cur, struct extent_buffer, lru);
- if (eb->start == start && eb->len == len) {
- extent_buffer_get(eb);
- return eb;
- }
- cur = cur->next;
- } while (cur != lru);
- return NULL;
-}
-
-static inline unsigned long num_extent_pages(u64 start, u64 len)
-{
- return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
- (start >> PAGE_CACHE_SHIFT);
-}
-
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
unsigned long i)
{
return p;
}
-int release_extent_buffer_tail_pages(struct extent_buffer *eb)
-{
- unsigned long num_pages = num_extent_pages(eb->start, eb->len);
- struct page *page;
- unsigned long i;
-
- if (num_pages == 1)
- return 0;
- for (i = 1; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- page_cache_release(page);
- }
- return 0;
-}
-
-
-int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
- unsigned long len)
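+/* how many pages the range [start, start + len) spans */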
+static inline unsigned long num_extent_pages(u64 start, u64 len)
{
- struct list_head *lru = &tree->buffer_lru;
- struct list_head *cur = lru->next;
- struct extent_buffer *eb;
- int found = 0;
-
- spin_lock(&tree->lru_lock);
- if (list_empty(lru))
- goto out;
-
- do {
- eb = list_entry(cur, struct extent_buffer, lru);
- if (eb->start <= start && eb->start + eb->len > start) {
- eb->flags &= ~EXTENT_UPTODATE;
- }
- cur = cur->next;
- } while (cur != lru);
-out:
- spin_unlock(&tree->lru_lock);
- return found;
+ return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+ (start >> PAGE_CACHE_SHIFT);
}
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
struct extent_buffer *eb = NULL;
unsigned long flags;
- spin_lock(&tree->lru_lock);
- eb = find_lru(tree, start, len);
- spin_unlock(&tree->lru_lock);
- if (eb) {
- return eb;
- }
-
eb = kmem_cache_zalloc(extent_buffer_cache, mask);
- INIT_LIST_HEAD(&eb->lru);
eb->start = start;
eb->len = len;
spin_lock_irqsave(&leak_lock, flags);
unsigned long i;
unsigned long index = start >> PAGE_CACHE_SHIFT;
struct extent_buffer *eb;
+ struct extent_buffer *exists = NULL;
struct page *p;
struct address_space *mapping = tree->mapping;
int uptodate = 1;
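+
+	/* return any buffer already indexed for this range */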
+ spin_lock(&tree->buffer_lock);
+ eb = buffer_search(tree, start);
+ if (eb) {
+ atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
+ return eb;
+ }
+ spin_unlock(&tree->buffer_lock);
+
eb = __alloc_extent_buffer(tree, start, len, mask);
if (!eb)
return NULL;
- if (eb->flags & EXTENT_BUFFER_FILLED)
- goto lru_add;
-
if (page0) {
eb->first_page = page0;
i = 1;
p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
if (!p) {
WARN_ON(1);
- goto fail;
+ goto free_eb;
}
set_page_extent_mapped(p);
mark_page_accessed(p);
eb->flags |= EXTENT_UPTODATE;
eb->flags |= EXTENT_BUFFER_FILLED;
-lru_add:
- spin_lock(&tree->lru_lock);
- add_lru(tree, eb);
- spin_unlock(&tree->lru_lock);
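+
+	/*
+	 * another allocator may have raced us here; buffer_tree_insert()
+	 * returns the buffer that won, in which case we drop ours and
+	 * hand back the existing one.
+	 */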
+ spin_lock(&tree->buffer_lock);
+ exists = buffer_tree_insert(tree, start, &eb->rb_node);
+ if (exists) {
+ /* add one reference for the caller */
+ atomic_inc(&exists->refs);
+ spin_unlock(&tree->buffer_lock);
+ goto free_eb;
+ }
+ spin_unlock(&tree->buffer_lock);
+
+ /* add one reference for the tree */
+ atomic_inc(&eb->refs);
return eb;
-fail:
- spin_lock(&tree->lru_lock);
- list_del_init(&eb->lru);
- spin_unlock(&tree->lru_lock);
+free_eb:
if (!atomic_dec_and_test(&eb->refs))
- return NULL;
- for (index = 1; index < i; index++) {
+ return exists;
+ for (index = 1; index < i; index++)
page_cache_release(extent_buffer_page(eb, index));
- }
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
__free_extent_buffer(eb);
- return NULL;
+ return exists;
}
EXPORT_SYMBOL(alloc_extent_buffer);
u64 start, unsigned long len,
gfp_t mask)
{
- unsigned long num_pages = num_extent_pages(start, len);
- unsigned long i;
- unsigned long index = start >> PAGE_CACHE_SHIFT;
struct extent_buffer *eb;
- struct page *p;
- struct address_space *mapping = tree->mapping;
- int uptodate = 1;
-
- eb = __alloc_extent_buffer(tree, start, len, mask);
- if (!eb)
- return NULL;
-
- if (eb->flags & EXTENT_BUFFER_FILLED)
- goto lru_add;
-
- for (i = 0; i < num_pages; i++, index++) {
- p = find_get_page(mapping, index);
- if (!p) {
- goto fail;
- }
- if (TestSetPageLocked(p)) {
- page_cache_release(p);
- goto fail;
- }
- set_page_extent_mapped(p);
- mark_page_accessed(p);
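+	/* lookup only: take a ref on the indexed buffer, if any */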
+ spin_lock(&tree->buffer_lock);
+ eb = buffer_search(tree, start);
+ if (eb)
+ atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
- if (i == 0) {
- eb->first_page = p;
- set_page_extent_head(p, len);
- } else {
- set_page_private(p, EXTENT_PAGE_PRIVATE);
- }
-
- if (!PageUptodate(p))
- uptodate = 0;
- unlock_page(p);
- }
- if (uptodate)
- eb->flags |= EXTENT_UPTODATE;
- eb->flags |= EXTENT_BUFFER_FILLED;
-
-lru_add:
- spin_lock(&tree->lru_lock);
- add_lru(tree, eb);
- spin_unlock(&tree->lru_lock);
return eb;
-fail:
- spin_lock(&tree->lru_lock);
- list_del_init(&eb->lru);
- spin_unlock(&tree->lru_lock);
- if (!atomic_dec_and_test(&eb->refs))
- return NULL;
- for (index = 1; index < i; index++) {
- page_cache_release(extent_buffer_page(eb, index));
- }
- if (i > 0)
- page_cache_release(extent_buffer_page(eb, 0));
- __free_extent_buffer(eb);
- return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
-
if (!eb)
return;
if (!atomic_dec_and_test(&eb->refs))
return;
- WARN_ON(!list_empty(&eb->lru));
- num_pages = num_extent_pages(eb->start, eb->len);
-
- for (i = 1; i < num_pages; i++) {
- page_cache_release(extent_buffer_page(eb, i));
- }
- page_cache_release(extent_buffer_page(eb, 0));
- __free_extent_buffer(eb);
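+	/*
+	 * the rbtree holds the last reference, dropped only by
+	 * try_release_extent_buffer(); hitting zero here means the
+	 * refcounting is broken.
+	 */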
+ WARN_ON(1);
}
EXPORT_SYMBOL(free_extent_buffer);
}
}
EXPORT_SYMBOL(memmove_extent_buffer);
+
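+/*
+ * drop the buffer indexed at @page's offset if the tree holds the
+ * only remaining reference.  Returns 1 if the buffer was freed, 0 if
+ * it is still in use.
+ */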
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
+{
+ u64 start = page_offset(page);
+ struct extent_buffer *eb;
+ int ret = 1;
+ unsigned long i;
+ unsigned long num_pages;
+
+ spin_lock(&tree->buffer_lock);
+ eb = buffer_search(tree, start);
+ if (!eb)
+ goto out;
+
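+	/* someone other than the tree still holds a reference */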
+ if (atomic_read(&eb->refs) > 1) {
+ ret = 0;
+ goto out;
+ }
+ /* at this point we can safely release the extent buffer */
+ num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++)
+		page_cache_release(extent_buffer_page(eb, i));
+ rb_erase(&eb->rb_node, &tree->buffer);
+ __free_extent_buffer(eb);
+out:
+ spin_unlock(&tree->buffer_lock);
+ return ret;
+}
+EXPORT_SYMBOL(try_release_extent_buffer);
+