Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Jul 2010 20:19:32 +0000 (13:19 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Jul 2010 20:19:32 +0000 (13:19 -0700)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, pci, mrst: Add extra sanity check in walking the PCI extended cap chain
  x86: Fix x2apic preenabled system with kexec
  x86: Force HPET readback_cmp for all ATI chipsets

42 files changed:
arch/x86/kernel/kprobes.c
arch/x86/pci/i386.c
crypto/ablkcipher.c
drivers/gpu/drm/i915/i915_gem.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/w90p910_ts.c
drivers/pci/setup-res.c
drivers/platform/x86/intel_scu_ipc.c
drivers/s390/block/dasd_devmap.c
drivers/s390/cio/chsc.c
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/quota.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/ocfs2/aops.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/file.c
fs/ocfs2/file.h
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/xattr.c
fs/partitions/ibm.c
include/linux/i8042.h
include/linux/jbd2.h
include/linux/pci.h
include/linux/syscalls.h
kernel/early_res.c
mm/page_alloc.c
mm/page_cgroup.c
tools/perf/builtin-report.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/callchain.c
tools/perf/util/callchain.h

index 345a4b1fe1446812d65e25fd424886d05aeb1fe4..675879b65ce666c91b868c96972ea35f107810f4 100644 (file)
@@ -640,8 +640,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        /* Skip cs, ip, orig_ax and gs. */      \
        "       subl $16, %esp\n"       \
        "       pushl %fs\n"            \
-       "       pushl %ds\n"            \
        "       pushl %es\n"            \
+       "       pushl %ds\n"            \
        "       pushl %eax\n"           \
        "       pushl %ebp\n"           \
        "       pushl %edi\n"           \
index 6fdb3ec30c3197e15fc54e18c91291f5eb403450..55253095be84c66d37c5dcaaba63bd4ebaee6df2 100644 (file)
@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass)
                                        idx, r, disabled, pass);
                                if (pci_claim_resource(dev, idx) < 0) {
                                        /* We'll assign a new address later */
+                                       dev->fw_addr[idx] = r->start;
                                        r->end -= r->start;
                                        r->start = 0;
                                }
index 98a66103f4f23b1aafaec67e66116e74e1bf0c8c..a854df2a5a4b860b223d6ad49f45255b0678e970 100644 (file)
@@ -165,7 +165,7 @@ static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
 
        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
-               ablkcipher_walk_done(req, walk, -ENOMEM);
+               return ablkcipher_walk_done(req, walk, -ENOMEM);
 
        base = p + 1;
 
index 074385882ccfe721ff5630b9e825950c6479af7e..8757ecf6e96bd117527c18424c22b10e0576f94c 100644 (file)
@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                page = read_cache_page_gfp(mapping, i,
                                           GFP_HIGHUSER |
                                           __GFP_COLD |
+                                          __GFP_RECLAIMABLE |
                                           gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
index 40cea334ad13f8159d0f71057680496e42150791..9ba9c4a17e1541a48a5f8a5a90606d6d3aecb02e 100644 (file)
@@ -206,6 +206,7 @@ static int synaptics_resolution(struct psmouse *psmouse)
        unsigned char max[3];
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
+               return 0;
 
        if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
                if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
index cc18265be1a8f2266480f93841c0530d3e180cea..7a45d68c35166d62d2e2404d3f65644b32cea42c 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit w90x900ts_probe(struct platform_device *pdev)
        w90p910_ts->state = TS_IDLE;
        spin_lock_init(&w90p910_ts->lock);
        setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
-                   (unsigned long)&w90p910_ts);
+                   (unsigned long)w90p910_ts);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index 92379e2d37e77ed4797dba23b7402f795dec80e2..2aaa13150de3ba40fd0a65cbb569f1fe96b9a49f 100644 (file)
@@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
                                             pcibios_align_resource, dev);
        }
 
+       if (ret < 0 && dev->fw_addr[resno]) {
+               struct resource *root, *conflict;
+               resource_size_t start, end;
+
+               /*
+                * If we failed to assign anything, let's try the address
+                * where firmware left it.  That at least has a chance of
+                * working, which is better than just leaving it disabled.
+                */
+
+               if (res->flags & IORESOURCE_IO)
+                       root = &ioport_resource;
+               else
+                       root = &iomem_resource;
+
+               start = res->start;
+               end = res->end;
+               res->start = dev->fw_addr[resno];
+               res->end = res->start + size - 1;
+               dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+                        resno, res);
+               conflict = request_resource_conflict(root, res);
+               if (conflict) {
+                       dev_info(&dev->dev,
+                                "BAR %d: %pR conflicts with %s %pR\n", resno,
+                                res, conflict->name, conflict);
+                       res->start = start;
+                       res->end = end;
+               } else
+                       ret = 0;
+       }
+
        if (!ret) {
                res->flags &= ~IORESOURCE_STARTALIGN;
                dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
index 40658e3385b45346ddc91132816e28ccfee4dd84..bb2f1fba637b36ec041909a26dd176be6f472c0a 100644 (file)
@@ -489,7 +489,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
                mutex_unlock(&ipclock);
                return -ENODEV;
        }
-       ipc_command(cmd << 12 | sub);
+       ipc_command(sub << 12 | cmd);
        err = busy_loop();
        mutex_unlock(&ipclock);
        return err;
@@ -501,9 +501,9 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
  *     @cmd: command
  *     @sub: sub type
  *     @in: input data
- *     @inlen: input length
+ *     @inlen: input length in dwords
  *     @out: output data
- *     @outlein: output length
+ *     @outlein: output length in dwords
  *
  *     Issue a command to the SCU which involves data transfers. Do the
  *     data copies under the lock but leave it for the caller to interpret
@@ -524,7 +524,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
        for (i = 0; i < inlen; i++)
                ipc_data_writel(*in++, 4 * i);
 
-       ipc_command((cmd << 12) | sub | (inlen << 18));
+       ipc_command((sub << 12) | cmd | (inlen << 18));
        err = busy_loop();
 
        for (i = 0; i < outlen; i++)
@@ -556,6 +556,10 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
        u32 cmd = 0;
 
        mutex_lock(&ipclock);
+       if (ipcdev.pdev == NULL) {
+               mutex_unlock(&ipclock);
+               return -ENODEV;
+       }
        cmd = (addr >> 24) & 0xFF;
        if (cmd == IPC_I2C_READ) {
                writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
index 34d51dd4c53902d79ef3c21434db61923ee79fc8..bed7b4634ccd4648f42c188d8e5e2937534b1f8e 100644 (file)
@@ -948,8 +948,10 @@ static ssize_t dasd_alias_show(struct device *dev,
        if (device->discipline && device->discipline->get_uid &&
            !device->discipline->get_uid(device, &uid)) {
                if (uid.type == UA_BASE_PAV_ALIAS ||
-                   uid.type == UA_HYPER_PAV_ALIAS)
+                   uid.type == UA_HYPER_PAV_ALIAS) {
+                       dasd_put_device(device);
                        return sprintf(buf, "1\n");
+               }
        }
        dasd_put_device(device);
 
index ce7cb87479fe3b8aed786eeea182549da7352b35..407d0e9adfaf96a0d4362f8e56c986f0c17700fa 100644 (file)
@@ -713,7 +713,7 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
        if (ret)
                goto out_free;
-       memcpy(desc, &chsc_resp->data, chsc_resp->length);
+       memcpy(desc, &chsc_resp->data, sizeof(*desc));
 out_free:
        kfree(chsc_resp);
        return ret;
index 4a48c0f4b40275bce81f5c4b5fb40784a45a00bc..84da64b551b2c1170f4e68550ead612ed6fd7b41 100644 (file)
@@ -1041,6 +1041,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 
        if (gfs2_is_stuffed(ip)) {
                u64 dsize = size + sizeof(struct gfs2_inode);
+               ip->i_disksize = size;
                ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
index 8295c5b5d4a9591e5d112d88ec99a8fd50bcc4d7..26ca3361a8bcc3525758ed00c6186fb62494a6e3 100644 (file)
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
        unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
        if (gfs2_dirent_sentinel(dent))
-               actual = GFS2_DIRENT_SIZE(0);
+               actual = 0;
        if (totlen - actual >= required)
                return 1;
        return 0;
index ddcdbf4935366b4eac1a85a74590f98c3a4b0c62..dbab3fdc258292ea027d5b2692e93cf1928a37fc 100644 (file)
@@ -706,8 +706,18 @@ static void glock_work_func(struct work_struct *work)
 {
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+       struct gfs2_holder *gh;
        int drop_ref = 0;
 
+       if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
+               spin_lock(&gl->gl_spin);
+               gh = find_first_waiter(gl);
+               if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
+                   test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+                       set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+               spin_unlock(&gl->gl_spin);
+       }
+
        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_ref = 1;
index b5612cbb62a51b75dc97699490658ce6939a4118..f03afd9c44bc748b51811bff46ccdab3d66fa619 100644 (file)
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
 
        inode = gfs2_iget(sb, no_addr);
@@ -198,6 +198,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
                ip->i_iopen_gh.gh_gl->gl_object = ip;
 
                gfs2_glock_put(io_gl);
+               io_gl = NULL;
 
                if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
                        goto gfs2_nfsbypass;
@@ -228,7 +229,8 @@ gfs2_nfsbypass:
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        if (inode->i_state & I_NEW)
                ip->i_gl->gl_object = NULL;
@@ -256,7 +258,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 {
        struct gfs2_sbd *sdp;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error;
        struct gfs2_holder gh;
        struct inode *inode;
@@ -293,6 +295,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 
        ip->i_iopen_gh.gh_gl->gl_object = ip;
        gfs2_glock_put(io_gl);
+       io_gl = NULL;
 
        inode->i_mode = DT2IF(DT_UNKNOWN);
 
@@ -319,7 +322,8 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
 fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
 fail_iopen:
-       gfs2_glock_put(io_gl);
+       if (io_gl)
+               gfs2_glock_put(io_gl);
 fail_put:
        ip->i_gl->gl_object = NULL;
        gfs2_glock_put(ip->i_gl);
index 49667d68769ef8fe5a82920a79f1a37df216804b..b256d6f24288d6507bef949804e57d3e69c8cce2 100644 (file)
@@ -694,10 +694,8 @@ get_a_page:
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
-               if (buffer_new(bh)) {
-                       memset(bh->b_data, 0, bh->b_size);
-                       set_buffer_uptodate(bh);
-               }
+               if (buffer_new(bh))
+                       zero_user(page, pos - blocksize, bh->b_size);
        }
 
        if (PageUptodate(page))
@@ -723,7 +721,7 @@ get_a_page:
 
        /* If quota straddles page boundary, we need to update the rest of the
         * quota at the beginning of the next page */
-       if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+       if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
index bc2ff5932769199f271db9055f7881b2e9c4bf54..036880895bfc8c2e99c42f6fd900819315bd508a 100644 (file)
@@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
-       struct jbd2_buffer_trigger_type *triggers;
        journal_t *journal = transaction->t_journal;
 
        /*
@@ -328,21 +327,21 @@ repeat:
                done_copy_out = 1;
                new_page = virt_to_page(jh_in->b_frozen_data);
                new_offset = offset_in_page(jh_in->b_frozen_data);
-               triggers = jh_in->b_frozen_triggers;
        } else {
                new_page = jh2bh(jh_in)->b_page;
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-               triggers = jh_in->b_triggers;
        }
 
        mapped_data = kmap_atomic(new_page, KM_USER0);
        /*
-        * Fire any commit trigger.  Do this before checking for escaping,
-        * as the trigger may modify the magic offset.  If a copy-out
-        * happens afterwards, it will have the correct data in the buffer.
+        * Fire data frozen trigger if data already wasn't frozen.  Do this
+        * before checking for escaping, as the trigger may modify the magic
+        * offset.  If a copy-out happens afterwards, it will have the correct
+        * data in the buffer.
         */
-       jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
-                                  triggers);
+       if (!done_copy_out)
+               jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+                                          jh_in->b_triggers);
 
        /*
         * Check for escaping
index e214d68620ac167fb5ddeb71775ead1b6063a571..b8e0806681bb0f4acf63964fa3f72ae00e1f8900 100644 (file)
@@ -725,6 +725,9 @@ done:
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
+               /* Fire data frozen trigger just before we copy the data */
+               jbd2_buffer_frozen_trigger(jh, source + offset,
+                                          jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);
 
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
        jh->b_triggers = type;
 }
 
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
 {
        struct buffer_head *bh = jh2bh(jh);
 
-       if (!triggers || !triggers->t_commit)
+       if (!triggers || !triggers->t_frozen)
                return;
 
-       triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+       triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
 }
 
 void jbd2_buffer_abort_trigger(struct journal_head *jh,
index 3623ca20cc186046cfbf840f03ce9705146b17ba..356e976772bf1adb112aaf3c8b28ddb71b639afb 100644 (file)
@@ -196,15 +196,14 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
                        dump_stack();
                        goto bail;
                }
-
-               past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
-               mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
-                    (unsigned long long)past_eof);
-
-               if (create && (iblock >= past_eof))
-                       set_buffer_new(bh_result);
        }
 
+       past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+       mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
+            (unsigned long long)past_eof);
+       if (create && (iblock >= past_eof))
+               set_buffer_new(bh_result);
+
 bail:
        if (err < 0)
                err = -EIO;
@@ -459,36 +458,6 @@ int walk_page_buffers(     handle_t *handle,
        return ret;
 }
 
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
-                                                        struct page *page,
-                                                        unsigned from,
-                                                        unsigned to)
-{
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       handle_t *handle;
-       int ret = 0;
-
-       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
-       if (IS_ERR(handle)) {
-               ret = -ENOMEM;
-               mlog_errno(ret);
-               goto out;
-       }
-
-       if (ocfs2_should_order_data(inode)) {
-               ret = ocfs2_jbd2_file_inode(handle, inode);
-               if (ret < 0)
-                       mlog_errno(ret);
-       }
-out:
-       if (ret) {
-               if (!IS_ERR(handle))
-                       ocfs2_commit_trans(osb, handle);
-               handle = ERR_PTR(ret);
-       }
-       return handle;
-}
-
 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
 {
        sector_t status;
@@ -1131,23 +1100,37 @@ out:
  */
 static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                                      struct ocfs2_write_ctxt *wc,
-                                     u32 cpos, loff_t user_pos, int new,
+                                     u32 cpos, loff_t user_pos,
+                                     unsigned user_len, int new,
                                      struct page *mmap_page)
 {
        int ret = 0, i;
-       unsigned long start, target_index, index;
+       unsigned long start, target_index, end_index, index;
        struct inode *inode = mapping->host;
+       loff_t last_byte;
 
        target_index = user_pos >> PAGE_CACHE_SHIFT;
 
        /*
         * Figure out how many pages we'll be manipulating here. For
         * non allocating write, we just change the one
-        * page. Otherwise, we'll need a whole clusters worth.
+        * page. Otherwise, we'll need a whole clusters worth.  If we're
+        * writing past i_size, we only need enough pages to cover the
+        * last page of the write.
         */
        if (new) {
                wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
                start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+               /*
+                * We need the index *past* the last page we could possibly
+                * touch.  This is the page past the end of the write or
+                * i_size, whichever is greater.
+                */
+               last_byte = max(user_pos + user_len, i_size_read(inode));
+               BUG_ON(last_byte < 1);
+               end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+               if ((start + wc->w_num_pages) > end_index)
+                       wc->w_num_pages = end_index - start;
        } else {
                wc->w_num_pages = 1;
                start = target_index;
@@ -1620,21 +1603,20 @@ out:
  * write path can treat it as an non-allocating write, which has no
  * special case code for sparse/nonsparse files.
  */
-static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
-                                       unsigned len,
+static int ocfs2_expand_nonsparse_inode(struct inode *inode,
+                                       struct buffer_head *di_bh,
+                                       loff_t pos, unsigned len,
                                        struct ocfs2_write_ctxt *wc)
 {
        int ret;
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t newsize = pos + len;
 
-       if (ocfs2_sparse_alloc(osb))
-               return 0;
+       BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
 
        if (newsize <= i_size_read(inode))
                return 0;
 
-       ret = ocfs2_extend_no_holes(inode, newsize, pos);
+       ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
        if (ret)
                mlog_errno(ret);
 
@@ -1644,6 +1626,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
        return ret;
 }
 
+static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
+                          loff_t pos)
+{
+       int ret = 0;
+
+       BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
+       if (pos > i_size_read(inode))
+               ret = ocfs2_zero_extend(inode, di_bh, pos);
+
+       return ret;
+}
+
 int ocfs2_write_begin_nolock(struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata,
@@ -1679,7 +1673,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
                }
        }
 
-       ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+       if (ocfs2_sparse_alloc(osb))
+               ret = ocfs2_zero_tail(inode, di_bh, pos);
+       else
+               ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
+                                                  wc);
        if (ret) {
                mlog_errno(ret);
                goto out;
@@ -1789,7 +1787,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
         * that we can zero and flush if we error after adding the
         * extent.
         */
-       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+       ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                                         cluster_of_pages, mmap_page);
        if (ret) {
                mlog_errno(ret);
index 6b5a492e1749f8063097a1ebef4e3ef9ad53fdb5..153abb5abef024d2ca63f6d4a23ee1c126b89f13 100644 (file)
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
        struct dlm_ctxt *dlm = NULL;
        struct dlm_ctxt *new_ctxt = NULL;
 
-       if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+       if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
                ret = -ENAMETOOLONG;
                mlog(ML_ERROR, "domain name length too long\n");
                goto leave;
@@ -1709,6 +1709,7 @@ retry:
                }
 
                if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+                       spin_unlock(&dlm_domain_lock);
                        mlog(ML_ERROR,
                             "Requested locking protocol version is not "
                             "compatible with already registered domain "
index 4a7506a4e314c34014fd58ab739e836f9babe4e3..94b97fc6a88e62522b1958ed6a89de5e6803525b 100644 (file)
@@ -2808,14 +2808,8 @@ again:
                mlog(0, "trying again...\n");
                goto again;
        }
-       /* now that we are sure the MIGRATING state is there, drop
-        * the unneded state which blocked threads trying to DIRTY */
-       spin_lock(&res->spinlock);
-       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
-       BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
-       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
-       spin_unlock(&res->spinlock);
 
+       ret = 0;
        /* did the target go down or die? */
        spin_lock(&dlm->spinlock);
        if (!test_bit(target, dlm->domain_map)) {
@@ -2825,10 +2819,22 @@ again:
        }
        spin_unlock(&dlm->spinlock);
 
+       /*
+        * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
+        * another try; otherwise, we are sure the MIGRATING state is there,
+        * drop the unneded state which blocked threads trying to DIRTY
+        */
+       spin_lock(&res->spinlock);
+       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+       if (!ret)
+               BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+       spin_unlock(&res->spinlock);
+
        /*
         * at this point:
         *
-        *   o the DLM_LOCK_RES_MIGRATING flag is set
+        *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
         *   o there are no pending asts on this lockres
         *   o all processes trying to reserve an ast on this
         *     lockres must wait for the MIGRATING flag to clear
index f8b75ce4be7019ab20d9c5f40d1567a4e520b8fd..9dfaac73b36da4350cc5a2f2dc7a34918a21e11e 100644 (file)
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;
 
-               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+               bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
index 6a13ea64c44773fc239ad55a5237422ddee18e94..2b10b36d15772efcae056a62b4df1a9fe8aa2c53 100644 (file)
@@ -724,28 +724,55 @@ leave:
        return status;
 }
 
+/*
+ * While a write will already be ordering the data, a truncate will not.
+ * Thus, we need to explicitly order the zeroed pages.
+ */
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       handle_t *handle = NULL;
+       int ret = 0;
+
+       if (!ocfs2_should_order_data(inode))
+               goto out;
+
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_jbd2_file_inode(handle, inode);
+       if (ret < 0)
+               mlog_errno(ret);
+
+out:
+       if (ret) {
+               if (!IS_ERR(handle))
+                       ocfs2_commit_trans(osb, handle);
+               handle = ERR_PTR(ret);
+       }
+       return handle;
+}
+
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->write_begin() and ->write_end(). */
-static int ocfs2_write_zero_page(struct inode *inode,
-                                u64 size)
+static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
+                                u64 abs_to)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index;
-       unsigned int offset;
+       unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
        handle_t *handle = NULL;
-       int ret;
+       int ret = 0;
+       unsigned zero_from, zero_to, block_start, block_end;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
+       BUG_ON(abs_from >= abs_to);
+       BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+       BUG_ON(abs_from & (inode->i_blkbits - 1));
 
        page = grab_cache_page(mapping, index);
        if (!page) {
@@ -754,31 +781,56 @@ static int ocfs2_write_zero_page(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
-       if (ret < 0) {
-               mlog_errno(ret);
-               goto out_unlock;
-       }
+       /* Get the offsets within the page that we want to zero */
+       zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
+       zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+       if (!zero_to)
+               zero_to = PAGE_CACHE_SIZE;
 
-       if (ocfs2_should_order_data(inode)) {
-               handle = ocfs2_start_walk_page_trans(inode, page, offset,
-                                                    offset);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       handle = NULL;
+       mlog(0,
+            "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
+            (unsigned long long)abs_from, (unsigned long long)abs_to,
+            index, zero_from, zero_to);
+
+       /* We know that zero_from is block aligned */
+       for (block_start = zero_from; block_start < zero_to;
+            block_start = block_end) {
+               block_end = block_start + (1 << inode->i_blkbits);
+
+               /*
+                * block_start is block-aligned.  Bump it by one to
+                * force ocfs2_{prepare,commit}_write() to zero the
+                * whole block.
+                */
+               ret = ocfs2_prepare_write_nolock(inode, page,
+                                                block_start + 1,
+                                                block_start + 1);
+               if (ret < 0) {
+                       mlog_errno(ret);
                        goto out_unlock;
                }
-       }
 
-       /* must not update i_size! */
-       ret = block_commit_write(page, offset, offset);
-       if (ret < 0)
-               mlog_errno(ret);
-       else
-               ret = 0;
+               if (!handle) {
+                       handle = ocfs2_zero_start_ordered_transaction(inode);
+                       if (IS_ERR(handle)) {
+                               ret = PTR_ERR(handle);
+                               handle = NULL;
+                               break;
+                       }
+               }
+
+               /* must not update i_size! */
+               ret = block_commit_write(page, block_start + 1,
+                                        block_start + 1);
+               if (ret < 0)
+                       mlog_errno(ret);
+               else
+                       ret = 0;
+       }
 
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+
 out_unlock:
        unlock_page(page);
        page_cache_release(page);
@@ -786,22 +838,114 @@ out:
        return ret;
 }
 
-static int ocfs2_zero_extend(struct inode *inode,
-                            u64 zero_to_size)
+/*
+ * Find the next range to zero.  We do this in terms of bytes because
+ * that's what ocfs2_zero_extend() wants, and it is dealing with the
+ * pagecache.  We may return multiple extents.
+ *
+ * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
+ * needs to be zeroed.  range_start and range_end return the next zeroing
+ * range.  A subsequent call should pass the previous range_end as its
+ * zero_start.  If range_end is 0, there's nothing to do.
+ *
+ * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
+ */
+static int ocfs2_zero_extend_get_range(struct inode *inode,
+                                      struct buffer_head *di_bh,
+                                      u64 zero_start, u64 zero_end,
+                                      u64 *range_start, u64 *range_end)
 {
-       int ret = 0;
-       u64 start_off;
-       struct super_block *sb = inode->i_sb;
+       int rc = 0, needs_cow = 0;
+       u32 p_cpos, zero_clusters = 0;
+       u32 zero_cpos =
+               zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+       u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
+       unsigned int num_clusters = 0;
+       unsigned int ext_flags = 0;
 
-       start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
-       while (start_off < zero_to_size) {
-               ret = ocfs2_write_zero_page(inode, start_off);
-               if (ret < 0) {
-                       mlog_errno(ret);
+       while (zero_cpos < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
+                                       &num_clusters, &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+
+               if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+                       zero_clusters = num_clusters;
+                       if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                               needs_cow = 1;
+                       break;
+               }
+
+               zero_cpos += num_clusters;
+       }
+       if (!zero_clusters) {
+               *range_end = 0;
+               goto out;
+       }
+
+       while ((zero_cpos + zero_clusters) < last_cpos) {
+               rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
+                                       &p_cpos, &num_clusters,
+                                       &ext_flags);
+               if (rc) {
+                       mlog_errno(rc);
                        goto out;
                }
 
-               start_off += sb->s_blocksize;
+               if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
+                       break;
+               if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                       needs_cow = 1;
+               zero_clusters += num_clusters;
+       }
+       if ((zero_cpos + zero_clusters) > last_cpos)
+               zero_clusters = last_cpos - zero_cpos;
+
+       if (needs_cow) {
+               rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters,
+                                       UINT_MAX);
+               if (rc) {
+                       mlog_errno(rc);
+                       goto out;
+               }
+       }
+
+       *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
+       *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
+                                            zero_cpos + zero_clusters);
+
+out:
+       return rc;
+}
+
+/*
+ * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
+ * has made sure that the entire range needs zeroing.
+ */
+static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
+                                  u64 range_end)
+{
+       int rc = 0;
+       u64 next_pos;
+       u64 zero_pos = range_start;
+
+       mlog(0, "range_start = %llu, range_end = %llu\n",
+            (unsigned long long)range_start,
+            (unsigned long long)range_end);
+       BUG_ON(range_start >= range_end);
+
+       while (zero_pos < range_end) {
+               next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+               if (next_pos > range_end)
+                       next_pos = range_end;
+               rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+               if (rc < 0) {
+                       mlog_errno(rc);
+                       break;
+               }
+               zero_pos = next_pos;
 
                /*
                 * Very large extends have the potential to lock up
@@ -810,16 +954,63 @@ static int ocfs2_zero_extend(struct inode *inode,
                cond_resched();
        }
 
-out:
+       return rc;
+}
+
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to_size)
+{
+       int ret = 0;
+       u64 zero_start, range_start = 0, range_end = 0;
+       struct super_block *sb = inode->i_sb;
+
+       zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
+       mlog(0, "zero_start %llu for i_size %llu\n",
+            (unsigned long long)zero_start,
+            (unsigned long long)i_size_read(inode));
+       while (zero_start < zero_to_size) {
+               ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
+                                                 zero_to_size,
+                                                 &range_start,
+                                                 &range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               if (!range_end)
+                       break;
+               /* Trim the ends */
+               if (range_start < zero_start)
+                       range_start = zero_start;
+               if (range_end > zero_to_size)
+                       range_end = zero_to_size;
+
+               ret = ocfs2_zero_extend_range(inode, range_start,
+                                             range_end);
+               if (ret) {
+                       mlog_errno(ret);
+                       break;
+               }
+               zero_start = range_end;
+       }
+
        return ret;
 }
 
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to)
 {
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
+       /*
+        * Only quota files call this without a bh, and they can't be
+        * refcounted.
+        */
+       BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+       BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
+
        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
@@ -840,7 +1031,7 @@ int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
-       ret = ocfs2_zero_extend(inode, zero_to);
+       ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);
 
@@ -862,27 +1053,15 @@ static int ocfs2_extend_file(struct inode *inode,
                goto out;
 
        if (i_size_read(inode) == new_i_size)
-               goto out;
+               goto out;
        BUG_ON(new_i_size < i_size_read(inode));
 
-       /*
-        * Fall through for converting inline data, even if the fs
-        * supports sparse files.
-        *
-        * The check for inline data here is legal - nobody can add
-        * the feature since we have i_mutex. We must check it again
-        * after acquiring ip_alloc_sem though, as paths like mmap
-        * might have raced us to converting the inode to extents.
-        */
-       if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-           && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               goto out_update_size;
-
        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
-        * here.
+        * here.  We even have to hold it for sparse files because there
+        * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);
 
@@ -899,14 +1078,16 @@ static int ocfs2_extend_file(struct inode *inode,
                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
-
                        mlog_errno(ret);
                        goto out;
                }
        }
 
-       if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-               ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+       if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+               ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
+       else
+               ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
+                                           new_i_size);
 
        up_write(&oi->ip_alloc_sem);
 
index d66cf4f7c70e34bbec1d63eccd25096eeac7e976..97bf761c9e7c7b8f1c1744bbada1d778095eb3d7 100644 (file)
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
 int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size);
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
-                         u64 zero_to);
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+                         u64 new_i_size, u64 zero_to);
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+                     loff_t zero_to);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
                  struct kstat *stat);
index 47878cf164184cf910bcbbebc10da133aa2e0c64..625de9d7088cdf2c82008b2e875094ec2b43f1d0 100644 (file)
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
 }
 
-static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Quota blocks have their own trigger because the struct ocfs2_block_check
  * offset depends on the blocksize.
  */
-static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  * Directory blocks also have their own trigger because the
  * struct ocfs2_block_check offset depends on the blocksize.
  */
-static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
 {
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
 
 static struct ocfs2_triggers di_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dinode, i_check),
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = {
 
 static struct ocfs2_triggers eb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_extent_block, h_check),
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = {
 
 static struct ocfs2_triggers rb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_refcount_block, rf_check),
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = {
 
 static struct ocfs2_triggers gd_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_group_desc, bg_check),
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = {
 
 static struct ocfs2_triggers db_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_db_commit_trigger,
+               .t_frozen = ocfs2_db_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers xb_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_xattr_block, xb_check),
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = {
 
 static struct ocfs2_triggers dq_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_dq_commit_trigger,
+               .t_frozen = ocfs2_dq_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
 };
 
 static struct ocfs2_triggers dr_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_root_block, dr_check),
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = {
 
 static struct ocfs2_triggers dl_triggers = {
        .ot_triggers = {
-               .t_commit = ocfs2_commit_trigger,
+               .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_leaf, dl_check),
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
        mutex_lock(&os->os_lock);
        ocfs2_queue_orphan_scan(osb);
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
-               schedule_delayed_work(&os->os_orphan_scan_work,
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
                                      ocfs2_orphan_scan_timeout());
        mutex_unlock(&os->os_lock);
 }
@@ -1976,8 +1976,8 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
                atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
        else {
                atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
-               schedule_delayed_work(&os->os_orphan_scan_work,
-                                     ocfs2_orphan_scan_timeout());
+               queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+                                  ocfs2_orphan_scan_timeout());
        }
 }
 
index 3d7419682dc069da151ae265367f214e15ae31af..ec6adbf8f5515afbaa7e8ffcb8a32b6a3c4e1ea8 100644 (file)
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
 {
        unsigned int la_mb;
        unsigned int gd_mb;
+       unsigned int la_max_mb;
        unsigned int megs_per_slot;
        struct super_block *sb = osb->sb;
 
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
        if (megs_per_slot < la_mb)
                la_mb = megs_per_slot;
 
+       /* We can't store more bits than we can in a block. */
+       la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
+                                               ocfs2_local_alloc_size(sb) * 8);
+       if (la_mb > la_max_mb)
+               la_mb = la_max_mb;
+
        return la_mb;
 }
 
index 2bb35fe00511e98f41f23fa9a93d752d3a82b4fe..4607923eb24c192ff3642042161684a158300908 100644 (file)
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
-               status = ocfs2_extend_no_holes(gqinode,
+               status = ocfs2_extend_no_holes(gqinode, NULL,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                if (status < 0)
index 8bd70d4d184d5827fdb861b7a565fc26d364f0a1..dc78764ccc4c6211bacee0a409f23606e24b3997 100644 (file)
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
        u64 p_blkno;
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + 2 * sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
                return ocfs2_local_quota_add_chunk(sb, type, offset);
 
        /* We are protected by dqio_sem so no locking needed */
-       status = ocfs2_extend_no_holes(lqinode,
+       status = ocfs2_extend_no_holes(lqinode, NULL,
                                       lqinode->i_size + sb->s_blocksize,
                                       lqinode->i_size);
        if (status < 0) {
index 4793f36f6518b25f312274d2fcdc84492de26340..3ac5aa733e9c8018090bc2493d819937593760a0 100644 (file)
@@ -2931,6 +2931,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
        offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
        end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+       /*
+        * We only duplicate pages until we reach the page that contains i_size - 1.
+        * So trim 'end' to i_size.
+        */
+       if (end > i_size_read(context->inode))
+               end = i_size_read(context->inode);
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
@@ -4166,6 +4172,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
        struct inode *inode = old_dentry->d_inode;
        struct buffer_head *new_bh = NULL;
 
+       if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+               ret = -EINVAL;
+               mlog_errno(ret);
+               goto out;
+       }
+
        ret = filemap_fdatawrite(inode->i_mapping);
        if (ret) {
                mlog_errno(ret);
index f4c2a9eb8c4d75a6354fb52c37ba93edb81bebd5..a8e6a95a353f03dcb8a34cf928ded84ff7e6d127 100644 (file)
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                     le16_to_cpu(bg->bg_free_bits_count));
        le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
                     le16_to_cpu(bg->bg_bits));
-       cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg->bg_blkno);
+       cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
        if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
                le16_add_cpu(&cl->cl_next_free_rec, 1);
 
index e97b34842cfea0d188f85476a56c4a0eb0af5ce0..d03469f618012ea4aff64b62f1f77e3e8b8ec47d 100644 (file)
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
                                         struct ocfs2_xattr_value_buf *vb,
                                         struct ocfs2_xattr_set_ctxt *ctxt)
 {
-       int status = 0;
+       int status = 0, credits;
        handle_t *handle = ctxt->handle;
        enum ocfs2_alloc_restarted why;
        u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -719,38 +719,54 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
 
        ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
 
-       status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
-                             OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+       while (clusters_to_add) {
+               status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
+                                      OCFS2_JOURNAL_ACCESS_WRITE);
+               if (status < 0) {
+                       mlog_errno(status);
+                       break;
+               }
 
-       prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
-       status = ocfs2_add_clusters_in_btree(handle,
-                                            &et,
-                                            &logical_start,
-                                            clusters_to_add,
-                                            0,
-                                            ctxt->data_ac,
-                                            ctxt->meta_ac,
-                                            &why);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
+               prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
+               status = ocfs2_add_clusters_in_btree(handle,
+                                                    &et,
+                                                    &logical_start,
+                                                    clusters_to_add,
+                                                    0,
+                                                    ctxt->data_ac,
+                                                    ctxt->meta_ac,
+                                                    &why);
+               if ((status < 0) && (status != -EAGAIN)) {
+                       if (status != -ENOSPC)
+                               mlog_errno(status);
+                       break;
+               }
 
-       ocfs2_journal_dirty(handle, vb->vb_bh);
+               ocfs2_journal_dirty(handle, vb->vb_bh);
 
-       clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
+               clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
+                                        prev_clusters;
 
-       /*
-        * We should have already allocated enough space before the transaction,
-        * so no need to restart.
-        */
-       BUG_ON(why != RESTART_NONE || clusters_to_add);
-
-leave:
+               if (why != RESTART_NONE && clusters_to_add) {
+                       /*
+                        * We can only fail in case the alloc file doesn't give
+                        * up enough clusters.
+                        */
+                       BUG_ON(why == RESTART_META);
+
+                       mlog(0, "restarting xattr value extension for %u"
+                            " clusters,.\n", clusters_to_add);
+                       credits = ocfs2_calc_extend_credits(inode->i_sb,
+                                                           &vb->vb_xv->xr_list,
+                                                           clusters_to_add);
+                       status = ocfs2_extend_trans(handle, credits);
+                       if (status < 0) {
+                               status = -ENOMEM;
+                               mlog_errno(status);
+                               break;
+                       }
+               }
+       }
 
        return status;
 }
@@ -6788,16 +6804,15 @@ out:
        return ret;
 }
 
-static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+static int ocfs2_reflink_xattr_bucket(handle_t *handle,
                                u64 blkno, u64 new_blkno, u32 clusters,
+                               u32 *cpos, int num_buckets,
                                struct ocfs2_alloc_context *meta_ac,
                                struct ocfs2_alloc_context *data_ac,
                                struct ocfs2_reflink_xattr_tree_args *args)
 {
        int i, j, ret = 0;
        struct super_block *sb = args->reflink->old_inode->i_sb;
-       u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
-       u32 num_buckets = clusters * bpc;
        int bpb = args->old_bucket->bu_blocks;
        struct ocfs2_xattr_value_buf vb = {
                .vb_access = ocfs2_journal_access,
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                        break;
                }
 
-               /*
-                * The real bucket num in this series of blocks is stored
-                * in the 1st bucket.
-                */
-               if (i == 0)
-                       num_buckets = le16_to_cpu(
-                               bucket_xh(args->old_bucket)->xh_num_buckets);
-
                ret = ocfs2_xattr_bucket_journal_access(handle,
                                                args->new_bucket,
                                                OCFS2_JOURNAL_ACCESS_CREATE);
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                               bucket_block(args->old_bucket, j),
                               sb->s_blocksize);
 
+               /*
+                * Record the start cpos so that we can use it to initialize
+                * our xattr tree. We also set xh_num_buckets for the new
+                * bucket.
+                */
+               if (i == 0) {
+                       *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
+                                           xh_entries[0].xe_name_hash);
+                       bucket_xh(args->new_bucket)->xh_num_buckets =
+                               cpu_to_le16(num_buckets);
+               }
+
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
 
                ret = ocfs2_reflink_xattr_header(handle, args->reflink,
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
                }
 
                ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
                ocfs2_xattr_bucket_relse(args->old_bucket);
                ocfs2_xattr_bucket_relse(args->new_bucket);
        }
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
        ocfs2_xattr_bucket_relse(args->new_bucket);
        return ret;
 }
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+                               struct inode *inode,
+                               struct ocfs2_reflink_xattr_tree_args *args,
+                               struct ocfs2_extent_tree *et,
+                               struct ocfs2_alloc_context *meta_ac,
+                               struct ocfs2_alloc_context *data_ac,
+                               u64 blkno, u32 cpos, u32 len)
+{
+       int ret, first_inserted = 0;
+       u32 p_cluster, num_clusters, reflink_cpos = 0;
+       u64 new_blkno;
+       unsigned int num_buckets, reflink_buckets;
+       unsigned int bpc =
+               ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
+
+       ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+       num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
+       ocfs2_xattr_bucket_relse(args->old_bucket);
+
+       while (len && num_buckets) {
+               ret = ocfs2_claim_clusters(handle, data_ac,
+                                          1, &p_cluster, &num_clusters);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
+               reflink_buckets = min(num_buckets, bpc * num_clusters);
+
+               ret = ocfs2_reflink_xattr_bucket(handle, blkno,
+                                                new_blkno, num_clusters,
+                                                &reflink_cpos, reflink_buckets,
+                                                meta_ac, data_ac, args);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               /*
+                * For the 1st allocated cluster, we make it use the same cpos
+                * so that the xattr tree looks the same as the original one
+                * in most cases.
+                */
+               if (!first_inserted) {
+                       reflink_cpos = cpos;
+                       first_inserted = 1;
+               }
+               ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
+                                         num_clusters, 0, meta_ac);
+               if (ret)
+                       mlog_errno(ret);
+
+               mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+                    (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+
+               len -= num_clusters;
+               blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
+               num_buckets -= reflink_buckets;
+       }
+out:
+       return ret;
+}
+
 /*
  * Create the same xattr extent record in the new inode's xattr tree.
  */
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                                   void *para)
 {
        int ret, credits = 0;
-       u32 p_cluster, num_clusters;
-       u64 new_blkno;
        handle_t *handle;
        struct ocfs2_reflink_xattr_tree_args *args =
                        (struct ocfs2_reflink_xattr_tree_args *)para;
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_extent_tree et;
 
+       mlog(0, "reflink xattr buckets %llu len %u\n",
+            (unsigned long long)blkno, len);
+
        ocfs2_init_xattr_tree_extent_tree(&et,
                                          INODE_CACHE(args->reflink->new_inode),
                                          args->new_blk_bh);
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_claim_clusters(handle, data_ac,
-                                  len, &p_cluster, &num_clusters);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
-
-       mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
-            (unsigned long long)blkno, (unsigned long long)new_blkno, len);
-       ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
-                                         meta_ac, data_ac, args);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
-            (unsigned long long)new_blkno, len, cpos);
-       ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
-                                 len, 0, meta_ac);
+       ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
+                                         meta_ac, data_ac,
+                                         blkno, cpos, len);
        if (ret)
                mlog_errno(ret);
 
-out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
index 3e73de5967ff94c9e5896aaf84cd0c6a1c7a4b96..fc8497643fd08f42d48b1bad50c4c30e5f43a782 100644 (file)
@@ -74,6 +74,7 @@ int ibm_partition(struct parsed_partitions *state)
        } *label;
        unsigned char *data;
        Sector sect;
+       sector_t labelsect;
 
        res = 0;
        blocksize = bdev_logical_block_size(bdev);
@@ -97,11 +98,20 @@ int ibm_partition(struct parsed_partitions *state)
            ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
                goto out_freeall;
 
+       /*
+        * Special case for FBA disks: label sector does not depend on
+        * blocksize.
+        */
+       if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+           (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+               labelsect = info->label_block;
+       else
+               labelsect = info->label_block * (blocksize >> 9);
+
        /*
         * Get volume label, extract name and type.
         */
-       data = read_part_sector(state, info->label_block*(blocksize/512),
-                               &sect);
+       data = read_part_sector(state, labelsect, &sect);
        if (data == NULL)
                goto out_readerr;
 
index 9bf6870ee5f4cff00d8bd3dfea9ee374ebfb2e2b..a986ff588944ae0a5bb1ccc7017150324b488210 100644 (file)
@@ -46,31 +46,31 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
 
 #else
 
-void i8042_lock_chip(void)
+static inline void i8042_lock_chip(void)
 {
 }
 
-void i8042_unlock_chip(void)
+static inline void i8042_unlock_chip(void)
 {
 }
 
-int i8042_command(unsigned char *param, int command)
+static inline int i8042_command(unsigned char *param, int command)
 {
        return -ENODEV;
 }
 
-bool i8042_check_port_owner(const struct serio *serio)
+static inline bool i8042_check_port_owner(const struct serio *serio)
 {
        return false;
 }
 
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
                                        struct serio *serio))
 {
        return -ENODEV;
 }
 
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
                                       struct serio *serio))
 {
        return -ENODEV;
index a4d2e9f7088ada70d8b1357f418c32cd09a5bf1a..adf832dec3f37dd639e8aa24fe3cc29c7504a6a7 100644 (file)
@@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 struct jbd2_buffer_trigger_type {
        /*
-        * Fired just before a buffer is written to the journal.
-        * mapped_data is a mapped buffer that is the frozen data for
-        * commit.
+        * Fired at the moment data to write to the journal are known to be
+        * stable - so either at the moment b_frozen_data is created or just
+        * before a buffer is written to the journal.  mapped_data is a mapped
+        * buffer that is the frozen data for commit.
         */
-       void (*t_commit)(struct jbd2_buffer_trigger_type *type,
+       void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
                         struct buffer_head *bh, void *mapped_data,
                         size_t size);
 
@@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type {
                        struct buffer_head *bh);
 };
 
-extern void jbd2_buffer_commit_trigger(struct journal_head *jh,
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
                                       void *mapped_data,
                                       struct jbd2_buffer_trigger_type *triggers);
 extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
index 7cb00845f150ca185b73a47931c6a1fbcd4c1cef..f26fda76b87fc66816f2fa286e97d760c545f165 100644 (file)
@@ -288,6 +288,7 @@ struct pci_dev {
         */
        unsigned int    irq;
        struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+       resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
 
        /* These fields are used by common fixups */
        unsigned int    transparent:1;  /* Transparent PCI bridge */
index 7f614ce274a9198e969d573834d8782f95b8e6a3..13ebb5413a7982c5fdd1153432794f39a708f362 100644 (file)
@@ -124,7 +124,8 @@ extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)                               \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_enter_##sname;            \
        static struct ftrace_event_call __used                          \
@@ -138,7 +139,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_exit_##sname;             \
        static struct ftrace_event_call __used                          \
index 31aa9332ef3f8d4f5447f72a3989cbfd0dc7c6df..7bfae887f211556961515b5d057cdcf2880a71c9 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        i = find_overlapped_early(start, end);
        r = &early_res[i];
        if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       kmemleak_free_part(__va(start), end - start);
+
        if (start == end)
                return;
 
index 431214b941acec39f68e171e468a59f74b9b749e..68319dd20bed107595661dc6f67fe880d7a5f88d 100644 (file)
@@ -3659,6 +3659,11 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                ptr = phys_to_virt(addr);
                memset(ptr, 0, size);
                reserve_early_without_check(addr, addr + size, "BOOTMEM");
+               /*
+                * The min_count is set to 0 so that bootmem allocated blocks
+                * are never reported as leaks.
+                */
+               kmemleak_alloc(ptr, size, 0, 0);
                return ptr;
        }
 
index 6c0081441a326a174f83ebd66485bd9cb961f88d..5bffada7cde17a383e5ba510c2cca4be23463042 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -126,6 +127,12 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
                        if (!base)
                                base = vmalloc(table_size);
                }
+               /*
+                * The value stored in section->page_cgroup is (base - pfn)
+                * and it does not point to the memory block allocated above,
+                * causing kmemleak false positives.
+                */
+               kmemleak_not_leak(base);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but
index 3592057829648a8d3f8ac909f93a10a729640b68..fd7407c7205c7be809ce4c3b048e68ab9b7b9487 100644 (file)
@@ -107,7 +107,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms);
+               err = append_chain(he->callchain, data->callchain, syms, data->period);
                if (err)
                        goto out_free_syms;
        }
index 49ece7921914a3fdc20ea02d56af193ab61a73f9..97d76562a1a093c8a4bfc6f4bca652fa410779e4 100755 (executable)
@@ -5,17 +5,13 @@ if [ $# -eq 1 ]  ; then
 fi
 
 GVF=${OUTPUT}PERF-VERSION-FILE
-DEF_VER=v0.0.2.PERF
 
 LF='
 '
 
-# First see if there is a version file (included in release tarballs),
-# then try git-describe, then default.
-if test -f version
-then
-       VN=$(cat version) || VN="$DEF_VER"
-elif test -d .git -o -f .git &&
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel makefile
+if test -d ../../.git -o -f ../../.git &&
        VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
        case "$VN" in
        *$LF*) (exit 1) ;;
@@ -27,7 +23,12 @@ elif test -d .git -o -f .git &&
 then
        VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-       VN="$DEF_VER"
+       eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
+       eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
+
+       VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
 fi
 
 VN=$(expr "$VN" : v*'\(.*\)')
index 62b69ad4aa735bc0ec146277661e30f9c0e3d4e9..52c777e451ed8ebc049f39edfb651bfe848d66cc 100644 (file)
@@ -230,7 +230,7 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
 
 static void
 add_child(struct callchain_node *parent, struct resolved_chain *chain,
-         int start)
+         int start, u64 period)
 {
        struct callchain_node *new;
 
@@ -238,7 +238,7 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
        fill_node(new, chain, start);
 
        new->children_hit = 0;
-       new->hit = 1;
+       new->hit = period;
 }
 
 /*
@@ -248,7 +248,8 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
  */
 static void
 split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
-               struct callchain_list *to_split, int idx_parents, int idx_local)
+               struct callchain_list *to_split, int idx_parents, int idx_local,
+               u64 period)
 {
        struct callchain_node *new;
        struct list_head *old_tail;
@@ -275,41 +276,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
        /* create a new child for the new branch if any */
        if (idx_total < chain->nr) {
                parent->hit = 0;
-               add_child(parent, chain, idx_total);
-               parent->children_hit++;
+               add_child(parent, chain, idx_total, period);
+               parent->children_hit += period;
        } else {
-               parent->hit = 1;
+               parent->hit = period;
        }
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start);
+              unsigned int start, u64 period);
 
 static void
 __append_chain_children(struct callchain_node *root,
                        struct resolved_chain *chain,
-                       unsigned int start)
+                       unsigned int start, u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start);
+               unsigned int ret = __append_chain(rnode, chain, start, period);
 
                if (!ret)
                        goto inc_children_hit;
        }
        /* nothing in children, add to the current node */
-       add_child(root, chain, start);
+       add_child(root, chain, start, period);
 
 inc_children_hit:
-       root->children_hit++;
+       root->children_hit += period;
 }
 
 static int
 __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start)
+              unsigned int start, u64 period)
 {
        struct callchain_list *cnode;
        unsigned int i = start;
@@ -345,18 +346,18 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain,
 
        /* we match only a part of the node. Split it and add the new chain */
        if (i - start < root->val_nr) {
-               split_add_child(root, chain, cnode, start, i - start);
+               split_add_child(root, chain, cnode, start, i - start, period);
                return 0;
        }
 
        /* we match 100% of the path, increment the hit */
        if (i - start == root->val_nr && i == chain->nr) {
-               root->hit++;
+               root->hit += period;
                return 0;
        }
 
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i);
+       __append_chain_children(root, chain, i, period);
 
        return 0;
 }
@@ -380,7 +381,7 @@ static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
 
 
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms)
+                struct map_symbol *syms, u64 period)
 {
        struct resolved_chain *filtered;
 
@@ -397,7 +398,7 @@ int append_chain(struct callchain_node *root, struct ip_callchain *chain,
        if (!filtered->nr)
                goto end;
 
-       __append_chain_children(root, filtered, 0);
+       __append_chain_children(root, filtered, 0, period);
 end:
        free(filtered);
 
index 1ca73e4a2723997099973963f8477bdbbafcc909..f2e9ee164bd8ce7bcf2bb7c7af5901e6c78ebcc3 100644 (file)
@@ -49,6 +49,9 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->brothers);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
+
+       node->parent = NULL;
+       node->hit = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
@@ -58,7 +61,7 @@ static inline u64 cumul_hits(struct callchain_node *node)
 
 int register_callchain_param(struct callchain_param *param);
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms);
+                struct map_symbol *syms, u64 period);
 
 bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
 #endif /* __PERF_CALLCHAIN_H */