Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 4 Jan 2017 22:14:53 +0000 (14:14 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 4 Jan 2017 22:14:53 +0000 (14:14 -0800)
Pull networking fixes from David Miller:

 1) stmmac_drv_probe() can race with stmmac_open() because we register
    the netdevice too early. Fix from Florian Fainelli.

 2) UFO handling in __ip6_append_data() and ip6_finish_output() uses
    different tests for deciding whether a frame will be fragmented;
    put them in sync. Fix from Zheng Li.

 3) The rtnetlink getstats handlers need to validate that the netlink
    request is large enough; fix from Mathias Krause. (A generic
    size-check sketch follows this list.)

 4) Use after free in mlx4 driver, from Jack Morgenstein.

 5) Fix setting of garbage UID value in sockets during setattr() calls,
    from Eric Biggers.

 6) Packet drop_monitor doesn't format its netlink messages properly,
    so nlmsg_next fails to walk them; fix from Reiter Wolfgang. (A
    generic genlmsg construction sketch follows this list.)

 7) Fix handling of wildcard addresses in l2tp lookups, from Guillaume
    Nault.

 8) __skb_flow_dissect() can crash on pptp packets, from Ian Kumlien.

 9) IGMP code doesn't reset group query timers properly, from Michal
    Tesar.

10) Fix overzealous MAIN/LOCAL route table combining in ipv4, from
    Alexander Duyck.

11) vxlan offload check needs to be more strict in be2net driver, from
    Sabrina Dubroca.

12) Moving l3mdev to packet hooks lost RX stat counters unintentionally,
    fix from David Ahern.
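
As a rough illustration of the check item 3 describes, here is a minimal
sketch of the usual netlink header-size validation pattern. It is not the
actual net/core/rtnetlink.c change; the helper name is hypothetical, and
only nlmsg_len() and struct if_stats_msg are taken from the real kernel API.

    #include <linux/if_link.h>
    #include <net/netlink.h>

    /* Hypothetical helper: reject a getstats-style request whose payload is
     * shorter than the fixed header the handler is about to dereference. */
    static int stats_req_size_ok(const struct nlmsghdr *nlh)
    {
            if (nlmsg_len(nlh) < sizeof(struct if_stats_msg))
                    return -EINVAL; /* too small, don't touch the payload */
            return 0;
    }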

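Similarly, item 6 comes down to the generic-netlink construction pattern
sketched below: everything appended between genlmsg_put() and genlmsg_end()
has to be reflected in the final nlmsg_len, otherwise nlmsg_next() cannot
step to the following message. The command and attribute numbers are
placeholders, not the real drop_monitor definitions.

    #include <net/genetlink.h>

    static int build_alert_msg(struct sk_buff *skb, struct genl_family *family,
                               u32 portid, u32 seq)
    {
            void *hdr;

            hdr = genlmsg_put(skb, portid, seq, family, 0, 1 /* placeholder cmd */);
            if (!hdr)
                    return -EMSGSIZE;

            if (nla_put_u32(skb, 1 /* placeholder attr */, 42))
                    goto nla_put_failure;

            /* Closes the message and fixes up nlmsg_len to cover the payload. */
            genlmsg_end(skb, hdr);
            return 0;

    nla_put_failure:
            genlmsg_cancel(skb, hdr);
            return -EMSGSIZE;
    }
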
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (52 commits)
  sh_eth: enable RX descriptor word 0 shift on SH7734
  sfc: don't report RX hash keys to ethtool when RSS wasn't enabled
  dpaa_eth: Initialize CGR structure before init
  dpaa_eth: cleanup after init_phy() failure
  net: systemport: Pad packet before inserting TSB
  net: systemport: Utilize skb_put_padto()
  LiquidIO VF: s/select/imply/ for PTP_1588_CLOCK
  libcxgb: fix error check for ip6_route_output()
  net: usb: asix_devices: add .reset_resume for USB PM
  net: vrf: Add missing Rx counters
  drop_monitor: consider inserted data in genlmsg_end
  benet: stricter vxlan offloading check in be_features_check
  ipv4: Do not allow MAIN to be alias for new LOCAL w/ custom rules
  net: macb: Updated resource allocation function calls to new version of API.
  net: stmmac: dwmac-oxnas: use generic pm implementation
  net: stmmac: dwmac-oxnas: fix fixed-link-phydev leaks
  net: stmmac: dwmac-oxnas: fix of-node leak
  Documentation/networking: fix typo in mpls-sysctl
  igmp: Make igmp group member RFC 3376 compliant
  flow_dissector: Update pptp handling to avoid null pointer deref.
  ...

44 files changed:
Documentation/DocBook/Makefile
Documentation/block/queue-sysfs.txt
Documentation/unaligned-memory-access.txt
MAINTAINERS
Makefile
arch/openrisc/kernel/vmlinux.lds.S
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/time.c
arch/parisc/mm/fault.c
arch/s390/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/s390/kernel/vtime.c
arch/x86/include/asm/bitops.h
block/blk-wbt.c
crypto/testmgr.c
drivers/crypto/marvell/cesa.h
drivers/crypto/marvell/hash.c
drivers/crypto/marvell/tdma.c
drivers/hid/hid-asus.c
drivers/hid/hid-ids.h
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/usbhid/hid-quirks.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/scsi.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/fcloop.c
drivers/video/fbdev/cobalt_lcdfb.c
fs/block_dev.c
fs/buffer.c
fs/crypto/keyinfo.c
fs/crypto/policy.c
fs/dax.c
fs/ext2/inode.c
fs/ext4/file.c
include/linux/dax.h
include/linux/genhd.h
include/linux/page-flags.h
mm/filemap.c
mm/truncate.c
scripts/gcc-plugins/gcc-common.h
scripts/gcc-plugins/latent_entropy_plugin.c

index c75e5d6b8fa8d48b787eed2a0f926bc36fe0a930..a6eb7dcd4dd5c010fe76ac285769d9e0c5157adc 100644 (file)
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml  \
            kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
            gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-           80211.xml sh.xml regulator.xml w1.xml \
+           sh.xml regulator.xml w1.xml \
            writing_musb_glue_layer.xml iio.xml
 
 ifeq ($(DOCBOOKS),)
index 51642159aedbbc405d1bb90fa89402c2143f8310..c0a3bb5a6e4eb291d077f10633001c439563ccc2 100644 (file)
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
 
 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success.  Writing '0' to this file will disable polling
-for this device.  Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0).  Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
 
 io_poll_delay (RW)
 ------------------
index a445da098bc6e5aa733cd55ca2ee8b4a5f04dc2c..3f76c0c379206a72519e864fb5486abf1a75ac0f 100644 (file)
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #else
        const u16 *a = (const u16 *)addr1;
        const u16 *b = (const u16 *)addr2;
-       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
 #endif
 }
 
index cfff2c9e3d9470550fd47dcd7b2638c77121c607..97d0b689270a82fe5cd19c3c2c8fc1f2afdc1caf 100644 (file)
@@ -5080,9 +5080,11 @@ F:       drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-fbdev@vger.kernel.org
+T:     git git://github.com/bzolnier/linux.git
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
-S:     Orphan
+S:     Maintained
 F:     Documentation/fb/
 F:     drivers/video/
 F:     include/video/
@@ -8852,17 +8854,22 @@ F:      drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:     Keith Busch <keith.busch@intel.com>
 M:     Jens Axboe <axboe@fb.com>
+M:     Christoph Hellwig <hch@lst.de>
+M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:     https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/host/
 F:     include/linux/nvme.h
+F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS TARGET DRIVER
 M:     Christoph Hellwig <hch@lst.de>
 M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/target/
 
index ec411ba9e40f98376bb594de459e98413f0dfae9..5470d599384a5ba676a60490e19baf81a1068b65 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Roaring Lionus
 
 # *DOCUMENTATION*
index ef31fc24344e983c67b6f2b9c185a9275d456ed1..552544616b9d93ab6838a998ffb0b6ad9a7f1574 100644 (file)
@@ -44,6 +44,8 @@ SECTIONS
         /* Read-only sections, merged into text segment: */
         . = LOAD_BASE ;
 
+       _text = .;
+
        /* _s_kernel_ro must be page aligned */
        . = ALIGN(PAGE_SIZE);
        _s_kernel_ro = .;
index 7581330ea35be1e15498cf5cef9bbcbd3889aab9..88fe0aad4390b10830ce1bc1be62925d4b2d4bbc 100644 (file)
@@ -49,7 +49,6 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG     3       /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
index da0d9cb63403d4b3b4f9647cf8415dd11db30fd3..1e22f981cd81fb0cf840407210d499cab0319e0e 100644 (file)
@@ -235,9 +235,26 @@ void __init time_init(void)
 
        cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */
 
-       /* register at clocksource framework */
-       clocksource_register_hz(&clocksource_cr16, cr16_hz);
-
        /* register as sched_clock source */
        sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }
+
+static int __init init_cr16_clocksource(void)
+{
+       /*
+        * The cr16 interval timers are not synchronized across CPUs, so mark
+        * them unstable and lower rating on SMP systems.
+        */
+       if (num_online_cpus() > 1) {
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
+       }
+
+       /* register at clocksource framework */
+       clocksource_register_hz(&clocksource_cr16,
+               100 * PAGE0->mem_10msec);
+
+       return 0;
+}
+
+device_initcall(init_cr16_clocksource);
index 8ff9253930af776b5ca7d408f4bd134cdb88c9b0..1a0b4f63f0e90fbb4e4cb58ea6da95d326bcba3c 100644 (file)
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
            tsk->comm, code, address);
        print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
 
-       pr_cont(" trap #%lu: %s%c", code, trap_name(code),
+       pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
                vma ? ',':'\n');
 
        if (vma)
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..2c3413b
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
index 6b246aadf311706849341dac2d0eafee293340f1..1b5c5ee9fc1b60878844cd67cb6a6cb12800a563 100644 (file)
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
 {
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        }
        account_user_time(tsk, user);
        tsk->utimescaled += user_scaled;
-       account_system_time(tsk, hardirq_offset, system);
+       account_system_time(tsk, 0, system);
        tsk->stimescaled += system_scaled;
 
        steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 
 void vtime_task_switch(struct task_struct *prev)
 {
-       do_account_vtime(prev, 0);
+       do_account_vtime(prev);
        prev->thread.user_timer = S390_lowcore.user_timer;
        prev->thread.system_timer = S390_lowcore.system_timer;
        S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_account_user(struct task_struct *tsk)
 {
-       if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+       if (do_account_vtime(tsk))
                virt_timer_expire();
 }
 
index 68557f52b9619ddfed7681fc43c2d0530c625f0f..854022772c5be4d49d2697bd2b66b454f49c9e6f 100644 (file)
@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
+static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+       bool negative;
+       asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+               CC_SET(s)
+               : CC_OUT(s) (negative), ADDR
+               : "ir" ((char) ~(1 << nr)) : "memory");
+       return negative;
+}
+
+// Let everybody know we have it
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+
 /*
  * __clear_bit_unlock - Clears a bit in memory
  * @nr: Bit to clear
index 6e82769f4042c2f57af7976b1b7b4342eea1306d..f0a9c07b4c7a5ef9e96985a89c5c000d62a78cd0 100644 (file)
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+       __releases(lock)
+       __acquires(lock)
 {
        struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
        DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
                if (may_queue(rwb, rqw, &wait, rw))
                        break;
 
-               if (lock)
+               if (lock) {
                        spin_unlock_irq(lock);
-
-               io_schedule();
-
-               if (lock)
+                       io_schedule();
                        spin_lock_irq(lock);
+               } else
+                       io_schedule();
        } while (1);
 
        finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
        unsigned int ret = 0;
 
index f616ad74cce756fb2d0a0657d153483ed05f56d7..44e888b0b041944b44e8aa5d75619e51d7fe52a2 100644 (file)
@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
        for (i = 0; i < ctcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = ctemplate[i].inlen;
+               void *input_vec;
 
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               memcpy(input_vec, ctemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, ctemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
        for (i = 0; i < dtcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = dtemplate[i].inlen;
+               void *input_vec;
+
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
+               memcpy(input_vec, dtemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, dtemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
index a768da7138a1cd4a0a79771ad99b2f5436f481c3..b7872f62f67475fdc6889420c38de4f8418ffafd 100644 (file)
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_SRC_IN_SRAM                  BIT(30)
 #define CESA_TDMA_END_OF_REQ                   BIT(29)
 #define CESA_TDMA_BREAK_CHAIN                  BIT(28)
-#define CESA_TDMA_TYPE_MSK                     GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE                    BIT(27)
+#define CESA_TDMA_TYPE_MSK                     GENMASK(26, 0)
 #define CESA_TDMA_DUMMY                                0
 #define CESA_TDMA_DATA                         1
 #define CESA_TDMA_OP                           2
index 317cf029c0cf1beab3f2b5f0ab41cf187626e24b..77c0fb936f4794363f478b7bc0b8d23dd10d880f 100644 (file)
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
        sreq->offset = 0;
 }
 
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+       struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+       struct mv_cesa_req *base = &creq->base;
+
+       /* We must explicitly set the digest state. */
+       if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+               struct mv_cesa_engine *engine = base->engine;
+               int i;
+
+               /* Set the hash state in the IVDIG regs. */
+               for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+                       writel_relaxed(creq->state[i], engine->regs +
+                                      CESA_IVDIG(i));
+       }
+
+       mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-               mv_cesa_dma_step(&creq->base);
+               mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
 }
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
+       bool set_state = false;
        int ret;
        u32 type;
 
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;
 
+       if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+               set_state = true;
+
        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
 
+       if (set_state) {
+               /*
+                * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+                * let the step logic know that the IVDIG registers should be
+                * explicitly set before launching a TDMA chain.
+                */
+               basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+       }
+
        return 0;
 
 err_free_tdma:
index 4416b88eca708aff6aadbff76ec04d95f6b40dbb..c76375ff376d39e5dc2e74463a310231b58f4c86 100644 (file)
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
                last->next = dreq->chain.first;
                engine->chain.last = dreq->chain.last;
 
-               if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+               /*
+                * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+                * the last element of the current chain, or if the request
+                * being queued needs the IV regs to be set before launching
+                * the request.
+                */
+               if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+                   !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
                        last->next_dma = dreq->chain.first->cur_dma;
        }
 }
index d40ed9fdf68d990ce2b138579bc6404a6fae7d99..70b12f89a193dc369273cea5565bf804a78cc50a 100644 (file)
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_SKIP_INPUT_MAPPING       BIT(2)
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 
-#define NOTEBOOK_QUIRKS                        QUIRK_FIX_NOTEBOOK_REPORT
+#define KEYBOARD_QUIRKS                        (QUIRK_FIX_NOTEBOOK_REPORT | \
+                                                QUIRK_NO_INIT_REPORTS)
 #define TOUCHPAD_QUIRKS                        (QUIRK_NO_INIT_REPORTS | \
                                                 QUIRK_SKIP_INPUT_MAPPING | \
                                                 QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,
 
 static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
+       struct input_dev *input = hi->input;
        struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                int ret;
-               struct input_dev *input = hi->input;
 
                input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
                input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
                        hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
                        return ret;
                }
-
-               drvdata->input = input;
        }
 
+       drvdata->input = input;
+
        return 0;
 }
 
@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_stop_hw;
        }
 
-       drvdata->input->name = "Asus TouchPad";
+       if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+               drvdata->input->name = "Asus TouchPad";
+       } else {
+               drvdata->input->name = "Asus Keyboard";
+       }
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 
 static const struct hid_device_id asus_devices[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
-                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
                         USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
        { }
index ec277b96eaa1b33461aa7702f38864598b910e59..54bd22dc14110c308744f28f01a7ab4cff79ff95 100644 (file)
 #define USB_VENDOR_ID_DRAGONRISE               0x0079
 #define USB_DEVICE_ID_DRAGONRISE_WIIU          0x1800
 #define USB_DEVICE_ID_DRAGONRISE_PS3           0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR    0x1803
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE      0x1843
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_VENDOR_ID_FLATFROG         0x25b5
 #define USB_DEVICE_ID_MULTITOUCH_3200  0x0002
 
+#define USB_VENDOR_ID_FUTABA            0x0547
+#define USB_DEVICE_ID_LED_DISPLAY       0x7000
+
 #define USB_VENDOR_ID_ESSENTIAL_REALITY        0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
index 5c925228847c8e88653ec5fb00edfc053fa81784..4ef73374a8f9881136cabeda32a67c6a21d53a85 100644 (file)
@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        __s32 value;
        int ret = 0;
 
-       memset(buffer, 0, buffer_size);
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        int buffer_index = 0;
        int i;
 
+       memset(buffer, 0, buffer_size);
+
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield) ||
index 7687c0875395d6b351928a156be6f5268cf50cfe..f405b07d03816506215bd19fe3c878393370484a 100644 (file)
@@ -1099,8 +1099,11 @@ struct sony_sc {
        u8 led_delay_on[MAX_LEDS];
        u8 led_delay_off[MAX_LEDS];
        u8 led_count;
+       bool ds4_dongle_connected;
 };
 
+static void sony_set_leds(struct sony_sc *sc);
+
 static inline void sony_schedule_work(struct sony_sc *sc)
 {
        if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
                                return -EILSEQ;
                        }
                }
+
+               /*
+                * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
+                * if a DS4 is actually connected (indicated by '0').
+                * For non-dongle, this bit is always 0 (connected).
+                */
+               if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
+                   sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+                       bool connected = (rd[31] & 0x04) ? false : true;
+
+                       if (!sc->ds4_dongle_connected && connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
+                               sony_set_leds(sc);
+                               sc->ds4_dongle_connected = true;
+                       } else if (sc->ds4_dongle_connected && !connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
+                               sc->ds4_dongle_connected = false;
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       } else if (!sc->ds4_dongle_connected) {
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       }
+               }
+
                dualshock4_parse_report(sc, rd, size);
        }
 
@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
                }
 
                memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+
+               snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
+                       "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+                       sc->mac_address[5], sc->mac_address[4],
+                       sc->mac_address[3], sc->mac_address[2],
+                       sc->mac_address[1], sc->mac_address[0]);
        } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
                        (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
                buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
                        hid_err(sc->hdev,
                        "Unable to initialize multi-touch slots: %d\n",
                        ret);
-                       return ret;
+                       goto err_stop;
                }
 
                sony_init_output_report(sc, dualshock4_send_output_report);
index b3e01c82af0512dce7a68e6bde908d7e3afeaba8..e9d6cc7cdfc5c8019422d45914dc0363448bcb12 100644 (file)
@@ -83,11 +83,13 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
index b40cfb076f02446fda62aea907301e4a62dcc17a..2fc86dc7a8df3e487c8222fa84310e7832c9c0a8 100644 (file)
@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->stripe_size)
-               blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+               blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        ctrl->max_hw_sectors =
                min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
-       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-               unsigned int max_hw_sectors;
-
-               ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-               max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-               if (ctrl->max_hw_sectors) {
-                       ctrl->max_hw_sectors = min(max_hw_sectors,
-                                                       ctrl->max_hw_sectors);
-               } else {
-                       ctrl->max_hw_sectors = max_hw_sectors;
-               }
-       }
-
        nvme_set_queue_limits(ctrl, ctrl->admin_q);
        ctrl->sgls = le32_to_cpu(id->sgls);
        ctrl->kas = le16_to_cpu(id->kas);
index 771e2e76187222dfb71616f5665c7b2b22802c74..aa0bc60810a74ff93cf05b294b2a9d4968ecf397 100644 (file)
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
        struct nvme_fc_queue *queue = &ctrl->queues[1];
-       int i, j, ret;
+       int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++, queue++) {
                ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-               if (ret) {
-                       for (j = i-1; j >= 0; j--)
-                               __nvme_fc_delete_hw_queue(ctrl,
-                                               &ctrl->queues[j], j);
-                       return ret;
-               }
+               if (ret)
+                       goto delete_queues;
        }
 
        return 0;
+
+delete_queues:
+       for (; i >= 0; i--)
+               __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+       return ret;
 }
 
 static int
@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        WARN_ON_ONCE(!changed);
 
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-               ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+               "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+               ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
 
        kref_get(&ctrl->ctrl.kref);
 
index bd5321441d127143b87563e5463d2944aa0c1b0e..6377e14586dc5c837749049cf3dafc7b210a3026 100644 (file)
@@ -135,7 +135,6 @@ struct nvme_ctrl {
 
        u32 page_size;
        u32 max_hw_sectors;
-       u32 stripe_size;
        u16 oncs;
        u16 vid;
        atomic_t abort_limit;
index 3d21a154dce79deceeff77cd16ef5c6bf2a71978..19beeb7b2ac26a5bf0f81bf4e8b995bf29dba195 100644 (file)
@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                nvme_req(req)->result = cqe.result;
                blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
        }
 
-       /* If the controller ignores the cq head doorbell and continuously
-        * writes to the queue, it is theoretically possible to wrap around
-        * the queue twice and mistakenly return IRQ_NONE.  Linux only
-        * requires that 0.1% of your interrupts are handled, so this isn't
-        * a big problem.
-        */
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;
 
@@ -1909,10 +1902,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (!dev->bar)
                goto release;
 
-       return 0;
+       return 0;
   release:
-       pci_release_mem_regions(pdev);
-       return -ENODEV;
+       pci_release_mem_regions(pdev);
+       return -ENODEV;
 }
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
index b71e95044b43e3e62a5d063047064a46d88a6d9f..a5c09e703bd8636d96c9c0c7d603226e46518f61 100644 (file)
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
        return nvme_trans_status_code(hdr, nvme_sc);
 }
 
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-                                                       u8 *cmd)
-{
-       u8 immed, no_flush;
-
-       immed = cmd[1] & 0x01;
-       no_flush = cmd[4] & 0x04;
-
-       if (immed != 0) {
-               return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-       } else {
-               if (no_flush == 0) {
-                       /* Issue NVME FLUSH command prior to START STOP UNIT */
-                       int res = nvme_trans_synchronize_cache(ns, hdr);
-                       if (res)
-                               return res;
-               }
-
-               return 0;
-       }
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                                        u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
        case SECURITY_PROTOCOL_OUT:
                retcode = nvme_trans_security_protocol(ns, hdr, cmd);
                break;
-       case START_STOP:
-               retcode = nvme_trans_start_stop(ns, hdr, cmd);
-               break;
        case SYNCHRONIZE_CACHE:
                retcode = nvme_trans_synchronize_cache(ns, hdr);
                break;
index ec1ad2aa0a4ca941e8fe51db94cc7ffa452c1693..95ae52390478fe62fdb59605ee2c7a6d0583a919 100644 (file)
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-       u64 val;
        u32 val32;
        u16 status = 0;
 
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
-               val = le64_to_cpu(req->cmd->prop_set.value);
-               val32 = val & 0xffff;
+               val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                nvmet_set_result(req, req->sq->ctrl->kato);
                break;
index bcb8ebeb01c5d26515c8047e4eed765dbdc4da45..4e8e6a22bce162a61eec428e9c435acb26b74046 100644 (file)
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
        rport->lport = nport->lport;
        nport->rport = rport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
        tport->lport = nport->lport;
        nport->tport = tport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
index 2d3b691f3fc4885414ae9234950d8e9c798e5fe6..038ac6934fe9d7f865f9711f24fecadc27071b65 100644 (file)
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
        info->screen_size = resource_size(res);
        info->screen_base = devm_ioremap(&dev->dev, res->start,
                                         info->screen_size);
+       if (!info->screen_base) {
+               framebuffer_release(info);
+               return -ENOMEM;
+       }
+
        info->fbops = &cobalt_lcd_fbops;
        info->fix = cobalt_lcdfb_fix;
        info->fix.smem_start = res->start;
index 6254cee8f8f382bf8aa881426453bae189973d34..5db5d1340d69eccf475f0feac7f85665bd6aceb5 100644 (file)
@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
+       struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ);
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        dio->multi_bio = false;
        dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
 
+       blk_start_plug(&plug);
        for (;;) {
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = pos >> 9;
@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }
+       blk_finish_plug(&plug);
 
        if (!dio->is_sync)
                return -EIOCBQUEUED;
index d21771fcf7d345ab4299cb7fa25881ffcc61ef52..0e87401cf33535b03a1d2aa9da6e919d8a56a906 100644 (file)
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
                        head = page_buffers(page);
                        bh = head;
                        do {
-                               if (!buffer_mapped(bh))
+                               if (!buffer_mapped(bh) || (bh->b_blocknr < block))
                                        goto next;
                                if (bh->b_blocknr >= block + len)
                                        break;
index 6eeea1dcba41c2fa75c479ce1a3fa16f7cd2e7a5..95cd4c3b06c326708a3315d3da86bdb9aafd5469 100644 (file)
@@ -248,7 +248,8 @@ retry:
                goto out;
 
        if (fscrypt_dummy_context_enabled(inode)) {
-               memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+               memset(raw_key, 0x42, keysize/2);
+               memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
                goto got_key;
        }
 
index 6ed7c2eebeec53c7656054d05061dc83a23ef1c0..d6cd7ea4851da877b13c7af306fd07f8468a54f4 100644 (file)
@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
                BUG_ON(1);
        }
 
+       /* No restrictions on file types which are never encrypted */
+       if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+           !S_ISLNK(child->i_mode))
+               return 1;
+
        /* no restrictions if the parent directory is not encrypted */
        if (!parent->i_sb->s_cop->is_encrypted(parent))
                return 1;
index a8732fbed381a45bbce44fcdf0731ccfdc1a09ba..5c74f60d0a5094dc0a27f27ae0acd41667414332 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
+static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+                                         pgoff_t index, bool trunc)
+{
+       int ret = 0;
+       void *entry;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = get_unlocked_mapping_entry(mapping, index, NULL);
+       if (!entry || !radix_tree_exceptional_entry(entry))
+               goto out;
+       if (!trunc &&
+           (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+            radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+               goto out;
+       radix_tree_delete(page_tree, index);
+       mapping->nrexceptional--;
+       ret = 1;
+out:
+       put_unlocked_mapping_entry(mapping, index, entry);
+       spin_unlock_irq(&mapping->tree_lock);
+       return ret;
+}
 /*
  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
  * entry to get unlocked before deleting it.
  */
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-       void *entry;
+       int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 
-       spin_lock_irq(&mapping->tree_lock);
-       entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from truncate / punch_hole path. As such, the caller
         * must hold locks protecting against concurrent modifications of the
@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
         * caller has seen exceptional entry for this index, we better find it
         * at that index as well...
         */
-       if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
-               spin_unlock_irq(&mapping->tree_lock);
-               return 0;
-       }
-       radix_tree_delete(&mapping->page_tree, index);
+       WARN_ON_ONCE(!ret);
+       return ret;
+}
+
+/*
+ * Invalidate exceptional DAX entry if easily possible. This handles DAX
+ * entries for invalidate_inode_pages() so we evict the entry only if we can
+ * do so without blocking.
+ */
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+       int ret = 0;
+       void *entry, **slot;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
+       if (!entry || !radix_tree_exceptional_entry(entry) ||
+           slot_locked(mapping, slot))
+               goto out;
+       if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+           radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+               goto out;
+       radix_tree_delete(page_tree, index);
        mapping->nrexceptional--;
+       ret = 1;
+out:
        spin_unlock_irq(&mapping->tree_lock);
-       dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       if (ret)
+               dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       return ret;
+}
 
-       return 1;
+/*
+ * Invalidate exceptional DAX entry if it is clean.
+ */
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index)
+{
+       return __dax_invalidate_mapping_entry(mapping, index, false);
 }
 
 /*
@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
                         struct vm_fault *vmf)
 {
        struct page *page;
+       int ret;
 
        /* Hole page already exists? Return it...  */
-       if (!radix_tree_exceptional_entry(entry)) {
-               vmf->page = entry;
-               return VM_FAULT_LOCKED;
+       if (!radix_tree_exceptional_entry(*entry)) {
+               page = *entry;
+               goto out;
        }
 
        /* This will replace locked radix tree entry with a hole page */
@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
+ out:
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = finish_fault(vmf);
+       vmf->page = NULL;
+       *entry = page;
+       if (!ret) {
+               /* Grab reference for PTE that is now referencing the page */
+               get_page(page);
+               return VM_FAULT_NOPAGE;
+       }
+       return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;
 
+       /*
+        * Write can allocate block for an area which has a hole page mapped
+        * into page tables. We have to tear down these mappings so that data
+        * written by write(2) is visible in mmap.
+        */
+       if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+               invalidate_inode_pages2_range(inode->i_mapping,
+                                             pos >> PAGE_SHIFT,
+                                             (end - 1) >> PAGE_SHIFT);
+       }
+
        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (iov_iter_rw(iter) == WRITE)
                flags |= IOMAP_WRITE;
 
-       /*
-        * Yes, even DAX files can have page cache attached to them:  A zeroed
-        * page is inserted into the pagecache when we have to serve a write
-        * fault on a hole.  It should never be dirtied and can simply be
-        * dropped from the pagecache once we get real data for the page.
-        *
-        * XXX: This is racy against mmap, and there's nothing we can do about
-        * it. We'll eventually need to shift this down even further so that
-        * we can check if we allocated blocks over a hole first.
-        */
-       if (mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(mapping,
-                               pos >> PAGE_SHIFT,
-                               (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
-       }
-
        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
+static int dax_fault_return(int error)
+{
+       if (error == 0)
+               return VM_FAULT_NOPAGE;
+       if (error == -ENOMEM)
+               return VM_FAULT_OOM;
+       return VM_FAULT_SIGBUS;
+}
+
 /**
  * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;
 
-       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-       if (IS_ERR(entry)) {
-               error = PTR_ERR(entry);
-               goto out;
-       }
-
        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;
 
@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
-               goto unlock_entry;
+               return dax_fault_return(error);
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-               error = -EIO;           /* fs corruption? */
+               vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
+               goto finish_iomap;
+       }
+
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+       if (IS_ERR(entry)) {
+               vmf_ret = dax_fault_return(PTR_ERR(entry));
                goto finish_iomap;
        }
 
@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
 
                if (error)
-                       goto finish_iomap;
+                       goto error_unlock_entry;
 
                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
-               goto finish_iomap;
+               goto unlock_entry;
        }
 
        switch (iomap.type) {
@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
+               /* -EBUSY is fine, somebody else faulted on the same PTE */
+               if (error == -EBUSY)
+                       error = 0;
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-                       vmf_ret = dax_load_hole(mapping, entry, vmf);
-                       break;
+                       vmf_ret = dax_load_hole(mapping, &entry, vmf);
+                       goto unlock_entry;
                }
                /*FALLTHRU*/
        default:
@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                break;
        }
 
+ error_unlock_entry:
+       vmf_ret = dax_fault_return(error) | major;
+ unlock_entry:
+       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (error || (vmf_ret & VM_FAULT_ERROR)) {
-                       /* keep previous error */
-                       ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PAGE_SIZE,
-                                       PAGE_SIZE, flags, &iomap);
-               }
-       }
- unlock_entry:
-       if (vmf_ret != VM_FAULT_LOCKED || error)
-               put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
-       if (error == -ENOMEM)
-               return VM_FAULT_OOM | major;
-       /* -EBUSY is fine, somebody else faulted on the same PTE */
-       if (error < 0 && error != -EBUSY)
-               return VM_FAULT_SIGBUS | major;
-       if (vmf_ret) {
-               WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
-               return vmf_ret;
+               int copied = PAGE_SIZE;
+
+               if (vmf_ret & VM_FAULT_ERROR)
+                       copied = 0;
+               /*
+                * The fault is done by now and there's no way back (other
+                * thread may be already happily using PTE we have installed).
+                * Just ignore error from ->iomap_end since we cannot do much
+                * with it.
+                */
+               ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
-       return VM_FAULT_NOPAGE | major;
+       return vmf_ret;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
 
@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;
 
-       /*
-        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-        * PMD or a HZP entry.  If it can't (because a 4k page is already in
-        * the tree, for instance), it will return -EEXIST and we just fall
-        * back to 4k entries.
-        */
-       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-       if (IS_ERR(entry))
-               goto fallback;
-
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
-               goto unlock_entry;
+               goto fallback;
+
        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;
 
+       /*
+        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+        * PMD or a HZP entry.  If it can't (because a 4k page is already in
+        * the tree, for instance), it will return -EEXIST and we just fall
+        * back to 4k entries.
+        */
+       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+       if (IS_ERR(entry))
+               goto finish_iomap;
+
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
-                       goto finish_iomap;
+                       goto unlock_entry;
                result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
                                &entry);
                break;
@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                break;
        }
 
+ unlock_entry:
+       put_locked_mapping_entry(mapping, pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (result == VM_FAULT_FALLBACK) {
-                       ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
-                                       iomap_flags, &iomap);
-                       if (error)
-                               result = VM_FAULT_FALLBACK;
-               }
+               int copied = PMD_SIZE;
+
+               if (result == VM_FAULT_FALLBACK)
+                       copied = 0;
+               /*
+                * The fault is done by now and there's no way back (another
+                * thread may already be happily using the PMD we have
+                * installed). Just ignore the error from ->iomap_end since
+                * we cannot do much with it.
+                */
+               ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
+                               &iomap);
        }
- unlock_entry:
-       put_locked_mapping_entry(mapping, pgoff, entry);
  fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, pmd, address);
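
The reordering in this hunk is easier to see stripped of the DAX details. Below is a small self-contained user-space sketch (all names invented, compiles stand-alone) of the cleanup nesting the patch restores: the mapping entry is grabbed only after ->iomap_begin() succeeds and is put back before ->iomap_end() runs, so every exit path unwinds in reverse order of acquisition.

#include <stdio.h>

/* Stand-ins for ->iomap_begin()/->iomap_end() and the mapping entry. */
static int  begin_iomap(void) { puts("iomap_begin"); return 0; }
static void end_iomap(void)   { puts("iomap_end"); }
static int  grab_entry(void)  { puts("grab entry");  return 0; }
static void put_entry(void)   { puts("put entry"); }

static int fault_sketch(void)
{
        int ret = begin_iomap();
        if (ret)
                return ret;              /* nothing to undo yet */

        ret = grab_entry();
        if (ret)
                goto finish_iomap;       /* undo only what was taken */

        puts("install mapping");         /* the actual fault work */

        put_entry();                     /* unlock_entry: drop the entry first... */
finish_iomap:
        end_iomap();                     /* ...then end the iomap, mirroring begin */
        return ret;
}

int main(void)
{
        return fault_sketch();
}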
index 0093ea2512a85809e16605088074a8335513e81c..f073bfca694b9982b8bc23e8f0e00be6bef075a7 100644 (file)
@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
-       } else {
-               *new = true;
        }
+       *new = true;
 
        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
index b5f184493c57b0fd91cfc5f6c0633577ce770884..d663d3d7c81cb7fdff0f33f1903e9ed4d1f77f9a 100644 (file)
@@ -258,7 +258,6 @@ out:
 static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                                               EXT4_DATA_TRANS_BLOCKS(sb));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else
-               result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       }
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;
@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                               ext4_chunk_trans_blocks(inode,
-                                                       PMD_SIZE / PAGE_SIZE));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else {
-               result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
-                                            &ext4_iomap_ops);
        }
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+                                    &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
index f97bcfe794724d4fb757297851fe218268d0c86b..24ad711739955e573aefea713852b2f49a8fed85 100644 (file)
@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all);
 
index e0341af6950e2116a43b3b0281f57fea8099c06f..76f39754e7b0299df616bc3cb909f9a35fce9ea1 100644 (file)
@@ -146,15 +146,6 @@ enum {
        DISK_EVENT_EJECT_REQUEST                = 1 << 1, /* eject requested */
 };
 
-#define BLK_SCSI_MAX_CMDS      (256)
-#define BLK_SCSI_CMD_PER_LONG  (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
-       unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-       unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-       struct kobject kobj;
-};
-
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
index c56b39890a412abfec4acc31e404781215ae3ff6..6b5818d6de322f8b5898e082ab4b3936042beb5a 100644 (file)
  */
 enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
-       PG_waiters,             /* Page has waiters, check its waitqueue */
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
+       PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use*/
        PG_arch_1,
index 82f26cde830c4b70df30cfa47c7e21dbe6d05a7f..d0e4d1002059360e50254ae2c87dc8f7a87a2dff 100644 (file)
@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
 
+#ifndef clear_bit_unlock_is_negative_byte
+
+/*
+ * PG_waiters is the high bit in the same byte as PG_locked.
+ *
+ * On x86 (and on many other architectures), we can clear PG_locked and
+ * test the sign bit at the same time. But if the architecture does
+ * not support that special operation, we just do this all by hand
+ * instead.
+ *
+ * The read of PG_waiters has to be after (or concurrently with) PG_locked
+ * being cleared, but a memory barrier should be unnecessary since it is
+ * in the same byte as PG_locked.
+ */
+static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
+{
+       clear_bit_unlock(nr, mem);
+       /* smp_mb__after_atomic(); */
+       return test_bit(PG_waiters, mem);
+}
+
+#endif
+
 /**
  * unlock_page - unlock a locked page
  * @page: the page
@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The mb is necessary to enforce ordering between the clear_bit and the read
- * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
+ * Note that this depends on PG_waiters being the sign bit in the byte
+ * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
+ * clear the PG_locked bit and test PG_waiters at the same time fairly
+ * portably (architectures that do LL/SC can test any bit, while x86 can
+ * test the sign bit).
  */
 void unlock_page(struct page *page)
 {
+       BUILD_BUG_ON(PG_waiters != 7);
        page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       clear_bit_unlock(PG_locked, &page->flags);
-       smp_mb__after_atomic();
-       wake_up_page(page, PG_locked);
+       if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
+               wake_up_page_bit(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
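
A user-space illustration of the trick may help here. The sketch below uses the GCC/Clang __atomic builtins rather than the kernel's asm: it clears bit 0 (the lock) and reports whether bit 7 (the waiters flag) is set. Because the AND never touches bit 7, testing it in the returned old value is equivalent to testing the sign of the resulting byte, which is what the x86 version does in one locked instruction.

#include <stdbool.h>
#include <stdio.h>

/* Portable sketch of clear_bit_unlock_is_negative_byte(): clear the lock
 * bit and learn, from the same atomic op, whether a waiter is queued. */
static bool clear_lock_and_test_waiters(unsigned char *byte)
{
        unsigned char old = __atomic_fetch_and(byte, (unsigned char)~0x01u,
                                               __ATOMIC_RELEASE);
        return old & 0x80;      /* bit 7 is unchanged by the AND */
}

int main(void)
{
        unsigned char flags = 0x81;     /* locked, with a waiter queued */

        printf("waiters: %d, flags after unlock: 0x%02x\n",
               clear_lock_and_test_waiters(&flags), flags);
        return 0;
}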
 
index fd97f1dbce290f39e1d0d0367006df954f20e8f1..dd7b24e083c5b1f76851eb0b5e3359dde92d910e 100644 (file)
 #include <linux/rmap.h>
 #include "internal.h"
 
-static void clear_exceptional_entry(struct address_space *mapping,
-                                   pgoff_t index, void *entry)
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+                              void *entry)
 {
        struct radix_tree_node *node;
        void **slot;
 
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
-               return;
-
-       if (dax_mapping(mapping)) {
-               dax_delete_mapping_entry(mapping, index);
-               return;
-       }
        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
@@ -55,6 +47,56 @@ unlock:
        spin_unlock_irq(&mapping->tree_lock);
 }
 
+/*
+ * Unconditionally remove exceptional entry. Usually called from truncate path.
+ */
+static void truncate_exceptional_entry(struct address_space *mapping,
+                                      pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return;
+
+       if (dax_mapping(mapping)) {
+               dax_delete_mapping_entry(mapping, index);
+               return;
+       }
+       clear_shadow_entry(mapping, index, entry);
+}
+
+/*
+ * Invalidate exceptional entry if easily possible. This handles exceptional
+ * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
+ * clean entries.
+ */
+static int invalidate_exceptional_entry(struct address_space *mapping,
+                                       pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
+/*
+ * Invalidate exceptional entry if clean. This handles exceptional entries for
+ * invalidate_inode_pages2() so for DAX it evicts only clean entries.
+ */
+static int invalidate_exceptional_entry2(struct address_space *mapping,
+                                        pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry_sync(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        }
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               invalidate_exceptional_entry(mapping, index,
+                                                            page);
                                continue;
                        }
 
@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               if (!invalidate_exceptional_entry2(mapping,
+                                                                  index, page))
+                                       ret = -EBUSY;
                                continue;
                        }
 
index 950fd2e64bb73b9f261188bba272ea7e7ec10249..12262c0cc6914e6a5eebac1b887b129a15fb7466 100644 (file)
@@ -39,6 +39,9 @@
 #include "hash-map.h"
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#include "memmodel.h"
+#endif
 #include "emit-rtl.h"
 #include "debug.h"
 #include "target.h"
@@ -91,6 +94,9 @@
 #include "tree-ssa-alias.h"
 #include "tree-ssa.h"
 #include "stringpool.h"
+#if BUILDING_GCC_VERSION >= 7000
+#include "tree-vrp.h"
+#endif
 #include "tree-ssanames.h"
 #include "print-tree.h"
 #include "tree-eh.h"
@@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c
        return NULL;
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       cgraph_node_ptr alias;
+
+       if (callback(node, data))
+               return true;
+
+       for (alias = node->same_body; alias; alias = alias->next) {
+               if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE)
+                       if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable))
+                               return true;
+       }
+
+       return false;
+}
+
 #define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
        for ((node) = cgraph_first_function_with_gimple_body(); (node); \
                (node) = cgraph_next_function_with_gimple_body(node))
@@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign;
 typedef union gimple_statement_d gcall;
 typedef union gimple_statement_d gcond;
 typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d ggoto;
 typedef union gimple_statement_d gphi;
 typedef union gimple_statement_d greturn;
 
@@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return stmt;
@@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt)
 
 typedef struct rtx_def rtx_insn;
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       if (DECL_SECTION_NAME(decl) == NULL_TREE)
+               return NULL;
+
+       return TREE_STRING_POINTER(DECL_SECTION_NAME(decl));
+}
+
 static inline void set_decl_section_name(tree node, const char *value)
 {
        if (value)
@@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign;
 typedef struct gimple_statement_call gcall;
 typedef struct gimple_statement_base gcond;
 typedef struct gimple_statement_base gdebug;
+typedef struct gimple_statement_base ggoto;
 typedef struct gimple_statement_phi gphi;
 typedef struct gimple_statement_base greturn;
 
@@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi>(stmt);
@@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 
 #define INSN_DELETED_P(insn) (insn)->deleted()
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       return DECL_SECTION_NAME(decl);
+}
+
 /* symtab/cgraph related */
 #define debug_cgraph_node(node) (node)->debug()
 #define cgraph_get_node(decl) cgraph_node::get(decl)
@@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 #define cgraph_n_nodes symtab->cgraph_count
 #define cgraph_max_uid symtab->cgraph_max_uid
 #define varpool_get_node(decl) varpool_node::get(decl)
+#define dump_varpool_node(file, node) (node)->dump(file)
 
 #define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
        (caller)->create_edge((callee), (call_stmt), (count), (freq))
@@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
        return node->get_alias_target();
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
+}
+
 static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
 {
        return symtab->add_cgraph_insertion_hook(hook, data);
@@ -729,6 +792,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
        return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
 }
 
+template <>
+template <>
+inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
+{
+       return gs->code == GIMPLE_GOTO;
+}
+
 template <>
 template <>
 inline bool is_a_helper<const greturn *>::test(const_gimple gs)
@@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt)
        return as_a<const gcall *>(stmt);
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return as_a<ggoto *>(stmt);
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return as_a<const ggoto *>(stmt);
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi *>(stmt);
@@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s)
 #define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning)  \
+       get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep)
+#endif
+
 #endif
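
A hypothetical use of one of the new wrappers, purely for illustration (the helper and the ".init" check are invented): since the two get_decl_section_name() definitions above either unwrap the STRING_CST held in DECL_SECTION_NAME or return the string directly, pass code written against the wrapper stays version-agnostic.

/* Hypothetical plugin helper, not part of this patch; assumes the usual
 * plugin includes that provide const_tree and strncmp(). */
static bool decl_in_init_section(const_tree decl)
{
        const char *section = get_decl_section_name(decl);

        return section && strncmp(section, ".init", 5) == 0;
}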
index 12541126575b1e2416ad48d58f1d01f8e08a5d93..8ff203ad48093f57fccf04bb9d7c9b9e1952603b 100644 (file)
@@ -328,9 +328,9 @@ static enum tree_code get_op(tree *rhs)
                        op = LROTATE_EXPR;
                        /*
                         * This code limits the value of random_const to
-                        * the size of a wide int for the rotation
+                        * the size of a long for the rotation
                         */
-                       random_const &= HOST_BITS_PER_WIDE_INT - 1;
+                       random_const %= TYPE_PRECISION(long_unsigned_type_node);
                        break;
                }
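
The concern behind this change can be shown in plain C. The sketch below is self-contained and assumes a 64-bit unsigned long; reducing the rotate count modulo the operand width is the same idea as the random_const %= TYPE_PRECISION(...) line above.

#include <stdio.h>

/* Rotate left, keeping the count below the (assumed 64-bit) operand width. */
static unsigned long rotl64(unsigned long v, unsigned int r)
{
        r %= 64;
        return r ? (v << r) | (v >> (64 - r)) : v;
}

int main(void)
{
        printf("%lx\n", rotl64(0x1UL, 65));     /* acts as a rotate by 1 */
        return 0;
}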