Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 25 Nov 2017 18:37:16 +0000 (08:37 -1000)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 25 Nov 2017 18:37:16 +0000 (08:37 -1000)
Pull timer updates from Thomas Gleixner:

 - The final conversion of timer wheel timers to timer_setup().

   A few manual conversions and a large coccinelle assisted sweep and
   the removal of the old initialization mechanisms and the related
   code.

 - Remove the now unused VSYSCALL update code

 - Fix permissions of /proc/timer_list. I still need to get rid of that
   file completely

 - Rename a misnamed clocksource function and remove a stale declaration

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  m68k/macboing: Fix missed timer callback assignment
  treewide: Remove TIMER_FUNC_TYPE and TIMER_DATA_TYPE casts
  timer: Remove redundant __setup_timer*() macros
  timer: Pass function down to initialization routines
  timer: Remove unused data arguments from macros
  timer: Switch callback prototype to take struct timer_list * argument
  timer: Pass timer_list pointer to callbacks unconditionally
  Coccinelle: Remove setup_timer.cocci
  timer: Remove setup_*timer() interface
  timer: Remove init_timer() interface
  treewide: setup_timer() -> timer_setup() (2 field)
  treewide: setup_timer() -> timer_setup()
  treewide: init_timer() -> setup_timer()
  treewide: Switch DEFINE_TIMER callbacks to struct timer_list *
  s390: cmm: Convert timers to use timer_setup()
  lightnvm: Convert timers to use timer_setup()
  drivers/net: cris: Convert timers to use timer_setup()
  drm/vc4: Convert timers to use timer_setup()
  block/laptop_mode: Convert timers to use timer_setup()
  net/atm/mpc: Avoid open-coded assignment of timer callback function
  ...

1  2 
drivers/net/tun.c
drivers/target/target_core_user.c
security/keys/gc.c

diff --combined drivers/net/tun.c
index 6a7bde9bc4b292e349dd92830de494185f7fdc39,c3af08f24679bea4eb09ffdb8bcb0c7e457f22e2..95749006d687b971a49894c903fcc611bc25c375
@@@ -444,9 -444,9 +444,9 @@@ static void tun_flow_delete_by_queue(st
        spin_unlock_bh(&tun->lock);
  }
  
- static void tun_flow_cleanup(unsigned long data)
+ static void tun_flow_cleanup(struct timer_list *t)
  {
-       struct tun_struct *tun = (struct tun_struct *)data;
+       struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
        unsigned long delay = tun->ageing_time;
        unsigned long next_timer = jiffies + delay;
        unsigned long count = 0;
@@@ -1196,7 -1196,9 +1196,9 @@@ static void tun_flow_init(struct tun_st
                INIT_HLIST_HEAD(&tun->flows[i]);
  
        tun->ageing_time = TUN_FLOW_EXPIRE;
-       setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+       timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+       mod_timer(&tun->flow_gc_timer,
+                 round_jiffies_up(jiffies + tun->ageing_time));
  }
  
  static void tun_flow_uninit(struct tun_struct *tun)
@@@ -2370,8 -2372,6 +2372,8 @@@ static int set_offload(struct tun_struc
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
 +
 +              arg &= ~TUN_F_UFO;
        }
  
        /* This gives the user a way to test for new features in future by
index cc2468a299d3381ab6f1a89bed45f6575f2e5af3,a8eaed2c211a38dc1ef94043a77f8160e94ab4a9..a415d87f22d24237f1ae67539cfbb91a33ddbc9d
@@@ -150,8 -150,6 +150,8 @@@ struct tcmu_dev 
        wait_queue_head_t nl_cmd_wq;
  
        char dev_config[TCMU_CONFIG_LEN];
 +
 +      int nl_reply_supported;
  };
  
  #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@@ -432,6 -430,7 +432,6 @@@ static struct tcmu_cmd *tcmu_alloc_cmd(
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
 -      int cmd_id;
  
        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
        if (!tcmu_cmd)
  
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
 -      if (udev->cmd_time_out)
 -              tcmu_cmd->deadline = jiffies +
 -                                      msecs_to_jiffies(udev->cmd_time_out);
  
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
                return NULL;
        }
  
 -      idr_preload(GFP_KERNEL);
 -      spin_lock_irq(&udev->commands_lock);
 -      cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
 -              USHRT_MAX, GFP_NOWAIT);
 -      spin_unlock_irq(&udev->commands_lock);
 -      idr_preload_end();
 -
 -      if (cmd_id < 0) {
 -              tcmu_free_cmd(tcmu_cmd);
 -              return NULL;
 -      }
 -      tcmu_cmd->cmd_id = cmd_id;
 -
        return tcmu_cmd;
  }
  
@@@ -731,30 -746,6 +731,30 @@@ static inline size_t tcmu_cmd_get_cmd_s
        return command_size;
  }
  
 +static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
 +{
 +      struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 +      unsigned long tmo = udev->cmd_time_out;
 +      int cmd_id;
 +
 +      if (tcmu_cmd->cmd_id)
 +              return 0;
 +
 +      cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
 +      if (cmd_id < 0) {
 +              pr_err("tcmu: Could not allocate cmd id.\n");
 +              return cmd_id;
 +      }
 +      tcmu_cmd->cmd_id = cmd_id;
 +
 +      if (!tmo)
 +              return 0;
 +
 +      tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
 +      mod_timer(&udev->timeout, tcmu_cmd->deadline);
 +      return 0;
 +}
 +
  static sense_reason_t
  tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
  {
        entry = (void *) mb + CMDR_OFF + cmd_head;
        memset(entry, 0, command_size);
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
 -      entry->hdr.cmd_id = tcmu_cmd->cmd_id;
  
        /* Handle allocating space from the data area */
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        }
        entry->req.iov_bidi_cnt = iov_cnt;
  
 +      ret = tcmu_setup_cmd_timer(tcmu_cmd);
 +      if (ret) {
 +              tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
 +              mutex_unlock(&udev->cmdr_lock);
 +              return TCM_OUT_OF_RESOURCES;
 +      }
 +      entry->hdr.cmd_id = tcmu_cmd->cmd_id;
 +
        /*
         * Recalaulate the command's base size and size according
         * to the actual needs
  static sense_reason_t
  tcmu_queue_cmd(struct se_cmd *se_cmd)
  {
 -      struct se_device *se_dev = se_cmd->se_dev;
 -      struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        sense_reason_t ret;
  
        ret = tcmu_queue_cmd_ring(tcmu_cmd);
        if (ret != TCM_NO_SENSE) {
                pr_err("TCMU: Could not queue command\n");
 -              spin_lock_irq(&udev->commands_lock);
 -              idr_remove(&udev->commands, tcmu_cmd->cmd_id);
 -              spin_unlock_irq(&udev->commands_lock);
  
                tcmu_free_cmd(tcmu_cmd);
        }
@@@ -1055,9 -1044,9 +1055,9 @@@ static int tcmu_check_expired_cmd(int i
        return 0;
  }
  
- static void tcmu_device_timedout(unsigned long data)
+ static void tcmu_device_timedout(struct timer_list *t)
  {
-       struct tcmu_dev *udev = (struct tcmu_dev *)data;
+       struct tcmu_dev *udev = from_timer(udev, t, timeout);
        unsigned long flags;
  
        spin_lock_irqsave(&udev->commands_lock, flags);
@@@ -1117,14 -1106,11 +1117,13 @@@ static struct se_device *tcmu_alloc_dev
        idr_init(&udev->commands);
        spin_lock_init(&udev->commands_lock);
  
-       setup_timer(&udev->timeout, tcmu_device_timedout,
-               (unsigned long)udev);
+       timer_setup(&udev->timeout, tcmu_device_timedout, 0);
  
        init_waitqueue_head(&udev->nl_cmd_wq);
        spin_lock_init(&udev->nl_cmd_lock);
  
 +      INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
 +
        return &udev->se_dev;
  }
  
@@@ -1293,54 -1279,10 +1292,54 @@@ static void tcmu_dev_call_rcu(struct rc
        kfree(udev);
  }
  
 +static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 +{
 +      if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
 +              kmem_cache_free(tcmu_cmd_cache, cmd);
 +              return 0;
 +      }
 +      return -EINVAL;
 +}
 +
 +static void tcmu_blocks_release(struct tcmu_dev *udev)
 +{
 +      int i;
 +      struct page *page;
 +
 +      /* Try to release all block pages */
 +      mutex_lock(&udev->cmdr_lock);
 +      for (i = 0; i <= udev->dbi_max; i++) {
 +              page = radix_tree_delete(&udev->data_blocks, i);
 +              if (page) {
 +                      __free_page(page);
 +                      atomic_dec(&global_db_count);
 +              }
 +      }
 +      mutex_unlock(&udev->cmdr_lock);
 +}
 +
  static void tcmu_dev_kref_release(struct kref *kref)
  {
        struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
        struct se_device *dev = &udev->se_dev;
 +      struct tcmu_cmd *cmd;
 +      bool all_expired = true;
 +      int i;
 +
 +      vfree(udev->mb_addr);
 +      udev->mb_addr = NULL;
 +
 +      /* Upper layer should drain all requests before calling this */
 +      spin_lock_irq(&udev->commands_lock);
 +      idr_for_each_entry(&udev->commands, cmd, i) {
 +              if (tcmu_check_and_free_pending_cmd(cmd) != 0)
 +                      all_expired = false;
 +      }
 +      idr_destroy(&udev->commands);
 +      spin_unlock_irq(&udev->commands_lock);
 +      WARN_ON(!all_expired);
 +
 +      tcmu_blocks_release(udev);
  
        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
  }
@@@ -1363,10 -1305,6 +1362,10 @@@ static void tcmu_init_genl_cmd_reply(st
  
        if (!tcmu_kern_cmd_reply_supported)
                return;
 +
 +      if (udev->nl_reply_supported <= 0)
 +              return;
 +
  relock:
        spin_lock(&udev->nl_cmd_lock);
  
@@@ -1393,9 -1331,6 +1392,9 @@@ static int tcmu_wait_genl_cmd_reply(str
        if (!tcmu_kern_cmd_reply_supported)
                return 0;
  
 +      if (udev->nl_reply_supported <= 0)
 +              return 0;
 +
        pr_debug("sleeping for nl reply\n");
        wait_for_completion(&nl_cmd->complete);
  
@@@ -1540,6 -1475,8 +1539,6 @@@ static int tcmu_configure_device(struc
        WARN_ON(udev->data_size % PAGE_SIZE);
        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
  
 -      INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
 -
        info->version = __stringify(TCMU_MAILBOX_VERSION);
  
        info->mem[0].name = "tcm-user command & data buffer";
                dev->dev_attrib.emulate_write_cache = 0;
        dev->dev_attrib.hw_queue_depth = 128;
  
 +      /* If user didn't explicitly disable netlink reply support, use
 +       * module scope setting.
 +       */
 +      if (udev->nl_reply_supported >= 0)
 +              udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
 +
        /*
         * Get a ref incase userspace does a close on the uio device before
         * LIO has initiated tcmu_free_device.
@@@ -1595,7 -1526,6 +1594,7 @@@ err_netlink
        uio_unregister_device(&udev->uio_info);
  err_register:
        vfree(udev->mb_addr);
 +      udev->mb_addr = NULL;
  err_vzalloc:
        kfree(info->name);
        info->name = NULL;
        return ret;
  }
  
 -static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 -{
 -      if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
 -              kmem_cache_free(tcmu_cmd_cache, cmd);
 -              return 0;
 -      }
 -      return -EINVAL;
 -}
 -
  static bool tcmu_dev_configured(struct tcmu_dev *udev)
  {
        return udev->uio_info.uio_dev ? true : false;
  }
  
 -static void tcmu_blocks_release(struct tcmu_dev *udev)
 -{
 -      int i;
 -      struct page *page;
 -
 -      /* Try to release all block pages */
 -      mutex_lock(&udev->cmdr_lock);
 -      for (i = 0; i <= udev->dbi_max; i++) {
 -              page = radix_tree_delete(&udev->data_blocks, i);
 -              if (page) {
 -                      __free_page(page);
 -                      atomic_dec(&global_db_count);
 -              }
 -      }
 -      mutex_unlock(&udev->cmdr_lock);
 -}
 -
  static void tcmu_free_device(struct se_device *dev)
  {
        struct tcmu_dev *udev = TCMU_DEV(dev);
  static void tcmu_destroy_device(struct se_device *dev)
  {
        struct tcmu_dev *udev = TCMU_DEV(dev);
 -      struct tcmu_cmd *cmd;
 -      bool all_expired = true;
 -      int i;
  
        del_timer_sync(&udev->timeout);
  
        list_del(&udev->node);
        mutex_unlock(&root_udev_mutex);
  
 -      vfree(udev->mb_addr);
 -
 -      /* Upper layer should drain all requests before calling this */
 -      spin_lock_irq(&udev->commands_lock);
 -      idr_for_each_entry(&udev->commands, cmd, i) {
 -              if (tcmu_check_and_free_pending_cmd(cmd) != 0)
 -                      all_expired = false;
 -      }
 -      idr_destroy(&udev->commands);
 -      spin_unlock_irq(&udev->commands_lock);
 -      WARN_ON(!all_expired);
 -
 -      tcmu_blocks_release(udev);
 -
        tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
  
        uio_unregister_device(&udev->uio_info);
  
  enum {
        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
 -      Opt_err,
 +      Opt_nl_reply_supported, Opt_err,
  };
  
  static match_table_t tokens = {
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_hw_max_sectors, "hw_max_sectors=%u"},
 +      {Opt_nl_reply_supported, "nl_reply_supported=%d"},
        {Opt_err, NULL}
  };
  
@@@ -1719,17 -1691,6 +1718,17 @@@ static ssize_t tcmu_set_configfs_dev_pa
                        ret = tcmu_set_dev_attrib(&args[0],
                                        &(dev->dev_attrib.hw_max_sectors));
                        break;
 +              case Opt_nl_reply_supported:
 +                      arg_p = match_strdup(&args[0]);
 +                      if (!arg_p) {
 +                              ret = -ENOMEM;
 +                              break;
 +                      }
 +                      ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
 +                      kfree(arg_p);
 +                      if (ret < 0)
 +                              pr_err("kstrtoint() failed for nl_reply_supported=\n");
 +                      break;
                default:
                        break;
                }
@@@ -1772,7 -1733,8 +1771,7 @@@ static ssize_t tcmu_cmd_time_out_show(s
  {
        struct se_dev_attrib *da = container_of(to_config_group(item),
                                        struct se_dev_attrib, da_group);
 -      struct tcmu_dev *udev = container_of(da->da_dev,
 -                                      struct tcmu_dev, se_dev);
 +      struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
  
        return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
  }
@@@ -1879,34 -1841,6 +1878,34 @@@ static ssize_t tcmu_dev_size_store(stru
  }
  CONFIGFS_ATTR(tcmu_, dev_size);
  
 +static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
 +              char *page)
 +{
 +      struct se_dev_attrib *da = container_of(to_config_group(item),
 +                                              struct se_dev_attrib, da_group);
 +      struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
 +
 +      return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
 +}
 +
 +static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
 +              const char *page, size_t count)
 +{
 +      struct se_dev_attrib *da = container_of(to_config_group(item),
 +                                              struct se_dev_attrib, da_group);
 +      struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
 +      s8 val;
 +      int ret;
 +
 +      ret = kstrtos8(page, 0, &val);
 +      if (ret < 0)
 +              return ret;
 +
 +      udev->nl_reply_supported = val;
 +      return count;
 +}
 +CONFIGFS_ATTR(tcmu_, nl_reply_supported);
 +
  static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
                                             char *page)
  {
@@@ -1949,7 -1883,6 +1948,7 @@@ static struct configfs_attribute *tcmu_
        &tcmu_attr_dev_config,
        &tcmu_attr_dev_size,
        &tcmu_attr_emulate_write_cache,
 +      &tcmu_attr_nl_reply_supported,
        NULL,
  };
  
diff --combined security/keys/gc.c
index 6713fee893fb50ff1436a197202bed8e1b05a7cc,b93603724b8c0afb153d53039e3c2a81e33cc909..7207e6094dc1622c9a51beedc92288cdec244de1
@@@ -29,10 -29,10 +29,10 @@@ DECLARE_WORK(key_gc_work, key_garbage_c
  /*
   * Reaper for links from keyrings to dead keys.
   */
- static void key_gc_timer_func(unsigned long);
+ static void key_gc_timer_func(struct timer_list *);
  static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
  
 -static time_t key_gc_next_run = LONG_MAX;
 +static time64_t key_gc_next_run = TIME64_MAX;
  static struct key_type *key_gc_dead_keytype;
  
  static unsigned long key_gc_flags;
@@@ -53,12 -53,12 +53,12 @@@ struct key_type key_type_dead = 
   * Schedule a garbage collection run.
   * - time precision isn't particularly important
   */
 -void key_schedule_gc(time_t gc_at)
 +void key_schedule_gc(time64_t gc_at)
  {
        unsigned long expires;
 -      time_t now = current_kernel_time().tv_sec;
 +      time64_t now = ktime_get_real_seconds();
  
 -      kenter("%ld", gc_at - now);
 +      kenter("%lld", gc_at - now);
  
        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
@@@ -84,10 -84,10 +84,10 @@@ void key_schedule_gc_links(void
   * Some key's cleanup time was met after it expired, so we need to get the
   * reaper to go through a cycle finding expired keys.
   */
- static void key_gc_timer_func(unsigned long data)
+ static void key_gc_timer_func(struct timer_list *unused)
  {
        kenter("");
 -      key_gc_next_run = LONG_MAX;
 +      key_gc_next_run = TIME64_MAX;
        key_schedule_gc_links();
  }
  
@@@ -184,11 -184,11 +184,11 @@@ static void key_garbage_collector(struc
  
        struct rb_node *cursor;
        struct key *key;
 -      time_t new_timer, limit;
 +      time64_t new_timer, limit;
  
        kenter("[%lx,%x]", key_gc_flags, gc_state);
  
 -      limit = current_kernel_time().tv_sec;
 +      limit = ktime_get_real_seconds();
        if (limit > key_gc_delay)
                limit -= key_gc_delay;
        else
                gc_state |= KEY_GC_REAPING_DEAD_1;
        kdebug("new pass %x", gc_state);
  
 -      new_timer = LONG_MAX;
 +      new_timer = TIME64_MAX;
  
        /* As only this function is permitted to remove things from the key
         * serial tree, if cursor is non-NULL then it will always point to a
@@@ -235,7 -235,7 +235,7 @@@ continue_scanning
  
                if (gc_state & KEY_GC_SET_TIMER) {
                        if (key->expiry > limit && key->expiry < new_timer) {
 -                              kdebug("will expire %x in %ld",
 +                              kdebug("will expire %x in %lld",
                                       key_serial(key), key->expiry - limit);
                                new_timer = key->expiry;
                        }
@@@ -276,7 -276,7 +276,7 @@@ maybe_resched
         */
        kdebug("pass complete");
  
 -      if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
 +      if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
                new_timer += key_gc_delay;
                key_schedule_gc(new_timer);
        }