mm: memcontrol: make cgroup stats and events query API explicitly local
[sfrench/cifs-2.6.git] / ipc / util.c
index 095274a871f89b9d37f825d9190355a8592a10b3..d126d156efc64e7d2d710197cf50377c12ad620d 100644 (file)
@@ -109,7 +109,7 @@ static const struct rhashtable_params ipc_kht_params = {
  * @ids: ipc identifier set
  *
  * Set up the sequence range to use for the ipc identifier range (limited
- * below IPCMNI) then initialise the keys hashtable and ids idr.
+ * below ipc_mni) then initialise the keys hashtable and ids idr.
  */
 void ipc_init_ids(struct ipc_ids *ids)
 {
@@ -119,6 +119,7 @@ void ipc_init_ids(struct ipc_ids *ids)
        rhashtable_init(&ids->key_ht, &ipc_kht_params);
        idr_init(&ids->ipcs_idr);
        ids->max_idx = -1;
+       ids->last_idx = -1;
 #ifdef CONFIG_CHECKPOINT_RESTORE
        ids->next_id = -1;
 #endif
@@ -192,6 +193,10 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
  *
  * The caller must own kern_ipc_perm.lock of the new object.
  * On error, the function returns a (negative) error code.
+ *
+ * To conserve sequence number space, especially with extended ipc_mni,
+ * the sequence number is incremented only when the returned ID is less than
+ * the last one.
  */
 static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
 {
@@ -215,17 +220,42 @@ static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
         */
 
        if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */
-               new->seq = ids->seq++;
-               if (ids->seq > IPCID_SEQ_MAX)
-                       ids->seq = 0;
-               idx = idr_alloc(&ids->ipcs_idr, new, 0, 0, GFP_NOWAIT);
+               int max_idx;
+
+               max_idx = max(ids->in_use*3/2, ipc_min_cycle);
+               max_idx = min(max_idx, ipc_mni);
+
+               /* allocate the idx, with a NULL struct kern_ipc_perm */
+               idx = idr_alloc_cyclic(&ids->ipcs_idr, NULL, 0, max_idx,
+                                       GFP_NOWAIT);
+
+               if (idx >= 0) {
+                       /*
+                        * idx got allocated successfully.
+                        * Now calculate the sequence number and set the
+                        * pointer for real.
+                        */
+                       if (idx <= ids->last_idx) {
+                               ids->seq++;
+                               if (ids->seq >= ipcid_seq_max())
+                                       ids->seq = 0;
+                       }
+                       ids->last_idx = idx;
+
+                       new->seq = ids->seq;
+                       /* no need for smp_wmb(), this is done
+                        * inside idr_replace, as part of
+                        * rcu_assign_pointer
+                        */
+                       idr_replace(&ids->ipcs_idr, new, idx);
+               }
        } else {
                new->seq = ipcid_to_seqx(next_id);
                idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id),
                                0, GFP_NOWAIT);
        }
        if (idx >= 0)
-               new->id = SEQ_MULTIPLIER * new->seq + idx;
+               new->id = (new->seq << ipcmni_seq_shift()) + idx;
        return idx;
 }
 
@@ -253,8 +283,8 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
        /* 1) Initialize the refcount so that ipc_rcu_putref works */
        refcount_set(&new->refcount, 1);
 
-       if (limit > IPCMNI)
-               limit = IPCMNI;
+       if (limit > ipc_mni)
+               limit = ipc_mni;
 
        if (ids->in_use >= limit)
                return -ENOSPC;
@@ -737,7 +767,7 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
        if (total >= ids->in_use)
                return NULL;
 
-       for (; pos < IPCMNI; pos++) {
+       for (; pos < ipc_mni; pos++) {
                ipc = idr_find(&ids->ipcs_idr, pos);
                if (ipc != NULL) {
                        *new_pos = pos + 1;