Merge branch 'akpm' (patches from Andrew)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 8a8c3c208c5e4b0f05694c93b177ff7f4fac5ee7..5e9247dc2515833e78e5f0899795b1228542da2f 100644
@@ -124,9 +124,6 @@ static inline __pure u32 encode_tail(int cpu, int idx)
 {
        u32 tail;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(idx > 3);
-#endif
        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
 
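For reference, the tail word packs the CPU number (stored as cpu + 1 so that a zero tail means "no waiter queued") next to the 2-bit node index, which is what the /* assume < 4 */ comment refers to; with the overflow case now handled explicitly in the slowpath below, the debug-only BUG_ON is no longer needed. A minimal standalone sketch of that packing, with assumed offset values standing in for the kernel's _Q_TAIL_IDX_OFFSET and _Q_TAIL_CPU_OFFSET:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout for illustration only; the kernel derives the
 * real offsets from the lock word layout. */
#define TAIL_IDX_OFFSET	16	/* 2-bit per-context node index */
#define TAIL_CPU_OFFSET	18	/* CPU number + 1 */

static uint32_t encode_tail(int cpu, int idx)
{
	uint32_t tail;

	tail  = (uint32_t)(cpu + 1) << TAIL_CPU_OFFSET;
	tail |= (uint32_t)idx << TAIL_IDX_OFFSET;	/* assume idx < 4 */

	return tail;
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (int)(tail >> TAIL_IDX_OFFSET) & 0x3;
}

int main(void)
{
	int cpu, idx;

	decode_tail(encode_tail(5, 2), &cpu, &idx);
	printf("cpu=%d idx=%d\n", cpu, idx);	/* prints cpu=5 idx=2 */
	return 0;
}
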
@@ -412,12 +409,28 @@ pv_queue:
        idx = node->count++;
        tail = encode_tail(smp_processor_id(), idx);
 
+       /*
+        * 4 nodes are allocated based on the assumption that there will
+        * not be nested NMIs taking spinlocks. That may not be true on
+        * some architectures, though even there the chance of needing
+        * more than 4 nodes is extremely small. When it does happen,
+        * we fall back to spinning on the lock directly without using
+        * any MCS node. This is not the most elegant solution, but it
+        * is simple enough.
+        */
+       if (unlikely(idx >= MAX_NODES)) {
+               qstat_inc(qstat_lock_no_node, true);
+               while (!queued_spin_trylock(lock))
+                       cpu_relax();
+               goto release;
+       }
+
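/*
 * Illustrative sketch (hypothetical userspace model, not kernel code):
 * the node pool that "idx" selects from is per CPU and deliberately
 * tiny.  Four slots normally cover the task, soft-IRQ, hard-IRQ and
 * NMI nesting contexts, which is why running out of nodes is only
 * expected under unusual NMI nesting.  The model below mirrors the
 * slot selection and the exhaustion check handled by the fallback
 * above; names and layout here are assumptions, not the kernel's.
 */
#include <stdio.h>

#define MAX_NODES	4	/* task, softirq, hardirq, NMI */

struct qnode {
	struct qnode	*next;		/* MCS queue link */
	int		locked;		/* set when handed the lock */
	int		count;		/* nesting level, slot 0 only */
};

/* The kernel keeps one such array per CPU; one array is enough here. */
static struct qnode node_pool[MAX_NODES];

/*
 * Pick the slot for the current nesting level, or return NULL when the
 * pool is exhausted and the caller must spin on the lock word directly.
 */
static struct qnode *grab_node(void)
{
	int idx = node_pool[0].count++;

	return idx < MAX_NODES ? &node_pool[idx] : NULL;
}

int main(void)
{
	/* Grab five slots without releasing: the fifth attempt must fail. */
	for (int i = 0; i < 5; i++)
		printf("attempt %d -> %s\n", i,
		       grab_node() ? "got a node" : "pool exhausted");
	return 0;
}
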
        node = grab_mcs_node(node, idx);
 
        /*
         * Keep counts of non-zero index values:
         */
-       qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+       qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
 
        /*
         * Ensure that we increment the head node->count before initialising