blktrace: switch trace spinlock to a raw spinlock
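
The running_trace_lock protects running_trace_list and is acquired from
trace_note_tsk(), which runs in tracepoint context, i.e. with preemption
disabled. On PREEMPT_RT a spinlock_t is a sleeping lock and must not be
taken in such a context; a raw_spinlock_t keeps the classic spinning
behaviour on all preemption models, so the lock and all of its
lock/unlock sites are converted to the raw_ variants.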
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 431e41bc4c23293b281a0faaa9e78e7d142f0abf..af68a67179b48a54d4eb3f77f4b9fc776c49da9c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
 static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC  0x1
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
        struct blk_trace *bt;
 
        tsk->btrace_seq = blktrace_seq;
-       spin_lock_irqsave(&running_trace_lock, flags);
+       raw_spin_lock_irqsave(&running_trace_lock, flags);
        list_for_each_entry(bt, &running_trace_list, running_list) {
                trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
                           sizeof(tsk->comm), 0);
        }
-       spin_unlock_irqrestore(&running_trace_lock, flags);
+       raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -666,9 +666,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;
-                       spin_lock_irq(&running_trace_lock);
+                       raw_spin_lock_irq(&running_trace_lock);
                        list_add(&bt->running_list, &running_trace_list);
-                       spin_unlock_irq(&running_trace_lock);
+                       raw_spin_unlock_irq(&running_trace_lock);
 
                        trace_note_time(bt);
                        ret = 0;
@@ -676,9 +676,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
-                       spin_lock_irq(&running_trace_lock);
+                       raw_spin_lock_irq(&running_trace_lock);
                        list_del_init(&bt->running_list);
-                       spin_unlock_irq(&running_trace_lock);
+                       raw_spin_unlock_irq(&running_trace_lock);
                        relay_flush(bt->rchan);
                        ret = 0;
                }
@@ -1608,9 +1608,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
        if (bt->trace_state == Blktrace_running) {
                bt->trace_state = Blktrace_stopped;
-               spin_lock_irq(&running_trace_lock);
+               raw_spin_lock_irq(&running_trace_lock);
                list_del_init(&bt->running_list);
-               spin_unlock_irq(&running_trace_lock);
+               raw_spin_unlock_irq(&running_trace_lock);
                relay_flush(bt->rchan);
        }
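
For readers unfamiliar with the raw lock API, the snippet below is a
minimal, self-contained sketch, not part of the patch; all demo_* names
are hypothetical. It shows the pattern the conversion adopts: a raw
spinlock guarding a list that is walked from a context where preemption
may be disabled, such as a tracepoint.

/*
 * Minimal sketch (hypothetical names) of the locking pattern used above.
 */
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head node;
	int payload;
};

static LIST_HEAD(demo_list);
/*
 * Unlike spinlock_t, raw_spinlock_t remains a spinning lock on
 * PREEMPT_RT, so taking it with preemption or interrupts disabled
 * is safe on every preemption model.
 */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_walk(void)
{
	struct demo_entry *e;
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry(e, &demo_list, node)
		pr_info("entry %d\n", e->payload);
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}

The trade-off is that a raw lock never sleeps, so the critical section
should stay short; in trace_note_tsk() above it scales with the length
of running_trace_list, which is acceptable for a debug facility like
blktrace.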