audit: report audit wait metric in audit status reply
diff --git a/kernel/audit.c b/kernel/audit.c
index a2f3e34aa724503a06ce0fdfe1ece209810899b3..d72663ac248c7bf02415f9cb5f103617d077ef25 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -136,6 +136,11 @@ u32                audit_sig_sid = 0;
 */
 static atomic_t        audit_lost = ATOMIC_INIT(0);
 
+/* Monotonically increasing sum of time the kernel has spent
+ * waiting while the backlog limit is exceeded.
+ */
+static atomic_t audit_backlog_wait_time_actual = ATOMIC_INIT(0);
+
 /* Hash for inode-based rules */
 struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
 
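
The counter itself stays kernel-internal; userspace reads it through a matching backlog_wait_time_actual field that the companion uapi change adds to struct audit_status, selected by a new AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL mask bit. Below is a sketch of the expected shape of that addition to include/uapi/linux/audit.h; the 0x0080 value is an assumption that merely continues the existing AUDIT_STATUS_* bit sequence (AUDIT_STATUS_LOST is 0x0040) and should be checked against the real header.

#include <linux/types.h>

/* Sketch of the companion uapi addition (include/uapi/linux/audit.h).
 * Only the last field and the mask bit are new; the rest mirrors the
 * existing struct audit_status layout.
 */
#define AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL	0x0080	/* assumed value */

struct audit_status {
	__u32	mask;			/* Bit mask for valid entries */
	__u32	enabled;		/* 1 = enabled, 0 = disabled */
	__u32	failure;		/* Failure-to-log action */
	__u32	pid;			/* pid of auditd process */
	__u32	rate_limit;		/* messages rate limit (per second) */
	__u32	backlog_limit;		/* waiting messages limit */
	__u32	lost;			/* messages lost */
	__u32	backlog;		/* messages waiting in queue */
	union {
		__u32	version;	/* deprecated: audit api version num */
		__u32	feature_bitmap;	/* bitmap of kernel audit features */
	};
	__u32	backlog_wait_time;	/* message queue wait timeout */
	__u32	backlog_wait_time_actual; /* new: time spent waiting while
					   * the backlog limit was exceeded */
};
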
@@ -1201,17 +1206,18 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case AUDIT_GET: {
                struct audit_status     s;
                memset(&s, 0, sizeof(s));
-               s.enabled               = audit_enabled;
-               s.failure               = audit_failure;
+               s.enabled                  = audit_enabled;
+               s.failure                  = audit_failure;
                /* NOTE: use pid_vnr() so the PID is relative to the current
                 *       namespace */
-               s.pid                   = auditd_pid_vnr();
-               s.rate_limit            = audit_rate_limit;
-               s.backlog_limit         = audit_backlog_limit;
-               s.lost                  = atomic_read(&audit_lost);
-               s.backlog               = skb_queue_len(&audit_queue);
-               s.feature_bitmap        = AUDIT_FEATURE_BITMAP_ALL;
-               s.backlog_wait_time     = audit_backlog_wait_time;
+               s.pid                      = auditd_pid_vnr();
+               s.rate_limit               = audit_rate_limit;
+               s.backlog_limit            = audit_backlog_limit;
+               s.lost                     = atomic_read(&audit_lost);
+               s.backlog                  = skb_queue_len(&audit_queue);
+               s.feature_bitmap           = AUDIT_FEATURE_BITMAP_ALL;
+               s.backlog_wait_time        = audit_backlog_wait_time;
+               s.backlog_wait_time_actual = atomic_read(&audit_backlog_wait_time_actual);
                audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
                break;
        }
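
Given that field, the AUDIT_GET reply carries the accumulated wait time alongside the other status counters, and newer auditctl builds print it as part of the auditctl -s output. Below is a minimal, hedged sketch of querying it directly over NETLINK_AUDIT; it assumes uapi headers that already define backlog_wait_time_actual, needs CAP_AUDIT_CONTROL, and keeps error handling to the bare minimum.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/audit.h>

/* Hedged sketch: send AUDIT_GET to the kernel and print the
 * backlog_wait_time_actual field from the returned struct audit_status.
 * Needs CAP_AUDIT_CONTROL and headers that already define the field.
 */
int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct nlmsghdr req;
	struct {
		struct nlmsghdr nlh;
		char data[8192];
	} resp;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

	if (fd < 0)
		return 1;

	/* AUDIT_GET carries no payload; the reply does. */
	memset(&req, 0, sizeof(req));
	req.nlmsg_len = NLMSG_LENGTH(0);
	req.nlmsg_type = AUDIT_GET;
	req.nlmsg_flags = NLM_F_REQUEST;
	req.nlmsg_seq = 1;

	if (sendto(fd, &req, req.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	/* The kernel answers with an AUDIT_GET message whose payload is a
	 * struct audit_status snapshot (or NLMSG_ERROR on failure). */
	for (;;) {
		ssize_t len = recv(fd, &resp, sizeof(resp), 0);

		if (len <= 0 || resp.nlh.nlmsg_type == NLMSG_ERROR)
			return 1;
		if (resp.nlh.nlmsg_type == AUDIT_GET)
			break;
	}

	struct audit_status *s = NLMSG_DATA(&resp.nlh);

	printf("backlog_wait_time_actual=%u\n", s->backlog_wait_time_actual);
	close(fd);
	return 0;
}
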
@@ -1315,6 +1321,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        audit_log_config_change("lost", 0, lost, 1);
                        return lost;
                }
+               if (s.mask == AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL) {
+                       u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0);
+
+                       audit_log_config_change("backlog_wait_time_actual", 0, actual, 1);
+                       return actual;
+               }
                break;
        }
        case AUDIT_GET_FEATURE:
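
The reset path mirrors the AUDIT_STATUS_LOST handling directly above it: the kernel logs and returns the old value, and zeroes the counter only when AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL is the sole bit set in the request mask. A minimal, hedged sketch of issuing that reset from userspace, under the same assumptions as the query sketch above:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/audit.h>

/* Hedged sketch: AUDIT_SET with only the BACKLOG_WAIT_TIME_ACTUAL mask bit
 * asks the kernel to zero the accumulated wait-time counter.
 */
int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct audit_status s;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.s));
	req.nlh.nlmsg_type = AUDIT_SET;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_seq = 1;
	/* The kernel honours the reset only when this is the only bit set,
	 * matching the existing AUDIT_STATUS_LOST behaviour. */
	req.s.mask = AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	close(fd);
	return 0;
}
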
@@ -1826,12 +1838,15 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
                        /* sleep if we are allowed and we haven't exhausted our
                         * backlog wait limit */
                        if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
+                               long rtime = stime;
+
                                DECLARE_WAITQUEUE(wait, current);
 
                                add_wait_queue_exclusive(&audit_backlog_wait,
                                                         &wait);
                                set_current_state(TASK_UNINTERRUPTIBLE);
-                               stime = schedule_timeout(stime);
+                               stime = schedule_timeout(rtime);
+                               atomic_add(rtime - stime, &audit_backlog_wait_time_actual);
                                remove_wait_queue(&audit_backlog_wait, &wait);
                        } else {
                                if (audit_rate_check() && printk_ratelimit())
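
The amount charged here is rtime - stime, i.e. the requested timeout minus the unexpired portion that schedule_timeout() handed back, so the counter accumulates the jiffies actually spent asleep on audit_backlog_wait and charges only the slept part when the waiter is woken early. As a hedged illustration only (not audit code), the userspace analogue below shows the same requested-minus-remaining accounting with nanosleep(), which likewise reports the unslept remainder when interrupted.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* Illustration of the "requested minus remaining" accounting used above:
 * request a long sleep, get interrupted early, and charge only the part
 * that actually elapsed (nanosleep() reports the remainder, much like
 * schedule_timeout() returns the unexpired jiffies).
 */
static void on_alarm(int sig)
{
	(void)sig;	/* only purpose is to interrupt the sleep */
}

int main(void)
{
	struct timespec req = { .tv_sec = 5 }, rem = { 0 };
	struct sigaction sa;
	double waited;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* no SA_RESTART: let nanosleep fail with EINTR */
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);			/* wake up after ~1s of a 5s request */

	if (nanosleep(&req, &rem) == 0) {
		/* sleep ran to completion; nothing was left over */
		rem.tv_sec = 0;
		rem.tv_nsec = 0;
	}

	waited = (double)(req.tv_sec - rem.tv_sec) +
		 (double)(req.tv_nsec - rem.tv_nsec) / 1e9;
	printf("actually waited ~%.2f of %ld requested seconds\n",
	       waited, (long)req.tv_sec);
	return 0;
}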