/*
 * drivers/staging/android/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "logger: " fmt

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/aio.h>
#include "logger.h"

#include <asm/ioctls.h>

/**
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 * @buffer:     The actual ring buffer
 * @misc:       The "misc" device representing the log
 * @wq:         The wait queue for @readers
 * @readers:    This log's readers
 * @mutex:      The mutex that protects the @buffer
 * @w_off:      The current write head offset
 * @head:       The head, or location that readers start reading at.
 * @size:       The size of the log
 * @logs:       The list of log channels
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
        unsigned char           *buffer;
        struct miscdevice       misc;
        wait_queue_head_t       wq;
        struct list_head        readers;
        struct mutex            mutex;
        size_t                  w_off;
        size_t                  head;
        size_t                  size;
        struct list_head        logs;
};

static LIST_HEAD(log_list);


/**
 * struct logger_reader - a logging device open for reading
 * @log:        The associated log
 * @list:       The associated entry in @logger_log's list
 * @r_off:      The current read head offset.
 * @r_all:      Reader can read all entries
 * @r_ver:      Reader ABI version
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
        struct logger_log       *log;
        struct list_head        list;
        size_t                  r_off;
        bool                    r_all;
        int                     r_ver;
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
static size_t logger_offset(struct logger_log *log, size_t n)
{
        return n & (log->size - 1);
}
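
/*
 * Note: the bitwise AND above is only a valid modulus because log->size is
 * required to be a power of two (see the comment above create_log()).
 * For example, with a 256 KiB buffer (size = 0x40000), an offset of 0x40006
 * wraps to 0x6.
 */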


/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *      1) Need to quickly obtain the associated log during an I/O operation
 *      2) Readers need to maintain state (logger_reader)
 *      3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader = file->private_data;

                return reader->log;
        }
        return file->private_data;
}

/*
 * get_entry_header - returns a pointer to the logger_entry header within
 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
 * be provided. Typically the return value will be a pointer within
 * 'log->buffer'.  However, a pointer to 'scratch' may be returned if
 * the log entry spans the end and beginning of the circular buffer.
 */
static struct logger_entry *get_entry_header(struct logger_log *log,
                size_t off, struct logger_entry *scratch)
{
        size_t len = min(sizeof(struct logger_entry), log->size - off);

        if (len != sizeof(struct logger_entry)) {
                memcpy(((void *) scratch), log->buffer + off, len);
                memcpy(((void *) scratch) + len, log->buffer,
                        sizeof(struct logger_entry) - len);
                return scratch;
        }

        return (struct logger_entry *) (log->buffer + off);
}

/*
 * get_entry_msg_len - Grabs the length of the message of the entry
 * starting from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function returns the size including the log entry structure.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
        struct logger_entry scratch;
        struct logger_entry *entry;

        entry = get_entry_header(log, off, &scratch);
        return entry->len;
}

static size_t get_user_hdr_len(int ver)
{
        if (ver < 2)
                return sizeof(struct user_logger_entry_compat);
        return sizeof(struct logger_entry);
}

static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
                                         char __user *buf)
{
        void *hdr;
        size_t hdr_len;
        struct user_logger_entry_compat v1;

        if (ver < 2) {
                v1.len      = entry->len;
                v1.__pad    = 0;
                v1.pid      = entry->pid;
                v1.tid      = entry->tid;
                v1.sec      = entry->sec;
                v1.nsec     = entry->nsec;
                hdr         = &v1;
                hdr_len     = sizeof(struct user_logger_entry_compat);
        } else {
                hdr         = entry;
                hdr_len     = sizeof(struct logger_entry);
        }

        return copy_to_user(buf, hdr, hdr_len);
}
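
/*
 * Note on the two reader ABIs: a version 1 reader receives the smaller
 * struct user_logger_entry_compat header, carrying only the len/pid/tid/
 * sec/nsec fields copied above, while a version 2 reader receives
 * struct logger_entry verbatim. The version is selected per reader via
 * the LOGGER_SET_VERSION ioctl below.
 */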

/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
                                   struct logger_reader *reader,
                                   char __user *buf,
                                   size_t count)
{
        struct logger_entry scratch;
        struct logger_entry *entry;
        size_t len;
        size_t msg_start;

        /*
         * First, copy the header to userspace, using the version of
         * the header requested
         */
        entry = get_entry_header(log, reader->r_off, &scratch);
        if (copy_header_to_user(reader->r_ver, entry, buf))
                return -EFAULT;

        count -= get_user_hdr_len(reader->r_ver);
        buf += get_user_hdr_len(reader->r_ver);
        msg_start = logger_offset(log,
                reader->r_off + sizeof(struct logger_entry));

        /*
         * We read from the msg in two disjoint operations. First, we read from
         * the current msg head offset up to 'count' bytes or to the end of
         * the log, whichever comes first.
         */
        len = min(count, log->size - msg_start);
        if (copy_to_user(buf, log->buffer + msg_start, len))
                return -EFAULT;

        /*
         * Second, we read any remaining bytes, starting back at the head of
         * the log.
         */
        if (count != len)
                if (copy_to_user(buf + len, log->buffer, count - len))
                        return -EFAULT;

        reader->r_off = logger_offset(log, reader->r_off +
                sizeof(struct logger_entry) + count);

        return count + get_user_hdr_len(reader->r_ver);
}

/*
 * get_next_entry_by_uid - Starting at 'off', returns an offset into
 * 'log->buffer' which contains the first entry readable by 'euid'
 */
static size_t get_next_entry_by_uid(struct logger_log *log,
                size_t off, kuid_t euid)
{
        while (off != log->w_off) {
                struct logger_entry *entry;
                struct logger_entry scratch;
                size_t next_len;

                entry = get_entry_header(log, off, &scratch);

                if (uid_eq(entry->euid, euid))
                        return off;

                next_len = sizeof(struct logger_entry) + entry->len;
                off = logger_offset(log, off + next_len);
        }

        return off;
}
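
/*
 * Whether a reader sees every entry or only its own is decided at open
 * time: r_all is set for readers in the log device's group or with
 * CAP_SYSLOG (see logger_open()). All other readers are advanced past
 * foreign entries via get_next_entry_by_uid() against current_euid().
 */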

/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *      - O_NONBLOCK works
 *      - If there are no log entries to read, blocks until log is written to
 *      - Atomically reads exactly one log entry
 *
 * Will set errno to EINVAL if the read buffer is insufficient to hold the
 * next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
                           size_t count, loff_t *pos)
{
        struct logger_reader *reader = file->private_data;
        struct logger_log *log = reader->log;
        ssize_t ret;
        DEFINE_WAIT(wait);

start:
        while (1) {
                mutex_lock(&log->mutex);

                prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

                ret = (log->w_off == reader->r_off);
                mutex_unlock(&log->mutex);
                if (!ret)
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                schedule();
        }

        finish_wait(&log->wq, &wait);
        if (ret)
                return ret;

        mutex_lock(&log->mutex);

        if (!reader->r_all)
                reader->r_off = get_next_entry_by_uid(log,
                        reader->r_off, current_euid());

        /* is there still something to read or did we race? */
        if (unlikely(log->w_off == reader->r_off)) {
                mutex_unlock(&log->mutex);
                goto start;
        }

        /* get the size of the next entry */
        ret = get_user_hdr_len(reader->r_ver) +
                get_entry_msg_len(log, reader->r_off);
        if (count < ret) {
                ret = -EINVAL;
                goto out;
        }

        /* get exactly one entry from the log */
        ret = do_read_log_to_user(log, reader, buf, ret);

out:
        mutex_unlock(&log->mutex);

        return ret;
}

/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
        size_t count = 0;

        do {
                size_t nr = sizeof(struct logger_entry) +
                        get_entry_msg_len(log, off);
                off = logger_offset(log, off + nr);
                count += nr;
        } while (count < len);

        return off;
}

/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 *    positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                    c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
        if (a < b) {
                /* is c between a and b? */
                if (a < c && c <= b)
                        return 1;
        } else {
                /* is c outside of b through a? */
                if (c <= b || a < c)
                        return 1;
        }

        return 0;
}
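
/*
 * For example, in a ring of size 8 where the write head advances from
 * a = 6 to b = 2 (wrapping past the end), offsets c = 7 and c = 1 lie in
 * the freshly written region (returns 1), while c = 4 does not (returns 0).
 */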

/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
        size_t old = log->w_off;
        size_t new = logger_offset(log, old + len);
        struct logger_reader *reader;

        if (is_between(old, new, log->head))
                log->head = get_next_entry(log, log->head, len);

        list_for_each_entry(reader, &log->readers, list)
                if (is_between(old, new, reader->r_off))
                        reader->r_off = get_next_entry(log, reader->r_off, len);
}

/*
 * logger_write_iter - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct logger_log *log = file_get_log(iocb->ki_filp);
        struct logger_entry header;
        struct timespec now;
        size_t len, count, w_off;

        count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);

        now = current_kernel_time();

        header.pid = current->tgid;
        header.tid = current->pid;
        header.sec = now.tv_sec;
        header.nsec = now.tv_nsec;
        header.euid = current_euid();
        header.len = count;
        header.hdr_size = sizeof(struct logger_entry);

        /* null writes succeed, return zero */
        if (unlikely(!header.len))
                return 0;

        mutex_lock(&log->mutex);

        /*
         * Fix up any readers, pulling them forward to the first readable
         * entry after (what will be) the new write offset. We do this now
         * because if we partially fail, we can end up with clobbered log
         * entries that encroach on readable buffer.
         */
        fix_up_readers(log, sizeof(struct logger_entry) + header.len);

        len = min(sizeof(header), log->size - log->w_off);
        memcpy(log->buffer + log->w_off, &header, len);
        memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);

        /* Work with a copy until we are ready to commit the whole entry */
        w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));

        len = min(count, log->size - w_off);

        if (copy_from_iter(log->buffer + w_off, len, from) != len) {
                /*
                 * Note that by not updating log->w_off, this abandons the
                 * portion of the new entry that *was* successfully
                 * copied, just above.  This is intentional to avoid
                 * message corruption from missing fragments.
                 */
                mutex_unlock(&log->mutex);
                return -EFAULT;
        }

        if (copy_from_iter(log->buffer, count - len, from) != count - len) {
                mutex_unlock(&log->mutex);
                return -EFAULT;
        }

        log->w_off = logger_offset(log, w_off + count);
        mutex_unlock(&log->mutex);

        /* wake up any blocked readers */
        wake_up_interruptible(&log->wq);

        return len;
}

static struct logger_log *get_log_from_minor(int minor)
{
        struct logger_log *log;

        list_for_each_entry(log, &log_list, logs)
                if (log->misc.minor == minor)
                        return log;
        return NULL;
}

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
        struct logger_log *log;
        int ret;

        ret = nonseekable_open(inode, file);
        if (ret)
                return ret;

        log = get_log_from_minor(MINOR(inode->i_rdev));
        if (!log)
                return -ENODEV;

        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader;

                reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
                if (!reader)
                        return -ENOMEM;

                reader->log = log;
                reader->r_ver = 1;
                reader->r_all = in_egroup_p(inode->i_gid) ||
                        capable(CAP_SYSLOG);

                INIT_LIST_HEAD(&reader->list);

                mutex_lock(&log->mutex);
                reader->r_off = log->head;
                list_add_tail(&reader->list, &log->readers);
                mutex_unlock(&log->mutex);

                file->private_data = reader;
        } else {
                file->private_data = log;
        }

        return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader = file->private_data;
                struct logger_log *log = reader->log;

                mutex_lock(&log->mutex);
                list_del(&reader->list);
                mutex_unlock(&log->mutex);

                kfree(reader);
        }

        return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
        struct logger_reader *reader;
        struct logger_log *log;
        unsigned int ret = POLLOUT | POLLWRNORM;

        if (!(file->f_mode & FMODE_READ))
                return ret;

        reader = file->private_data;
        log = reader->log;

        poll_wait(file, &log->wq, wait);

        mutex_lock(&log->mutex);
        if (!reader->r_all)
                reader->r_off = get_next_entry_by_uid(log,
                        reader->r_off, current_euid());

        if (log->w_off != reader->r_off)
                ret |= POLLIN | POLLRDNORM;
        mutex_unlock(&log->mutex);

        return ret;
}

static long logger_set_version(struct logger_reader *reader, void __user *arg)
{
        int version;

        if (copy_from_user(&version, arg, sizeof(int)))
                return -EFAULT;

        if ((version < 1) || (version > 2))
                return -EINVAL;

        reader->r_ver = version;
        return 0;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct logger_log *log = file_get_log(file);
        struct logger_reader *reader;
        long ret = -EINVAL;
        void __user *argp = (void __user *) arg;

        mutex_lock(&log->mutex);

        switch (cmd) {
        case LOGGER_GET_LOG_BUF_SIZE:
                ret = log->size;
                break;
        case LOGGER_GET_LOG_LEN:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;
                if (log->w_off >= reader->r_off)
                        ret = log->w_off - reader->r_off;
                else
                        ret = (log->size - reader->r_off) + log->w_off;
                break;
        case LOGGER_GET_NEXT_ENTRY_LEN:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;

                if (!reader->r_all)
                        reader->r_off = get_next_entry_by_uid(log,
                                reader->r_off, current_euid());

                if (log->w_off != reader->r_off)
                        ret = get_user_hdr_len(reader->r_ver) +
                                get_entry_msg_len(log, reader->r_off);
                else
                        ret = 0;
                break;
        case LOGGER_FLUSH_LOG:
                if (!(file->f_mode & FMODE_WRITE)) {
                        ret = -EBADF;
                        break;
                }
                if (!(in_egroup_p(file_inode(file)->i_gid) ||
                                capable(CAP_SYSLOG))) {
                        ret = -EPERM;
                        break;
                }
                list_for_each_entry(reader, &log->readers, list)
                        reader->r_off = log->w_off;
                log->head = log->w_off;
                ret = 0;
                break;
        case LOGGER_GET_VERSION:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;
                ret = reader->r_ver;
                break;
        case LOGGER_SET_VERSION:
                if (!(file->f_mode & FMODE_READ)) {
                        ret = -EBADF;
                        break;
                }
                reader = file->private_data;
                ret = logger_set_version(reader, argp);
                break;
        }

        mutex_unlock(&log->mutex);

        return ret;
}

static const struct file_operations logger_fops = {
        .owner = THIS_MODULE,
        .read = logger_read,
        .write_iter = logger_write_iter,
        .poll = logger_poll,
        .unlocked_ioctl = logger_ioctl,
        .compat_ioctl = logger_ioctl,
        .open = logger_open,
        .release = logger_release,
};

/*
 * Log size must be a power of two, and greater than
 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
 */
static int __init create_log(char *log_name, int size)
{
        int ret = 0;
        struct logger_log *log;
        unsigned char *buffer;

        buffer = vmalloc(size);
        if (buffer == NULL)
                return -ENOMEM;

        log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
        if (log == NULL) {
                ret = -ENOMEM;
                goto out_free_buffer;
        }
        log->buffer = buffer;

        log->misc.minor = MISC_DYNAMIC_MINOR;
        log->misc.name = kstrdup(log_name, GFP_KERNEL);
        if (log->misc.name == NULL) {
                ret = -ENOMEM;
                goto out_free_log;
        }

        log->misc.fops = &logger_fops;
        log->misc.parent = NULL;

        init_waitqueue_head(&log->wq);
        INIT_LIST_HEAD(&log->readers);
        mutex_init(&log->mutex);
        log->w_off = 0;
        log->head = 0;
        log->size = size;

        INIT_LIST_HEAD(&log->logs);
        list_add_tail(&log->logs, &log_list);

        /* finally, initialize the misc device for this log */
        ret = misc_register(&log->misc);
        if (unlikely(ret)) {
                pr_err("failed to register misc device for log '%s'!\n",
                                log->misc.name);
                goto out_free_misc_name;
        }

        pr_info("created %luK log '%s'\n",
                (unsigned long) log->size >> 10, log->misc.name);

        return 0;

out_free_misc_name:
        kfree(log->misc.name);

out_free_log:
        kfree(log);

out_free_buffer:
        vfree(buffer);
        return ret;
}

static int __init logger_init(void)
{
        int ret;

        ret = create_log(LOGGER_LOG_MAIN, 256*1024);
        if (unlikely(ret))
                goto out;

        ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
        if (unlikely(ret))
                goto out;

        ret = create_log(LOGGER_LOG_RADIO, 256*1024);
        if (unlikely(ret))
                goto out;

        ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
        if (unlikely(ret))
                goto out;

out:
        return ret;
}

static void __exit logger_exit(void)
{
        struct logger_log *current_log, *next_log;

        list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
                /* we have to delete all the entries inside log_list */
                misc_deregister(&current_log->misc);
                vfree(current_log->buffer);
                kfree(current_log->misc.name);
                list_del(&current_log->logs);
                kfree(current_log);
        }
}


device_initcall(logger_init);
module_exit(logger_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Love, <rlove@google.com>");
MODULE_DESCRIPTION("Android Logger");