drivers/char/tpm/tpm-dev-common.c
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

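/* Workqueue used to transmit commands queued by nonblocking (O_NONBLOCK) writes. */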
static struct workqueue_struct *tpm_dev_wq;
static DEFINE_MUTEX(tpm_dev_wq_lock);

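/*
 * Async worker: transmit the command staged in the file's data_buffer, drop
 * the ops reference taken by tpm_common_write(), and on success record the
 * response length and arm the (deprecated) 120 second read timeout before
 * waking any poll()/read() waiters.
 */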
static void tpm_async_work(struct work_struct *work)
{
        struct file_priv *priv =
                        container_of(work, struct file_priv, async_work);
        ssize_t ret;

        mutex_lock(&priv->buffer_mutex);
        priv->command_enqueued = false;
        ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
                           sizeof(priv->data_buffer), 0);

        tpm_put_ops(priv->chip);
        if (ret > 0) {
                priv->data_pending = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
        }
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

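/*
 * Timer callback: fires if userspace has not read a completed response within
 * 120 seconds. Warn (this behaviour is deprecated) and schedule timeout_work
 * to discard the stale response.
 */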
static void user_reader_timeout(struct timer_list *t)
{
        struct file_priv *priv = from_timer(priv, t, user_read_timer);

        pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                task_tgid_nr(current));

        schedule_work(&priv->timeout_work);
}

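/* Discard an unread response: drop data_pending, zero the buffer, wake waiters. */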
static void tpm_timeout_work(struct work_struct *work)
{
        struct file_priv *priv = container_of(work, struct file_priv,
                                              timeout_work);

        mutex_lock(&priv->buffer_mutex);
        priv->data_pending = 0;
        memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

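/*
 * Set up the per-open state: record the chip and (optional) command space and
 * initialise the buffer lock, read timeout timer, work items and wait queue.
 */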
void tpm_common_open(struct file *file, struct tpm_chip *chip,
                     struct file_priv *priv, struct tpm_space *space)
{
        priv->chip = chip;
        priv->space = space;

        mutex_init(&priv->buffer_mutex);
        timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
        INIT_WORK(&priv->timeout_work, tpm_timeout_work);
        INIT_WORK(&priv->async_work, tpm_async_work);
        init_waitqueue_head(&priv->async_wait);
        file->private_data = priv;
}

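/*
 * read() handler: cancel the read timeout and any pending timeout work, then
 * copy a completed response to userspace. The response is consumed (and the
 * buffer cleared) even if the copy fails or the user buffer is too small.
 */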
ssize_t tpm_common_read(struct file *file, char __user *buf,
                        size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        ssize_t ret_size = 0;
        int rc;

        del_singleshot_timer_sync(&priv->user_read_timer);
        flush_work(&priv->timeout_work);
        mutex_lock(&priv->buffer_mutex);

        if (priv->data_pending) {
                ret_size = min_t(ssize_t, size, priv->data_pending);
                if (ret_size > 0) {
                        rc = copy_to_user(buf, priv->data_buffer, ret_size);
                        memset(priv->data_buffer, 0, priv->data_pending);
                        if (rc)
                                ret_size = -EFAULT;
                }

                priv->data_pending = 0;
        }

        mutex_unlock(&priv->buffer_mutex);
        return ret_size;
}

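/*
 * write() handler: stage one complete TPM command in the per-file buffer and
 * transmit it. Blocking opens transmit synchronously and make the response
 * available to a subsequent read(); O_NONBLOCK opens queue the transmission
 * on tpm_dev_wq and return immediately. A write is refused with -EBUSY while
 * a previous response is still unread or an async command is still queued.
 */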
ssize_t tpm_common_write(struct file *file, const char __user *buf,
                         size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        int ret = 0;

        if (size > TPM_BUFSIZE)
                return -E2BIG;

        mutex_lock(&priv->buffer_mutex);

        /* Cannot perform a write until the read has cleared either via
         * tpm_read or a user_read_timer timeout. This also prevents split
         * buffered writes from blocking here.
         */
        if (priv->data_pending != 0 || priv->command_enqueued) {
                ret = -EBUSY;
                goto out;
        }

        if (copy_from_user(priv->data_buffer, buf, size)) {
                ret = -EFAULT;
                goto out;
        }

        if (size < 6 ||
            size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
                ret = -EINVAL;
                goto out;
        }

        /* atomic tpm command send and result receive. We only hold the ops
         * lock during this period so that the tpm can be unregistered even if
         * the char dev is held open.
         */
        if (tpm_try_get_ops(priv->chip)) {
                ret = -EPIPE;
                goto out;
        }

        /*
         * If in nonblocking mode, schedule an async job to send the command
         * and return the size. If an error occurs, the error code will be
         * returned by a subsequent read call.
         */
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }

        ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
                           sizeof(priv->data_buffer), 0);
        tpm_put_ops(priv->chip);

        if (ret > 0) {
                priv->data_pending = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
                ret = size;
        }
out:
        mutex_unlock(&priv->buffer_mutex);
        return ret;
}

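/*
 * poll() handler: report EPOLLIN once a response is ready to read, otherwise
 * EPOLLOUT to indicate that a new command may be written.
 */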
__poll_t tpm_common_poll(struct file *file, poll_table *wait)
{
        struct file_priv *priv = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &priv->async_wait, wait);

        if (priv->data_pending)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;

        return mask;
}

/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
        flush_work(&priv->async_work);
        del_singleshot_timer_sync(&priv->user_read_timer);
        flush_work(&priv->timeout_work);
        file->private_data = NULL;
        priv->data_pending = 0;
}

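/* Allocate the shared workqueue used for nonblocking command transmission. */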
int __init tpm_dev_common_init(void)
{
        tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);

        return !tpm_dev_wq ? -ENOMEM : 0;
}

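/* Tear down the shared workqueue on module exit. */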
void __exit tpm_dev_common_exit(void)
{
        if (tpm_dev_wq) {
                destroy_workqueue(tpm_dev_wq);
                tpm_dev_wq = NULL;
        }
}