2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/char/tpm/tpm-dev-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2004 IBM Corporation
  * Authors:
@@ -10,17 +11,79 @@
  * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
  *
  * Device file system interface to the TPM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
  */
+#include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/workqueue.h>
 #include "tpm.h"
 #include "tpm-dev.h"
+
+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+				u8 *buf, size_t bufsiz)
+{
+	struct tpm_header *header = (void *)buf;
+	ssize_t ret, len;
+
+	ret = tpm2_prepare_space(chip, space, buf, bufsiz);
+	/* If the command is not implemented by the TPM, synthesize a
+	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
+	 */
+	if (ret == -EOPNOTSUPP) {
+		header->length = cpu_to_be32(sizeof(*header));
+		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+						  TSS2_RESMGR_TPM_RC_LAYER);
+		ret = sizeof(*header);
+	}
+	if (ret)
+		goto out_rc;
+
+	len = tpm_transmit(chip, buf, bufsiz);
+	if (len < 0)
+		ret = len;
+
+	if (!ret)
+		ret = tpm2_commit_space(chip, space, buf, &len);
+
+out_rc:
+	return ret ? ret : len;
+}
+
+static void tpm_dev_async_work(struct work_struct *work)
+{
+	struct file_priv *priv =
+			container_of(work, struct file_priv, async_work);
+	ssize_t ret;
+
+	mutex_lock(&priv->buffer_mutex);
+	priv->command_enqueued = false;
+	ret = tpm_try_get_ops(priv->chip);
+	if (ret) {
+		priv->response_length = ret;
+		goto out;
+	}
+
+	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+			       sizeof(priv->data_buffer));
+	tpm_put_ops(priv->chip);
+
+	/*
+	 * If ret is > 0 then tpm_dev_transmit returned the size of the
+	 * response. If ret is < 0 then tpm_dev_transmit failed and
+	 * returned an error code.
+	 */
+	if (ret != 0) {
+		priv->response_length = ret;
+		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+	}
+out:
+	mutex_unlock(&priv->buffer_mutex);
+	wake_up_interruptible(&priv->async_wait);
+}
 
 static void user_reader_timeout(struct timer_list *t)
 {
@@ -29,27 +92,34 @@
 	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
 		task_tgid_nr(current));
 
-	schedule_work(&priv->work);
+	schedule_work(&priv->timeout_work);
 }
 
-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
 {
-	struct file_priv *priv = container_of(work, struct file_priv, work);
+	struct file_priv *priv = container_of(work, struct file_priv,
+					      timeout_work);
 
 	mutex_lock(&priv->buffer_mutex);
-	priv->data_pending = 0;
+	priv->response_read = true;
+	priv->response_length = 0;
 	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
 	mutex_unlock(&priv->buffer_mutex);
+	wake_up_interruptible(&priv->async_wait);
 }
 
 void tpm_common_open(struct file *file, struct tpm_chip *chip,
-		     struct file_priv *priv)
+		     struct file_priv *priv, struct tpm_space *space)
 {
 	priv->chip = chip;
+	priv->space = space;
+	priv->response_read = true;
+
 	mutex_init(&priv->buffer_mutex);
 	timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
-	INIT_WORK(&priv->work, timeout_work);
-
+	INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+	INIT_WORK(&priv->async_work, tpm_dev_async_work);
+	init_waitqueue_head(&priv->async_wait);
 	file->private_data = priv;
 }
 
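
For reference while reading the remaining hunks, the sketch below shows the struct file_priv fields the new code relies on (space, response_length, response_read, command_enqueued, timeout_work, async_work, async_wait). It is reconstructed from their usage in this diff, not copied from the accompanying tpm-dev.h change, so ordering and comments are assumptions.

/* Sketch only: reconstructed from usage in this diff, not from tpm-dev.h. */
struct file_priv {
	struct tpm_chip *chip;
	struct tpm_space *space;

	struct mutex buffer_mutex;
	struct timer_list user_read_timer;	/* user must claim the result */
	struct work_struct timeout_work;	/* clears an unclaimed response */
	struct work_struct async_work;		/* O_NONBLOCK command transmit */
	wait_queue_head_t async_wait;		/* poll()/read() wakeups */
	ssize_t response_length;		/* response bytes left to read */
	bool response_read;			/* reader started consuming it */
	bool command_enqueued;			/* async_work queued on tpm_dev_wq */

	u8 data_buffer[TPM_BUFSIZE];
};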
@@ -60,32 +130,46 @@
 	ssize_t ret_size = 0;
 	int rc;
 
-	del_singleshot_timer_sync(&priv->user_read_timer);
-	flush_work(&priv->work);
 	mutex_lock(&priv->buffer_mutex);
 
-	if (priv->data_pending) {
-		ret_size = min_t(ssize_t, size, priv->data_pending);
-		rc = copy_to_user(buf, priv->data_buffer, ret_size);
-		memset(priv->data_buffer, 0, priv->data_pending);
-		if (rc)
-			ret_size = -EFAULT;
+	if (priv->response_length) {
+		priv->response_read = true;
 
-		priv->data_pending = 0;
+		ret_size = min_t(ssize_t, size, priv->response_length);
+		if (ret_size <= 0) {
+			priv->response_length = 0;
+			goto out;
+		}
+
+		rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
+		if (rc) {
+			memset(priv->data_buffer, 0, TPM_BUFSIZE);
+			priv->response_length = 0;
+			ret_size = -EFAULT;
+		} else {
+			memset(priv->data_buffer + *off, 0, ret_size);
+			priv->response_length -= ret_size;
+			*off += ret_size;
+		}
 	}
 
+out:
+	if (!priv->response_length) {
+		*off = 0;
+		del_singleshot_timer_sync(&priv->user_read_timer);
+		flush_work(&priv->timeout_work);
+	}
 	mutex_unlock(&priv->buffer_mutex);
 	return ret_size;
 }
 
 ssize_t tpm_common_write(struct file *file, const char __user *buf,
-			 size_t size, loff_t *off, struct tpm_space *space)
+			 size_t size, loff_t *off)
 {
 	struct file_priv *priv = file->private_data;
-	size_t in_size = size;
-	ssize_t out_size;
+	int ret = 0;
 
-	if (in_size > TPM_BUFSIZE)
+	if (size > TPM_BUFSIZE)
 		return -E2BIG;
 
 	mutex_lock(&priv->buffer_mutex);
@@ -94,21 +178,38 @@
 	 * tpm_read or a user_read_timer timeout. This also prevents split
 	 * buffered writes from blocking here.
 	 */
-	if (priv->data_pending != 0) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EBUSY;
+	if ((!priv->response_read && priv->response_length) ||
+	    priv->command_enqueued) {
+		ret = -EBUSY;
+		goto out;
 	}
 
-	if (copy_from_user
-	    (priv->data_buffer, (void __user *) buf, in_size)) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EFAULT;
+	if (copy_from_user(priv->data_buffer, buf, size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
-	if (in_size < 6 ||
-	    in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+	if (size < 6 ||
+	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	priv->response_length = 0;
+	priv->response_read = false;
+	*off = 0;
+
+	/*
+	 * If in nonblocking mode, schedule an async job to send
+	 * the command and return the size.
+	 * In case of an error the error code will be returned in
+	 * the subsequent read call.
+	 */
+	if (file->f_flags & O_NONBLOCK) {
+		priv->command_enqueued = true;
+		queue_work(tpm_dev_wq, &priv->async_work);
 		mutex_unlock(&priv->buffer_mutex);
-		return -EINVAL;
+		return size;
 	}
 
 	/* atomic tpm command send and result receive. We only hold the ops
@@ -116,25 +217,44 @@
 	 * the char dev is held open.
 	 */
 	if (tpm_try_get_ops(priv->chip)) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EPIPE;
+		ret = -EPIPE;
+		goto out;
 	}
-	out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
-				sizeof(priv->data_buffer), 0);
 
+	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+			       sizeof(priv->data_buffer));
 	tpm_put_ops(priv->chip);
-	if (out_size < 0) {
-		mutex_unlock(&priv->buffer_mutex);
-		return out_size;
+
+	if (ret > 0) {
+		priv->response_length = ret;
+		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+		ret = size;
 	}
-
-	priv->data_pending = out_size;
+out:
 	mutex_unlock(&priv->buffer_mutex);
+	return ret;
+}
 
-	/* Set a timeout by which the reader must come claim the result */
-	mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+	struct file_priv *priv = file->private_data;
+	__poll_t mask = 0;
 
-	return in_size;
+	poll_wait(file, &priv->async_wait, wait);
+	mutex_lock(&priv->buffer_mutex);
+
+	/*
+	 * The response_length indicates whether there is still a response
+	 * (or part of one) to be consumed. Partial reads decrease it
+	 * by the number of bytes read, and a write resets it to zero.
+	 */
+	if (priv->response_length)
+		mask = EPOLLIN | EPOLLRDNORM;
+	else
+		mask = EPOLLOUT | EPOLLWRNORM;
+
+	mutex_unlock(&priv->buffer_mutex);
+	return mask;
 }
 
 /*
@@ -142,8 +262,24 @@
  */
 void tpm_common_release(struct file *file, struct file_priv *priv)
 {
+	flush_work(&priv->async_work);
 	del_singleshot_timer_sync(&priv->user_read_timer);
-	flush_work(&priv->work);
+	flush_work(&priv->timeout_work);
 	file->private_data = NULL;
-	priv->data_pending = 0;
+	priv->response_length = 0;
+}
+
+int __init tpm_dev_common_init(void)
+{
+	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+	return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+	if (tpm_dev_wq) {
+		destroy_workqueue(tpm_dev_wq);
+		tpm_dev_wq = NULL;
+	}
 }
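
A minimal user-space sketch of the nonblocking flow this diff enables, added for illustration and not part of the patch: it assumes a /dev/tpmrm0 (or /dev/tpm0) node is present and uses a hard-coded TPM2_GetRandom(8 bytes) command blob. A write() on an O_NONBLOCK descriptor queues the command on tpm_dev_wq and returns immediately, poll() reports EPOLLIN once the response is ready, and read() then fetches it.

/*
 * Hypothetical user-space example (not part of the patch): exercises the
 * O_NONBLOCK + poll() path added above.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TPM2_GetRandom: TPM_ST_NO_SESSIONS, size 12, CC 0x17B, 8 bytes */
	static const unsigned char cmd[] = {
		0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
		0x00, 0x00, 0x01, 0x7b, 0x00, 0x08
	};
	unsigned char rsp[4096];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/tpmrm0", O_RDWR | O_NONBLOCK);
	if (fd < 0)
		return 1;

	/* Queues the command on tpm_dev_wq and returns immediately. */
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
		return 1;

	/* EPOLLIN is reported once response_length becomes non-zero. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 10000) <= 0)
		return 1;

	n = read(fd, rsp, sizeof(rsp));
	printf("received %zd response bytes\n", n);
	close(fd);
	return 0;
}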