/*-
 * Copyright (c) 2018 Stormshield.
 * Copyright (c) 2018 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/tpm/tpm20.c 365144 2020-09-01 21:50:31Z mjg $
 */
29
30 #include <sys/random.h>
31
32 #include "tpm20.h"
33
34 #define TPM_HARVEST_SIZE 16
/*
 * Perform a harvest every 10 seconds.
 * Since discrete TPMs are painfully slow,
 * we don't want to execute this too often,
 * as the chip is likely to be used by others too.
 */
41 #define TPM_HARVEST_INTERVAL 10000000
42
43 MALLOC_DECLARE(M_TPM20);
44 MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");
45
46 static void tpm20_discard_buffer(void *arg);
47 #ifdef TPM_HARVEST
48 static void tpm20_harvest(void *arg);
49 #endif
50 static int tpm20_save_state(device_t dev, bool suspend);
51
52 static d_open_t tpm20_open;
53 static d_close_t tpm20_close;
54 static d_read_t tpm20_read;
55 static d_write_t tpm20_write;
56 static d_ioctl_t tpm20_ioctl;
57
/* Character-device entry points for the TPM device node. */
static struct dev_ops tpm20_ops = {
	{ "tpm20", 0, 0 },	/* name, maj, flags */
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
};
66
67 int
tpm20_read(struct dev_read_args * ap)68 tpm20_read(struct dev_read_args *ap)
69 {
70 cdev_t dev = ap->a_head.a_dev;
71 struct uio *uio = ap->a_uio;
72 struct tpm_sc *sc;
73 size_t bytes_to_transfer;
74 int result = 0;
75
76 sc = (struct tpm_sc *)dev->si_drv1;
77
78 callout_stop(&sc->discard_buffer_callout);
79 lockmgr(&sc->dev_lock, LK_EXCLUSIVE);
80 if (sc->owner_tid != uio->uio_td) {
81 lockmgr(&sc->dev_lock, LK_RELEASE);
82 return (EPERM);
83 }
84
85 bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
86 if (bytes_to_transfer > 0) {
87 result = uiomove((caddr_t) sc->buf, bytes_to_transfer, uio);
88 memset(sc->buf, 0, TPM_BUFSIZE);
89 sc->pending_data_length = 0;
90 cv_signal(&sc->buf_cv);
91 } else {
92 result = ETIMEDOUT;
93 }
94
95 lockmgr(&sc->dev_lock, LK_RELEASE);
96
97 return (result);
98 }
99
100 int
tpm20_write(struct dev_write_args * ap)101 tpm20_write(struct dev_write_args *ap)
102 {
103 cdev_t dev = ap->a_head.a_dev;
104 struct uio *uio = ap->a_uio;
105 struct tpm_sc *sc;
106 size_t byte_count;
107 int result = 0;
108
109 sc = (struct tpm_sc *)dev->si_drv1;
110
111 byte_count = uio->uio_resid;
112 if (byte_count < TPM_HEADER_SIZE) {
113 device_printf(sc->dev,
114 "Requested transfer is too small\n");
115 return (EINVAL);
116 }
117
118 if (byte_count > TPM_BUFSIZE) {
119 device_printf(sc->dev,
120 "Requested transfer is too large\n");
121 return (E2BIG);
122 }
123
124 lockmgr(&sc->dev_lock, LK_EXCLUSIVE);
125
126 while (sc->pending_data_length != 0)
127 cv_wait(&sc->buf_cv, &sc->dev_lock);
128
129 result = uiomove(sc->buf, byte_count, uio);
130 if (result != 0) {
131 lockmgr(&sc->dev_lock, LK_RELEASE);
132 return (result);
133 }
134
135 result = sc->transmit(sc, byte_count);
136
137 if (result == 0) {
138 callout_reset(&sc->discard_buffer_callout,
139 TPM_READ_TIMEOUT / ustick, tpm20_discard_buffer, sc);
140 sc->owner_tid = uio->uio_td;
141 }
142
143 lockmgr(&sc->dev_lock, LK_RELEASE);
144 return (result);
145 }
146
/*
 * Callout handler: the user failed to read the TPM response within
 * TPM_READ_TIMEOUT, so wipe it and wake any writer waiting for the buffer.
 */
static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	/*
	 * If the callout is pending again, a new command was transmitted
	 * after this invocation was scheduled - leave that response alone.
	 */
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	lockmgr(&sc->dev_lock, LK_EXCLUSIVE);

	/* Drop the stale response and mark the buffer free. */
	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;

	/* Wake a writer blocked in tpm20_write() waiting for the buffer. */
	cv_signal(&sc->buf_cv);
	lockmgr(&sc->dev_lock, LK_RELEASE);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}
167
/*
 * open(2) handler: no per-open state is kept, so always succeed.
 */
int
tpm20_open(struct dev_open_args *ap)
{
	return (0);
}
174
/*
 * close(2) handler: nothing to release, so always succeed.
 */
int
tpm20_close(struct dev_close_args *ap)
{
	return (0);
}
181
/*
 * ioctl(2) handler: no ioctls are implemented for the TPM 2.0 device.
 */
int
tpm20_ioctl(struct dev_ioctl_args *ap)
{
	return (ENOTTY);
}
188
/*
 * Common one-time setup for a TPM 2.0 device: allocate the command/response
 * buffer, initialize locking and callouts (including the optional entropy
 * harvester), and create the character-device node.  Always returns 0.
 */
int
tpm20_init(struct tpm_sc *sc)
{
	sc->buf = kmalloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
	lockinit(&sc->dev_lock, "TPM driver lock", 0, LK_CANRECURSE);
	cv_init(&sc->buf_cv, "TPM buffer cv");
	callout_init_mp(&sc->discard_buffer_callout);
#ifdef TPM_HARVEST
	/* Convert the harvest interval (microseconds) into callout ticks. */
	sc->harvest_ticks = TPM_HARVEST_INTERVAL / ustick;
	callout_init_mp(&sc->harvest_callout);
	/* Kick off the first harvest immediately. */
	callout_reset(&sc->harvest_callout, 0, tpm20_harvest, sc);
#endif
	sc->pending_data_length = 0;

	sc->sc_cdev = make_dev(&tpm20_ops, device_get_unit(sc->dev),
	    UID_ROOT, GID_WHEEL, TPM_CDEV_PERM_FLAG, TPM_CDEV_NAME);
	sc->sc_cdev->si_drv1 = sc;

	return (0);

}
210
/*
 * Tear down everything set up by tpm20_init().
 */
void
tpm20_release(struct tpm_sc *sc)
{

#ifdef TPM_HARVEST
	/* Ensure the harvest callout is not running before freeing state. */
	callout_drain(&sc->harvest_callout);
#endif

	if (sc->buf != NULL)
		kfree(sc->buf, M_TPM20);

	lockuninit(&sc->dev_lock);
	cv_destroy(&sc->buf_cv);
	if (sc->sc_cdev != NULL)
		destroy_dev(sc->sc_cdev);
}
227
228 int
tpm20_suspend(device_t dev)229 tpm20_suspend(device_t dev)
230 {
231 return (tpm20_save_state(dev, true));
232 }
233
234 int
tpm20_shutdown(device_t dev)235 tpm20_shutdown(device_t dev)
236 {
237 return (tpm20_save_state(dev, false));
238 }
239
240 #ifdef TPM_HARVEST
241
242 /*
243 * Get TPM_HARVEST_SIZE random bytes and add them
244 * into system entropy pool.
245 */
246 static void
tpm20_harvest(void * arg)247 tpm20_harvest(void *arg)
248 {
249 struct tpm_sc *sc;
250 unsigned char entropy[TPM_HARVEST_SIZE];
251 uint16_t entropy_size;
252 int result;
253 uint8_t cmd[] = {
254 0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/
255 0x00, 0x00, 0x00, 0x0c, /* cmd length */
256 0x00, 0x00, 0x01, 0x7b, /* cmd TPM_CC_GetRandom */
257 0x00, TPM_HARVEST_SIZE /* number of bytes requested */
258 };
259
260 sc = arg;
261 lockmgr(&sc->dev_lock, LK_EXCLUSIVE);
262 while (sc->pending_data_length != 0)
263 cv_wait(&sc->buf_cv, &sc->dev_lock);
264
265 memcpy(sc->buf, cmd, sizeof(cmd));
266 result = sc->transmit(sc, sizeof(cmd));
267 if (result != 0) {
268 lockmgr(&sc->dev_lock, LK_RELEASE);
269 return;
270 }
271
272 /* Ignore response size */
273 sc->pending_data_length = 0;
274
275 /* The number of random bytes we got is placed right after the header */
276 entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
277 if (entropy_size > 0) {
278 entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
279 memcpy(entropy,
280 sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
281 entropy_size);
282 }
283
284 lockmgr(&sc->dev_lock, LK_RELEASE);
285 if (entropy_size > 0)
286 add_buffer_randomness_src(entropy, entropy_size, RAND_SRC_TPM);
287
288 callout_reset(&sc->harvest_callout, sc->harvest_ticks, tpm20_harvest, sc);
289 }
290 #endif /* TPM_HARVEST */
291
/*
 * Send TPM2_Shutdown so the TPM can preserve its state across a suspend
 * (TPM_SU_STATE) or an ordinary shutdown/reboot (TPM_SU_CLEAR).
 * Always returns 0; the transmit result is deliberately ignored.
 */
static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag*/
		0x00, 0x00, 0x00, 0x0C,	/* cmd length */
		0x00, 0x00, 0x01, 0x45,	/* cmd TPM_CC_Shutdown */
		0x00, 0x00		/* TPM_SU_CLEAR; patched below for suspend */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1;	/* TPM_SU_STATE */

	if (sc == NULL || sc->buf == NULL)
		return (0);

	lockmgr(&sc->dev_lock, LK_EXCLUSIVE);

	memcpy(sc->buf, save_cmd, sizeof(save_cmd));
	/* Best effort - nothing useful to do if the command fails here. */
	sc->transmit(sc, sizeof(save_cmd));

	lockmgr(&sc->dev_lock, LK_RELEASE);

	return (0);
}
323
324 int32_t
tpm20_get_timeout(uint32_t command)325 tpm20_get_timeout(uint32_t command)
326 {
327 int32_t timeout;
328
329 switch (command) {
330 case TPM_CC_CreatePrimary:
331 case TPM_CC_Create:
332 case TPM_CC_CreateLoaded:
333 timeout = TPM_TIMEOUT_LONG;
334 break;
335 case TPM_CC_SequenceComplete:
336 case TPM_CC_Startup:
337 case TPM_CC_SequenceUpdate:
338 case TPM_CC_GetCapability:
339 case TPM_CC_PCR_Extend:
340 case TPM_CC_EventSequenceComplete:
341 case TPM_CC_HashSequenceStart:
342 timeout = TPM_TIMEOUT_C;
343 break;
344 default:
345 timeout = TPM_TIMEOUT_B;
346 break;
347 }
348 return timeout;
349 }
350