xref: /freebsd/sys/dev/tpm/tpm20.c (revision 9768746b)
/*-
 * Copyright (c) 2018 Stormshield.
 * Copyright (c) 2018 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/random.h>

#include "tpm20.h"

#define TPM_HARVEST_SIZE     16
/*
 * Perform a harvest every 10 seconds.
 * Discrete TPMs are painfully slow, so we don't want to execute this
 * too often, as the chip is likely to be used by other consumers too.
 */
#define TPM_HARVEST_INTERVAL 10

MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");

static void tpm20_discard_buffer(void *arg);
#ifdef TPM_HARVEST
static void tpm20_harvest(void *arg, int unused);
#endif
static int  tpm20_save_state(device_t dev, bool suspend);

static d_open_t		tpm20_open;
static d_close_t	tpm20_close;
static d_read_t		tpm20_read;
static d_write_t	tpm20_write;
static d_ioctl_t	tpm20_ioctl;

static struct cdevsw tpm20_cdevsw = {
	.d_version = D_VERSION,
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
	.d_name = "tpm20",
};

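/*
 * Return the buffered response from the last transmitted command to the
 * thread that issued it; other threads get EPERM.  A pending discard
 * timeout is cancelled first, and the buffer is wiped once it has been
 * copied out.  If no response is pending, ETIMEDOUT is returned.
 */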
int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t bytes_to_transfer;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	callout_stop(&sc->discard_buffer_callout);
	sx_xlock(&sc->dev_lock);
	if (sc->owner_tid != uio->uio_td->td_tid) {
		sx_xunlock(&sc->dev_lock);
		return (EPERM);
	}

	bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
	if (bytes_to_transfer > 0) {
		result = uiomove((caddr_t) sc->buf, bytes_to_transfer, uio);
		memset(sc->buf, 0, TPM_BUFSIZE);
		sc->pending_data_length = 0;
		cv_signal(&sc->buf_cv);
	} else {
		result = ETIMEDOUT;
	}

	sx_xunlock(&sc->dev_lock);

	return (result);
}

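/*
 * Copy a command from userspace into the driver buffer and pass it to the
 * interface-specific transmit routine.  On success the calling thread
 * becomes the owner of the response and a callout is armed to discard the
 * response if it is not read within TPM_READ_TIMEOUT.
 */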
int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t byte_count;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	byte_count = uio->uio_resid;
	if (byte_count < TPM_HEADER_SIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too small\n");
		return (EINVAL);
	}

	if (byte_count > TPM_BUFSIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too large\n");
		return (E2BIG);
	}

	sx_xlock(&sc->dev_lock);

	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	result = uiomove(sc->buf, byte_count, uio);
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return (result);
	}

	result = sc->transmit(sc, byte_count);

	if (result == 0) {
		callout_reset(&sc->discard_buffer_callout,
		    TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
		sc->owner_tid = uio->uio_td->td_tid;
	}

	sx_xunlock(&sc->dev_lock);
	return (result);
}

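/*
 * Callout handler: drop a response that was not read within
 * TPM_READ_TIMEOUT so the buffer can be reused for the next command.
 */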
static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	sx_xlock(&sc->dev_lock);

	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;

	cv_signal(&sc->buf_cv);
	sx_xunlock(&sc->dev_lock);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}

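/*
 * Open and close are no-ops; no ioctls are implemented.
 */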
int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{

	return (ENOTTY);
}

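/*
 * Bus-independent part of attachment: set up the buffer condition variable
 * and the discard callout, create the character device and, when
 * TPM_HARVEST is enabled, schedule periodic entropy harvesting.
 */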
int
tpm20_init(struct tpm_sc *sc)
{
	struct make_dev_args args;
	int result;

	cv_init(&sc->buf_cv, "TPM buffer cv");
	callout_init(&sc->discard_buffer_callout, 1);
	sc->pending_data_length = 0;

	make_dev_args_init(&args);
	args.mda_devsw = &tpm20_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = TPM_CDEV_PERM_FLAG;
	args.mda_si_drv1 = sc;
	result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME);
	if (result != 0)
		tpm20_release(sc);

#ifdef TPM_HARVEST
	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
	    tpm20_harvest, sc);
	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
#endif

	return (result);
}

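/*
 * Undo tpm20_init(): stop entropy harvesting, free the command buffer and
 * destroy the synchronization primitives and the character device.
 */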
void
tpm20_release(struct tpm_sc *sc)
{

#ifdef TPM_HARVEST
	if (device_is_attached(sc->dev))
		taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
#endif

	if (sc->buf != NULL)
		free(sc->buf, M_TPM20);

	sx_destroy(&sc->dev_lock);
	cv_destroy(&sc->buf_cv);
	if (sc->sc_cdev != NULL)
		destroy_dev(sc->sc_cdev);
}

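/*
 * Suspend and shutdown both issue TPM2_Shutdown; they differ only in the
 * shutdown type (TPM_SU_STATE vs. TPM_SU_CLEAR) chosen in tpm20_save_state().
 */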
int
tpm20_suspend(device_t dev)
{
	return (tpm20_save_state(dev, true));
}

int
tpm20_shutdown(device_t dev)
{
	return (tpm20_save_state(dev, false));
}

#ifdef TPM_HARVEST
/*
 * Get TPM_HARVEST_SIZE random bytes from the TPM and add them
 * to the system entropy pool.
 */
static void
tpm20_harvest(void *arg, int unused)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	int result;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE	/* number of bytes requested */
	};

	sc = arg;
	sx_xlock(&sc->dev_lock);
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	result = sc->transmit(sc, sizeof(cmd));
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return;
	}

	/* The response is consumed here; don't leave it pending for a reader. */
	sc->pending_data_length = 0;

	/*
	 * The response carries a big-endian uint16 byte count right after the
	 * header, followed by the random bytes themselves.  Since at most
	 * TPM_HARVEST_SIZE (16) bytes were requested, the count fits in the
	 * low-order byte.
	 */
	entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
	if (entropy_size > 0) {
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
			sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
			entropy_size);
	}

	sx_xunlock(&sc->dev_lock);
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task,
	    hz * TPM_HARVEST_INTERVAL);
}
#endif	/* TPM_HARVEST */

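/*
 * Send TPM2_Shutdown so the TPM can save its state before the system
 * suspends or powers off.
 */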
static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,             /* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0C, /* cmd length */
		0x00, 0x00, 0x01, 0x45, /* cmd TPM_CC_Shutdown */
		0x00, 0x00              /* TPM_SU_CLEAR, patched below for suspend */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1; /* TPM_SU_STATE */

	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	memcpy(sc->buf, save_cmd, sizeof(save_cmd));
	sc->transmit(sc, sizeof(save_cmd));

	sx_xunlock(&sc->dev_lock);

	return (0);
}

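/*
 * Select the timeout to use while waiting for a response, based on the
 * command code; object-creation commands are allowed to take much longer.
 */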
int32_t
tpm20_get_timeout(uint32_t command)
{
	int32_t timeout;

	switch (command) {
		case TPM_CC_CreatePrimary:
		case TPM_CC_Create:
		case TPM_CC_CreateLoaded:
			timeout = TPM_TIMEOUT_LONG;
			break;
		case TPM_CC_SequenceComplete:
		case TPM_CC_Startup:
		case TPM_CC_SequenceUpdate:
		case TPM_CC_GetCapability:
		case TPM_CC_PCR_Extend:
		case TPM_CC_EventSequenceComplete:
		case TPM_CC_HashSequenceStart:
			timeout = TPM_TIMEOUT_C;
			break;
		default:
			timeout = TPM_TIMEOUT_B;
			break;
	}
	return (timeout);
}
357