/* xref: /freebsd/sys/dev/tpm/tpm20.c (revision 10eea8dc) */
/*-
 * Copyright (c) 2018 Stormshield.
 * Copyright (c) 2018 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/random.h>

#include "tpm20.h"

#define TPM_HARVEST_SIZE     16
/*
 * Perform a harvest every 10 seconds.
 * Since discrete TPMs are painfully slow,
 * we don't want to execute this too often,
 * as the chip is likely to be used by others too.
 */
#define TPM_HARVEST_INTERVAL 10

MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver");

static void tpm20_discard_buffer(void *arg);
#ifdef TPM_HARVEST
static void tpm20_harvest(void *arg, int unused);
#endif
static int  tpm20_save_state(device_t dev, bool suspend);

static d_open_t		tpm20_open;
static d_close_t	tpm20_close;
static d_read_t		tpm20_read;
static d_write_t	tpm20_write;
static d_ioctl_t	tpm20_ioctl;

static struct cdevsw tpm20_cdevsw = {
	.d_version = D_VERSION,
	.d_open = tpm20_open,
	.d_close = tpm20_close,
	.d_read = tpm20_read,
	.d_write = tpm20_write,
	.d_ioctl = tpm20_ioctl,
	.d_name = "tpm20",
};

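/*
 * Hand the response left in the driver buffer by the last command back to
 * userspace.  Only the thread that submitted the command may read it; if no
 * response data is pending (e.g. the discard callout already ran), return
 * ETIMEDOUT.
 */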
int
tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t bytes_to_transfer;
	size_t offset;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	callout_stop(&sc->discard_buffer_callout);
	sx_xlock(&sc->dev_lock);
	if (sc->owner_tid != uio->uio_td->td_tid) {
		sx_xunlock(&sc->dev_lock);
		return (EPERM);
	}

	bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
	offset = sc->total_length - sc->pending_data_length;
	if (bytes_to_transfer > 0) {
		result = uiomove((caddr_t) sc->buf + offset, bytes_to_transfer, uio);
		sc->pending_data_length -= bytes_to_transfer;
		cv_signal(&sc->buf_cv);
	} else {
		result = ETIMEDOUT;
	}

	sx_xunlock(&sc->dev_lock);

	return (result);
}

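/*
 * Copy a command from userspace into the driver buffer and pass it to the
 * underlying interface via TPM_TRANSMIT().  The transfer must be at least a
 * TPM header (TPM_HEADER_SIZE) and at most TPM_BUFSIZE bytes, and the write
 * blocks until any previous response has been consumed.  On success the
 * calling thread becomes the buffer owner and a callout is armed to discard
 * the response if it is not read in time.
 */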
int
tpm20_write(struct cdev *dev, struct uio *uio, int flags)
{
	struct tpm_sc *sc;
	size_t byte_count;
	int result = 0;

	sc = (struct tpm_sc *)dev->si_drv1;

	byte_count = uio->uio_resid;
	if (byte_count < TPM_HEADER_SIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too small\n");
		return (EINVAL);
	}

	if (byte_count > TPM_BUFSIZE) {
		device_printf(sc->dev,
		    "Requested transfer is too large\n");
		return (E2BIG);
	}

	sx_xlock(&sc->dev_lock);

	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	result = uiomove(sc->buf, byte_count, uio);
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return (result);
	}

	result = TPM_TRANSMIT(sc->dev, byte_count);

	if (result == 0) {
		callout_reset(&sc->discard_buffer_callout,
		    TPM_READ_TIMEOUT / tick, tpm20_discard_buffer, sc);
		sc->owner_tid = uio->uio_td->td_tid;
	}

	sx_xunlock(&sc->dev_lock);
	return (result);
}

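/*
 * Callout handler: discard a response that was not read within
 * TPM_READ_TIMEOUT and wake up any writer waiting for the buffer.
 */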
static void
tpm20_discard_buffer(void *arg)
{
	struct tpm_sc *sc;

	sc = (struct tpm_sc *)arg;
	if (callout_pending(&sc->discard_buffer_callout))
		return;

	sx_xlock(&sc->dev_lock);

	memset(sc->buf, 0, TPM_BUFSIZE);
	sc->pending_data_length = 0;
	sc->total_length = 0;

	cv_signal(&sc->buf_cv);
	sx_xunlock(&sc->dev_lock);

	device_printf(sc->dev,
	    "User failed to read buffer in time\n");
}

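/*
 * open(2) and close(2) keep no per-open state; ioctl(2) is not implemented
 * and returns ENOTTY.
 */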
int
tpm20_open(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_close(struct cdev *dev, int flag, int mode, struct thread *td)
{

	return (0);
}

int
tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{

	return (ENOTTY);
}

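/*
 * Common TPM 2.0 setup: initialize the buffer condition variable and the
 * discard callout, create the character device node and, if TPM_HARVEST is
 * enabled, schedule the periodic entropy harvesting task.
 */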
int
tpm20_init(struct tpm_sc *sc)
{
	struct make_dev_args args;
	int result;

	cv_init(&sc->buf_cv, "TPM buffer cv");
	callout_init(&sc->discard_buffer_callout, 1);
	sc->pending_data_length = 0;
	sc->total_length = 0;

	make_dev_args_init(&args);
	args.mda_devsw = &tpm20_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = TPM_CDEV_PERM_FLAG;
	args.mda_si_drv1 = sc;
	result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME);
	if (result != 0)
		tpm20_release(sc);

#ifdef TPM_HARVEST
	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
	    tpm20_harvest, sc);
	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
#endif

	return (result);
}

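/*
 * Release driver resources: drain the harvest task (if attached), free the
 * command buffer, and destroy the lock, condition variable and character
 * device.
 */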
void
tpm20_release(struct tpm_sc *sc)
{

#ifdef TPM_HARVEST
	if (device_is_attached(sc->dev))
		taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
#endif

	if (sc->buf != NULL)
		free(sc->buf, M_TPM20);

	sx_destroy(&sc->dev_lock);
	cv_destroy(&sc->buf_cv);
	if (sc->sc_cdev != NULL)
		destroy_dev(sc->sc_cdev);
}

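/*
 * Suspend and shutdown both issue TPM2_Shutdown; they differ only in the
 * TPM_SU argument passed to tpm20_save_state().
 */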
int
tpm20_suspend(device_t dev)
{
	return (tpm20_save_state(dev, true));
}

int
tpm20_shutdown(device_t dev)
{
	return (tpm20_save_state(dev, false));
}

#ifdef TPM_HARVEST
/*
 * Get TPM_HARVEST_SIZE random bytes and add them
 * to the system entropy pool.
 */
static void
tpm20_harvest(void *arg, int unused)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	int result;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE	/* number of bytes requested */
	};

	sc = arg;
	sx_xlock(&sc->dev_lock);
	while (sc->pending_data_length != 0)
		cv_wait(&sc->buf_cv, &sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	result = TPM_TRANSMIT(sc->dev, sizeof(cmd));
	if (result != 0) {
		sx_xunlock(&sc->dev_lock);
		return;
	}

	/* Ignore response size */
	sc->pending_data_length = 0;
	sc->total_length = 0;

	/*
	 * The count of random bytes returned (a big-endian uint16) follows
	 * the response header; reading only its low-order byte is sufficient
	 * since at most TPM_HARVEST_SIZE (16) bytes were requested.
	 */
	entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
	if (entropy_size > 0) {
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
			sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
			entropy_size);
	}

	sx_xunlock(&sc->dev_lock);
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

	taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task,
	    hz * TPM_HARVEST_INTERVAL);
}
#endif	/* TPM_HARVEST */

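/*
 * Issue TPM2_Shutdown before the system suspends or powers off: TPM_SU_STATE
 * when suspending (saved state will be resumed) and TPM_SU_CLEAR otherwise.
 */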
static int
tpm20_save_state(device_t dev, bool suspend)
{
	struct tpm_sc *sc;
	uint8_t save_cmd[] = {
		0x80, 0x01,             /* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0C, /* cmd length */
		0x00, 0x00, 0x01, 0x45, /* cmd TPM_CC_Shutdown */
		0x00, 0x00              /* TPM_SU_CLEAR by default */
	};

	sc = device_get_softc(dev);

	/*
	 * Inform the TPM whether we are going to suspend or reboot/shutdown.
	 */
	if (suspend)
		save_cmd[11] = 1; /* TPM_SU_STATE */

	if (sc == NULL || sc->buf == NULL)
		return (0);

	sx_xlock(&sc->dev_lock);

	memcpy(sc->buf, save_cmd, sizeof(save_cmd));
	TPM_TRANSMIT(sc->dev, sizeof(save_cmd));

	sx_xunlock(&sc->dev_lock);

	return (0);
}

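/*
 * Map a TPM command code to the timeout class used while waiting for its
 * response: object creation commands get the long timeout, a few quick
 * commands get timeout C and everything else gets timeout B.
 */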
int32_t
tpm20_get_timeout(uint32_t command)
{
	int32_t timeout;

	switch (command) {
		case TPM_CC_CreatePrimary:
		case TPM_CC_Create:
		case TPM_CC_CreateLoaded:
			timeout = TPM_TIMEOUT_LONG;
			break;
		case TPM_CC_SequenceComplete:
		case TPM_CC_Startup:
		case TPM_CC_SequenceUpdate:
		case TPM_CC_GetCapability:
		case TPM_CC_PCR_Extend:
		case TPM_CC_EventSequenceComplete:
		case TPM_CC_HashSequenceStart:
			timeout = TPM_TIMEOUT_C;
			break;
		default:
			timeout = TPM_TIMEOUT_B;
			break;
	}
	return (timeout);
}
359