xref: /dragonfly/sys/dev/crypto/tpm/tpm.c (revision c51f15da)
1 /*
2  * Copyright (c) 2008, 2009 Michael Shalayeff
3  * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
4  * All rights reserved.
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
15  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
16  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  *
18  * $FreeBSD: head/sys/dev/tpm/tpm.c 365144 2020-09-01 21:50:31Z mjg $
19  */
20 
21 /* #define	TPM_DEBUG */
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/kernel.h>
26 #include <sys/malloc.h>
27 #include <sys/proc.h>
28 
29 #include <sys/module.h>
30 #include <sys/conf.h>
31 #include <sys/uio.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/thread2.h>
35 
36 #include <sys/rman.h>
37 
38 #include <machine/md_var.h>
39 
40 #include <bus/isa/isareg.h>
41 #include <bus/isa/isavar.h>
42 
43 #include <dev/crypto/tpm/tpmvar.h>
44 
45 #define	TPM_BUFSIZ	1024
46 
47 #define TPM_HDRSIZE	10
48 
49 #define TPM_PARAM_SIZE	0x0001
50 
51 #define IRQUNK	-1
52 
53 #define	TPM_ACCESS			0x0000	/* access register */
54 #define	TPM_ACCESS_ESTABLISHMENT	0x01	/* establishment */
55 #define	TPM_ACCESS_REQUEST_USE		0x02	/* request using locality */
56 #define	TPM_ACCESS_REQUEST_PENDING	0x04	/* pending request */
57 #define	TPM_ACCESS_SEIZE		0x08	/* request locality seize */
58 #define	TPM_ACCESS_SEIZED		0x10	/* locality has been seized */
59 #define	TPM_ACCESS_ACTIVE_LOCALITY	0x20	/* locality is active */
60 #define	TPM_ACCESS_VALID		0x80	/* bits are valid */
61 #define	TPM_ACCESS_BITS	\
62     "\020\01EST\02REQ\03PEND\04SEIZE\05SEIZED\06ACT\010VALID"
63 
64 #define	TPM_INTERRUPT_ENABLE	0x0008
65 #define	TPM_GLOBAL_INT_ENABLE	0x80000000	/* enable ints */
66 #define	TPM_CMD_READY_INT	0x00000080	/* cmd ready enable */
67 #define	TPM_INT_EDGE_FALLING	0x00000018
68 #define	TPM_INT_EDGE_RISING	0x00000010
69 #define	TPM_INT_LEVEL_LOW	0x00000008
70 #define	TPM_INT_LEVEL_HIGH	0x00000000
71 #define	TPM_LOCALITY_CHANGE_INT	0x00000004	/* locality change enable */
72 #define	TPM_STS_VALID_INT	0x00000002	/* int when TPM_STS_VALID is set */
73 #define	TPM_DATA_AVAIL_INT	0x00000001	/* int when TPM_STS_DATA_AVAIL is set */
74 #define	TPM_INTERRUPT_ENABLE_BITS \
75     "\020\040ENA\010RDY\03LOCH\02STSV\01DRDY"
76 
77 #define	TPM_INT_VECTOR		0x000c	/* 8 bit reg for 4 bit irq vector */
78 #define	TPM_INT_STATUS		0x0010	/* bits are & 0x87 from TPM_INTERRUPT_ENABLE */
79 
80 #define	TPM_INTF_CAPABILITIES		0x0014	/* capability register */
81 #define	TPM_INTF_BURST_COUNT_STATIC	0x0100	/* TPM_STS_BMASK static */
82 #define	TPM_INTF_CMD_READY_INT		0x0080	/* int on ready supported */
83 #define	TPM_INTF_INT_EDGE_FALLING	0x0040	/* falling edge ints supported */
84 #define	TPM_INTF_INT_EDGE_RISING	0x0020	/* rising edge ints supported */
85 #define	TPM_INTF_INT_LEVEL_LOW		0x0010	/* level-low ints supported */
86 #define	TPM_INTF_INT_LEVEL_HIGH		0x0008	/* level-high ints supported */
87 #define	TPM_INTF_LOCALITY_CHANGE_INT	0x0004	/* locality-change int (mb 1) */
88 #define	TPM_INTF_STS_VALID_INT		0x0002	/* TPM_STS_VALID int supported */
89 #define	TPM_INTF_DATA_AVAIL_INT		0x0001	/* TPM_STS_DATA_AVAIL int supported (mb 1) */
90 #define	TPM_CAPSREQ \
91   (TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT|TPM_INTF_INT_LEVEL_LOW)
92 #define	TPM_CAPBITS \
93   "\020\01IDRDY\02ISTSV\03ILOCH\04IHIGH\05ILOW\06IEDGE\07IFALL\010IRDY\011BCST"
94 
95 #define	TPM_STS			0x0018		/* status register */
96 #define TPM_STS_MASK		0x000000ff	/* status bits */
97 #define	TPM_STS_BMASK		0x00ffff00	/* ro io burst size */
98 #define	TPM_STS_VALID		0x00000080	/* ro other bits are valid */
99 #define	TPM_STS_CMD_READY	0x00000040	/* rw chip/signal ready */
100 #define	TPM_STS_GO		0x00000020	/* wo start the command */
101 #define	TPM_STS_DATA_AVAIL	0x00000010	/* ro data available */
102 #define	TPM_STS_DATA_EXPECT	0x00000008	/* ro more data to be written */
103 #define	TPM_STS_RESP_RETRY	0x00000002	/* wo resend the response */
104 #define	TPM_STS_BITS	"\020\010VALID\07RDY\06GO\05DRDY\04EXPECT\02RETRY"
105 
106 #define	TPM_DATA	0x0024
107 #define	TPM_ID		0x0f00
108 #define	TPM_REV		0x0f04
109 #define	TPM_SIZE	0x5000		/* five pages of the above */
110 
111 #define	TPM_ACCESS_TMO	2000		/* 2sec */
112 #define	TPM_READY_TMO	2000		/* 2sec */
113 #define	TPM_READ_TMO	120000		/* 2 minutes */
114 #define TPM_BURST_TMO	2000		/* 2sec */
115 
116 #define	TPM_LEGACY_BUSY	0x01
117 #define	TPM_LEGACY_ABRT	0x01
118 #define	TPM_LEGACY_DA	0x02
119 #define	TPM_LEGACY_RE	0x04
120 #define	TPM_LEGACY_LAST	0x04
121 #define	TPM_LEGACY_BITS	"\020\01BUSY\2DA\3RE\4LAST"
122 #define	TPM_LEGACY_TMO		(2*60)	/* sec */
123 #define	TPM_LEGACY_SLEEP	5	/* ticks */
124 #define	TPM_LEGACY_DELAY	100
125 
126 /* Set when enabling legacy interface in host bridge. */
127 int tpm_enabled;
128 
129 #define	TPMSOFTC(dev) \
130 	((struct tpm_softc *)dev->si_drv1)
131 
132 d_open_t	tpmopen;
133 d_close_t	tpmclose;
134 d_read_t	tpmread;
135 d_write_t	tpmwrite;
136 d_ioctl_t	tpmioctl;
137 
138 static struct dev_ops tpm_ops = {
139 	{ "tpm", 0, 0 },
140 	.d_open =	tpmopen,
141 	.d_close =	tpmclose,
142 	.d_read =	tpmread,
143 	.d_write =	tpmwrite,
144 	.d_ioctl =	tpmioctl,
145 };
146 
147 const struct {
148 	u_int32_t devid;
149 	char name[32];
150 	int flags;
151 #define TPM_DEV_NOINTS	0x0001
152 } tpm_devs[] = {
153 	{ 0x000615d1, "IFX SLD 9630 TT 1.1", 0 },
154 	{ 0x000b15d1, "IFX SLB 9635 TT 1.2", 0 },
155 	{ 0x100214e4, "Broadcom BCM0102", TPM_DEV_NOINTS },
156 	{ 0x00fe1050, "WEC WPCT200", 0 },
157 	{ 0x687119fa, "SNS SSX35", 0 },
158 	{ 0x2e4d5453, "STM ST19WP18", 0 },
159 	{ 0x32021114, "ATML 97SC3203", TPM_DEV_NOINTS },
160 	{ 0x10408086, "INTEL INTC0102", 0 },
161 	{ 0, "", TPM_DEV_NOINTS },
162 };
163 
164 int tpm_tis12_irqinit(struct tpm_softc *, int, int);
165 int tpm_tis12_init(struct tpm_softc *, int, const char *);
166 int tpm_tis12_start(struct tpm_softc *, int);
167 int tpm_tis12_read(struct tpm_softc *, void *, int, size_t *, int);
168 int tpm_tis12_write(struct tpm_softc *, void *, int);
169 int tpm_tis12_end(struct tpm_softc *, int, int);
170 
171 void tpm_intr(void *);
172 
173 int tpm_waitfor_poll(struct tpm_softc *, u_int8_t, int, void *);
174 int tpm_waitfor_int(struct tpm_softc *, u_int8_t, int, void *, int);
175 int tpm_waitfor(struct tpm_softc *, u_int8_t, int, void *);
176 int tpm_request_locality(struct tpm_softc *, int);
177 int tpm_getburst(struct tpm_softc *);
178 u_int8_t tpm_status(struct tpm_softc *);
179 int tpm_tmotohz(int);
180 
181 int tpm_legacy_probe(bus_space_tag_t, bus_addr_t);
182 int tpm_legacy_init(struct tpm_softc *, int, const char *);
183 int tpm_legacy_start(struct tpm_softc *, int);
184 int tpm_legacy_read(struct tpm_softc *, void *, int, size_t *, int);
185 int tpm_legacy_write(struct tpm_softc *, void *, int);
186 int tpm_legacy_end(struct tpm_softc *, int, int);
187 
188 /*
189  * FreeBSD-specific code for probing and attaching the TPM to the device tree.
190  */
191 #if 0
192 static void
193 tpm_identify(driver_t *driver, device_t parent)
194 {
195 	BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "tpm", 0);
196 }
197 #endif
198 
199 int
200 tpm_attach(device_t dev)
201 {
202 	struct tpm_softc *sc = device_get_softc(dev);
203 	int irq;
204 
205 	sc->mem_rid = 0;
206 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
207 	    RF_ACTIVE);
208 	if (sc->mem_res == NULL)
209 		return ENXIO;
210 
211 	sc->sc_bt = rman_get_bustag(sc->mem_res);
212 	sc->sc_bh = rman_get_bushandle(sc->mem_res);
213 
214 	sc->irq_rid = 0;
215 	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
216 	    RF_ACTIVE | RF_SHAREABLE);
217 	if (sc->irq_res != NULL)
218 		irq = rman_get_start(sc->irq_res);
219 	else
220 		irq = IRQUNK;
221 
222 	/* In the PnP probe case this call may perform some initialization. */
223 	tpm_tis12_probe(sc->sc_bt, sc->sc_bh);
224 
225 	if (tpm_legacy_probe(sc->sc_bt, sc->sc_bh)) {
226 		sc->sc_init = tpm_legacy_init;
227 		sc->sc_start = tpm_legacy_start;
228 		sc->sc_read = tpm_legacy_read;
229 		sc->sc_write = tpm_legacy_write;
230 		sc->sc_end = tpm_legacy_end;
231 	} else {
232 		sc->sc_init = tpm_tis12_init;
233 		sc->sc_start = tpm_tis12_start;
234 		sc->sc_read = tpm_tis12_read;
235 		sc->sc_write = tpm_tis12_write;
236 		sc->sc_end = tpm_tis12_end;
237 	}
238 
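	/*
	 * Print the device name without a trailing newline; sc_init()
	 * finishes the line with the chip identification (": ... rev ...").
	 */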
239 	kprintf("%s", device_get_name(dev));
240 	if ((sc->sc_init)(sc, irq, "tpm")) {
241 		tpm_detach(dev);
242 		return ENXIO;
243 	}
244 
245 	if (sc->sc_init == tpm_tis12_init && sc->irq_res != NULL &&
246 	    bus_setup_intr(dev, sc->irq_res, 0,
247 	    tpm_intr, sc, &sc->intr_cookie, NULL) != 0) {
248 		tpm_detach(dev);
249 		kprintf(": cannot establish interrupt\n");
250 		return 1;
251 	}
252 
253 	sc->sc_cdev = make_dev(&tpm_ops, device_get_unit(dev),
254 			    UID_ROOT, GID_WHEEL, 0600, "tpm");
255 	sc->sc_cdev->si_drv1 = sc;
256 
257 	return 0;
258 }
259 
260 int
261 tpm_detach(device_t dev)
262 {
263 	struct tpm_softc *sc = device_get_softc(dev);
264 
265 	if (sc->intr_cookie) {
266 		bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
267 	}
268 
269 	if (sc->mem_res) {
270 		bus_release_resource(dev, SYS_RES_MEMORY,
271 				     sc->mem_rid, sc->mem_res);
272 	}
273 
274 	if (sc->irq_res) {
275 		bus_release_resource(dev, SYS_RES_IRQ,
276 				     sc->irq_rid, sc->irq_res);
277 	}
278 	if (sc->sc_cdev) {
279 		destroy_dev(sc->sc_cdev);
280 	}
281 
282 	return 0;
283 }
284 
285 /* Probe TPM using TIS 1.2 interface. */
286 int
287 tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
288 {
289 	u_int32_t r;
290 	u_int8_t save, reg;
291 
292 	r = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITIES);
293 	if (r == 0xffffffff)
294 		return 0;
295 
296 #ifdef TPM_DEBUG
297 	kprintf("tpm: caps=%pb%i\n", TPM_CAPBITS, r);
298 #endif
299 	if ((r & TPM_CAPSREQ) != TPM_CAPSREQ ||
300 	    !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) {
301 #ifdef TPM_DEBUG
302 		kprintf("tpm: caps too low (caps=%pb%i)\n", TPM_CAPBITS, r);
303 #endif
304 		return 0;
305 	}
306 
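	/*
	 * Try to claim locality 0: save the access register, request use,
	 * and check that the chip reports a valid, active locality and a
	 * sane device ID.  The saved value is restored if the probe fails.
	 */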
307 	save = bus_space_read_1(bt, bh, TPM_ACCESS);
308 	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);
309 	reg = bus_space_read_1(bt, bh, TPM_ACCESS);
310 	if ((reg & TPM_ACCESS_VALID) && (reg & TPM_ACCESS_ACTIVE_LOCALITY) &&
311 	    bus_space_read_4(bt, bh, TPM_ID) != 0xffffffff)
312 		return 1;
313 
314 	bus_space_write_1(bt, bh, TPM_ACCESS, save);
315 	return 0;
316 }
317 
318 /*
319  * Setup interrupt vector if one is provided and interrupts are known to
320  * work on that particular chip.
321  */
322 int
323 tpm_tis12_irqinit(struct tpm_softc *sc, int irq, int idx)
324 {
325 	u_int32_t r;
326 
327 	if ((irq == IRQUNK) || (tpm_devs[idx].flags & TPM_DEV_NOINTS)) {
328 		sc->sc_vector = IRQUNK;
329 		return 0;
330 	}
331 
332 	/* Ack and disable all interrupts. */
333 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
334 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
335 	    ~TPM_GLOBAL_INT_ENABLE);
336 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS,
337 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS));
338 
339 	/* Program interrupt vector. */
340 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_INT_VECTOR, irq);
341 	sc->sc_vector = irq;
342 
343 	/* Program interrupt type. */
344 	if (sc->sc_capabilities & TPM_INTF_INT_EDGE_RISING)
345 		r = TPM_INT_EDGE_RISING;
346 	else if (sc->sc_capabilities & TPM_INTF_INT_LEVEL_HIGH)
347 		r = TPM_INT_LEVEL_HIGH;
348 	else
349 		r = TPM_INT_LEVEL_LOW;
350 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, r);
351 
352 	return 0;
353 }
354 
355 /* Setup TPM using TIS 1.2 interface. */
356 int
357 tpm_tis12_init(struct tpm_softc *sc, int irq, const char *name)
358 {
359 	u_int32_t r;
360 	int i;
361 
362 	r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTF_CAPABILITIES);
363 #ifdef TPM_DEBUG
364 	kprintf(" caps=%pb%i ", TPM_CAPBITS, r);
365 #endif
366 	if ((r & TPM_CAPSREQ) != TPM_CAPSREQ ||
367 	    !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) {
368 		kprintf(": capabilities too low (caps=%pb%i)\n",
369 		    TPM_CAPBITS, r);
370 		return 1;
371 	}
372 	sc->sc_capabilities = r;
373 
374 	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
375 	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);
376 
377 	for (i = 0; tpm_devs[i].devid; i++)
378 		if (tpm_devs[i].devid == sc->sc_devid)
379 			break;
380 
381 	if (tpm_devs[i].devid)
382 		kprintf(": %s rev 0x%x\n", tpm_devs[i].name, sc->sc_rev);
383 	else
384 		kprintf(": device 0x%08x rev 0x%x\n", sc->sc_devid, sc->sc_rev);
385 
386 	if (tpm_tis12_irqinit(sc, irq, i))
387 		return 1;
388 
389 	if (tpm_request_locality(sc, 0))
390 		return 1;
391 
392 	/* Abort whatever it thought it was doing. */
393 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
394 
395 	return 0;
396 }
397 
398 int
399 tpm_request_locality(struct tpm_softc *sc, int l)
400 {
401 	u_int32_t r;
402 	int to, rv;
403 
404 	if (l != 0)
405 		return EINVAL;
406 
407 	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
408 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
409 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
410 		return 0;
411 
412 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
413 	    TPM_ACCESS_REQUEST_USE);
414 
415 	to = tpm_tmotohz(TPM_ACCESS_TMO);
416 
417 	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
418 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
419 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
420 		rv = tsleep(sc->sc_init, PCATCH, "tpm_locality", 1);
421 		if (rv && rv != EWOULDBLOCK) {
422 #ifdef TPM_DEBUG
423 			kprintf("%s: interrupted %d\n", __func__, rv);
424 #endif
425 			return rv;
426 		}
427 	}
428 
429 	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
430 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
431 #ifdef TPM_DEBUG
432 		kprintf("%s: access %pb%i\n", __func__, TPM_ACCESS_BITS, r);
433 #endif
434 		return EBUSY;
435 	}
436 
437 	return 0;
438 }
439 
440 int
441 tpm_getburst(struct tpm_softc *sc)
442 {
443 	int burst, to, rv;
444 
445 	to = tpm_tmotohz(TPM_BURST_TMO);
446 
447 	burst = 0;
448 	while (burst == 0 && to--) {
449 		/*
450 		 * Burst count has to be read from bits 8 to 23 without
451 		 * touching any other bits, eg. the actual status bits 0
452 		 * to 7.
453 		 */
454 		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
455 		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
456 		    << 8;
457 #ifdef TPM_DEBUG
458 		kprintf("%s: read %d\n", __func__, burst);
459 #endif
460 		if (burst)
461 			return burst;
462 
463 		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
464 		if (rv && rv != EWOULDBLOCK) {
465 			return 0;
466 		}
467 	}
468 
469 	return 0;
470 }
471 
472 u_int8_t
473 tpm_status(struct tpm_softc *sc)
474 {
475 	u_int8_t status;
476 
477 	status = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
478 	    TPM_STS_MASK;
479 
480 	return status;
481 }
482 
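/* Convert a timeout in milliseconds to a tick count for tsleep(9). */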
483 int
484 tpm_tmotohz(int tmo)
485 {
486 	struct timeval tv;
487 
488 	tv.tv_sec = tmo / 1000;
489 	tv.tv_usec = 1000 * (tmo % 1000);
490 
491 	return tvtohz_high(&tv);
492 }
493 
494 /* Save TPM state on suspend. */
495 int
496 tpm_suspend(device_t dev)
497 {
498 	struct tpm_softc *sc = device_get_softc(dev);
499 	int why = 1;
500 	u_int8_t command[] = {
501 	    0, 193,		/* TPM_TAG_RQU_COMMAND */
502 	    0, 0, 0, 10,	/* Length in bytes */
503 	    0, 0, 0, 156	/* TPM_ORD_SaveStates */
504 	};
505 
506 	/*
507 	 * Power down:  We have to issue the SaveStates command.
508 	 */
509 	sc->sc_write(sc, &command, sizeof(command));
510 	sc->sc_read(sc, &command, sizeof(command), NULL, TPM_HDRSIZE);
511 #ifdef TPM_DEBUG
512 	kprintf("%s: power down: %d -> %d\n", __func__, sc->sc_suspend, why);
513 #endif
514 	sc->sc_suspend = why;
515 
516 	return 0;
517 }
518 
519 /*
520  * Handle resume event.  Actually nothing to do as the BIOS is supposed
521  * to restore the previously saved state.
522  */
523 int
524 tpm_resume(device_t dev)
525 {
526 	struct tpm_softc *sc = device_get_softc(dev);
527 	int why = 0;
528 
529 #ifdef TPM_DEBUG
530 	kprintf("%s: resume: %d -> %d\n", __func__, sc->sc_suspend, why);
531 #endif
532 	sc->sc_suspend = why;
533 
534 	return 0;
535 }
536 
537 /* Wait for given status bits using polling. */
538 int
539 tpm_waitfor_poll(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c)
540 {
541 	int rv;
542 
543 	/*
544 	 * Poll until either the requested condition is met or the
545 	 * timeout expires.
546 	 */
547 	while (((sc->sc_stat = tpm_status(sc)) & mask) != mask && tmo--) {
548 		rv = tsleep(c, PCATCH, "tpm_poll", 1);
549 		if (rv && rv != EWOULDBLOCK) {
550 #ifdef TPM_DEBUG
551 			kprintf("%s: interrupted %d\n", __func__, rv);
552 #endif
553 			return rv;
554 		}
555 	}
556 
557 	return 0;
558 }
559 
560 /* Wait for given status bits using interrupts. */
561 int
562 tpm_waitfor_int(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c,
563     int inttype)
564 {
565 	int rv, to;
566 
567 	/* Poll and return when condition is already met. */
568 	sc->sc_stat = tpm_status(sc);
569 	if ((sc->sc_stat & mask) == mask)
570 		return 0;
571 
572 	/*
573 	 * Enable interrupt on tpm chip.  Note that interrupts on our
574 	 * level (SPL_TTY) are disabled (see tpm{read,write} et al) and
575 	 * will not be delivered to the cpu until we call tsleep(9) below.
576 	 */
577 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
578 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) |
579 	    inttype);
580 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
581 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) |
582 	    TPM_GLOBAL_INT_ENABLE);
583 
584 	/*
585 	 * Poll once more to remedy the race between previous polling
586 	 * and enabling interrupts on the tpm chip.
587 	 */
588 	sc->sc_stat = tpm_status(sc);
589 	if ((sc->sc_stat & mask) == mask) {
590 		rv = 0;
591 		goto out;
592 	}
593 
594 	to = tpm_tmotohz(tmo);
595 #ifdef TPM_DEBUG
596 	kprintf("%s: sleeping for %d ticks on %p\n", __func__, to, c);
597 #endif
598 	/*
599 	 * tsleep(9) enables interrupts on the cpu and returns after
600 	 * wake up with interrupts disabled again.  Note that interrupts
601 	 * generated by the tpm chip while being at SPL_TTY are not lost
602 	 * but held and delivered as soon as the cpu goes below SPL_TTY.
603 	 */
604 	rv = tsleep(c, PCATCH, "tpm_intr", to);
605 
606 	sc->sc_stat = tpm_status(sc);
607 #ifdef TPM_DEBUG
608 	kprintf("%s: woke up with rv %d stat %pb%i\n", __func__, rv,
609 	    TPM_STS_BITS, sc->sc_stat);
610 #endif
611 	if ((sc->sc_stat & mask) == mask)
612 		rv = 0;
613 
614 	/* Disable interrupts on tpm chip again. */
615 out:	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
616 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
617 	    ~TPM_GLOBAL_INT_ENABLE);
618 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
619 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
620 	    ~inttype);
621 
622 	return rv;
623 }
624 
625 /*
626  * Wait on given status bits; use interrupts where possible, otherwise poll.
627  */
628 int
629 tpm_waitfor(struct tpm_softc *sc, u_int8_t b0, int tmo, void *c)
630 {
631 	u_int8_t b;
632 	int re, to, rv;
633 
634 #ifdef TPM_DEBUG
635 	kprintf("%s: b0 %pb%i\n", __func__, TPM_STS_BITS, b0);
636 #endif
637 
638 	/*
639 	 * If possible, use interrupts, otherwise poll.
640 	 *
641 	 * We use interrupts for TPM_STS_VALID and TPM_STS_DATA_AVAIL (if
642 	 * the tpm chip supports them) as waiting for those can take
643 	 * really long.  The other TPM_STS* are not needed very often
644 	 * so we do not support them.
645 	 */
646 	if (sc->sc_vector != IRQUNK) {
647 		b = b0;
648 
649 		/*
650 		 * Wait for data ready.  This interrupt only occurs
651 		 * when both TPM_STS_VALID and TPM_STS_DATA_AVAIL are asserted.
652 		 * Thus we don't have to bother with TPM_STS_VALID
653 		 * separately and can just return.
654 		 *
655 		 * This only holds for interrupts!  When using polling
656 		 * both flags have to be waited for, see below.
657 		 */
658 		if ((b & TPM_STS_DATA_AVAIL) && (sc->sc_capabilities &
659 		    TPM_INTF_DATA_AVAIL_INT))
660 			return tpm_waitfor_int(sc, b, tmo, c,
661 			    TPM_DATA_AVAIL_INT);
662 
663 		/* Wait for status valid bit. */
664 		if ((b & TPM_STS_VALID) && (sc->sc_capabilities &
665 		    TPM_INTF_STS_VALID_INT)) {
666 			rv = tpm_waitfor_int(sc, b, tmo, c, TPM_STS_VALID_INT);
667 			if (rv != 0)
668 				return rv;
669 			else
670 				b = b0 & ~TPM_STS_VALID;
671 		}
672 
673 		/*
674 		 * When all flags are taken care of, return.  Otherwise
675 		 * use polling for eg. TPM_STS_CMD_READY.
676 		 */
677 		if (b == 0)
678 			return 0;
679 	}
680 
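	/*
	 * Fall back to polling.  On a timeout we may ask the chip to
	 * resend its response (TPM_STS_RESP_RETRY) and retry up to
	 * three times.
	 */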
681 	re = 3;
682 restart:
683 	/*
684 	 * If requested wait for TPM_STS_VALID before dealing with
685 	 * any other flag.  Eg. when both TPM_STS_DATA_AVAIL and TPM_STS_VALID
686 	 * are requested, wait for the latter first.
687 	 */
688 	b = b0;
689 	if (b0 & TPM_STS_VALID)
690 		b = TPM_STS_VALID;
691 
692 	to = tpm_tmotohz(tmo);
693 again:
694 	if ((rv = tpm_waitfor_poll(sc, b, to, c)) != 0)
695 		return rv;
696 
697 	if ((b & sc->sc_stat) == TPM_STS_VALID) {
698 		/* Now wait for other flags. */
699 		b = b0 & ~TPM_STS_VALID;
700 		to++;
701 		goto again;
702 	}
703 
704 	if ((sc->sc_stat & b) != b) {
705 #ifdef TPM_DEBUG
706 		kprintf("%s: timeout: stat=%pb%i b=%pb%i\n", __func__,
707 		    TPM_STS_BITS, sc->sc_stat, TPM_STS_BITS, b);
708 #endif
709 		if (re-- && (b0 & TPM_STS_VALID)) {
710 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
711 			    TPM_STS_RESP_RETRY);
712 			goto restart;
713 		}
714 		return EIO;
715 	}
716 
717 	return 0;
718 }
719 
720 /* Start transaction. */
721 int
722 tpm_tis12_start(struct tpm_softc *sc, int flag)
723 {
724 	int rv;
725 
726 	if (flag == UIO_READ) {
727 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
728 		    TPM_READ_TMO, sc->sc_read);
729 		return rv;
730 	}
731 
732 	/* Own our (0th) locality. */
733 	if ((rv = tpm_request_locality(sc, 0)) != 0)
734 		return rv;
735 
736 	sc->sc_stat = tpm_status(sc);
737 	if (sc->sc_stat & TPM_STS_CMD_READY) {
738 #ifdef TPM_DEBUG
739 		kprintf("%s: UIO_WRITE status %pb%i\n", __func__,
740 		   TPM_STS_BITS, sc->sc_stat);
741 #endif
742 		return 0;
743 	}
744 
745 #ifdef TPM_DEBUG
746 	kprintf("%s: UIO_WRITE readying chip\n", __func__);
747 #endif
748 
749 	/* Abort previous and restart. */
750 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
751 	if ((rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
752 	    sc->sc_write))) {
753 #ifdef TPM_DEBUG
754 		kprintf("%s: UIO_WRITE readying failed %d\n", __func__, rv);
755 #endif
756 		return rv;
757 	}
758 
759 #ifdef TPM_DEBUG
760 	kprintf("%s: UIO_WRITE readying done\n", __func__);
761 #endif
762 
763 	return 0;
764 }
765 
766 int
767 tpm_tis12_read(struct tpm_softc *sc, void *buf, int len, size_t *count,
768     int flags)
769 {
770 	u_int8_t *p = buf;
771 	size_t cnt;
772 	int rv, n, bcnt;
773 
774 #ifdef TPM_DEBUG
775 	kprintf("%s: len %d\n", __func__, len);
776 #endif
777 	cnt = 0;
778 	while (len > 0) {
779 		if ((rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
780 		    TPM_READ_TMO, sc->sc_read)))
781 			return rv;
782 
783 		bcnt = tpm_getburst(sc);
784 		n = MIN(len, bcnt);
785 #ifdef TPM_DEBUG
786 		kprintf("%s: fetching %d, burst is %d\n", __func__, n, bcnt);
787 #endif
788 		for (; n--; len--) {
789 			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
790 			cnt++;
791 		}
792 
793 		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
794 			break;
795 	}
796 #ifdef TPM_DEBUG
797 	kprintf("%s: read %zd bytes, len %d\n", __func__, cnt, len);
798 #endif
799 
800 	if (count)
801 		*count = cnt;
802 
803 	return 0;
804 }
805 
806 int
807 tpm_tis12_write(struct tpm_softc *sc, void *buf, int len)
808 {
809 	u_int8_t *p = buf;
810 	size_t cnt;
811 	int rv, r;
812 
813 #ifdef TPM_DEBUG
814 	kprintf("%s: sc %p buf %p len %d\n", __func__, sc, buf, len);
815 #endif
816 
817 	if ((rv = tpm_request_locality(sc, 0)) != 0)
818 		return rv;
819 
820 	cnt = 0;
821 	while (cnt < len - 1) {
822 		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
823 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
824 			cnt++;
825 		}
826 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
827 #ifdef TPM_DEBUG
828 			kprintf("%s: failed burst rv %d\n", __func__, rv);
829 #endif
830 			return rv;
831 		}
832 		sc->sc_stat = tpm_status(sc);
833 		if (!(sc->sc_stat & TPM_STS_DATA_EXPECT)) {
834 #ifdef TPM_DEBUG
835 			kprintf("%s: failed rv %d stat=%pb%i\n", __func__, rv,
836 			    TPM_STS_BITS, sc->sc_stat);
837 #endif
838 			return EIO;
839 		}
840 	}
841 
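	/* Write the final byte; the chip should then clear DATA_EXPECT. */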
842 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
843 	cnt++;
844 
845 	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
846 #ifdef TPM_DEBUG
847 		kprintf("%s: failed last byte rv %d\n", __func__, rv);
848 #endif
849 		return rv;
850 	}
851 	if ((sc->sc_stat & TPM_STS_DATA_EXPECT) != 0) {
852 #ifdef TPM_DEBUG
853 		kprintf("%s: failed rv %d stat=%pb%i\n", __func__, rv,
854 		    TPM_STS_BITS, sc->sc_stat);
855 #endif
856 		return EIO;
857 	}
858 
859 #ifdef TPM_DEBUG
860 	kprintf("%s: wrote %zd byte\n", __func__, cnt);
861 #endif
862 
863 	return 0;
864 }
865 
866 /* Finish transaction. */
867 int
868 tpm_tis12_end(struct tpm_softc *sc, int flag, int err)
869 {
870 	int rv = 0;
871 
872 	if (flag == UIO_READ) {
873 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
874 		    sc->sc_read)))
875 			return rv;
876 
877 		/* Still more data? */
878 		sc->sc_stat = tpm_status(sc);
879 		if (!err && ((sc->sc_stat & TPM_STS_DATA_AVAIL) == TPM_STS_DATA_AVAIL)) {
880 #ifdef TPM_DEBUG
881 			kprintf("%s: read failed stat=%pb%i\n", __func__,
882 			    TPM_STS_BITS, sc->sc_stat);
883 #endif
884 			rv = EIO;
885 		}
886 
887 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
888 		    TPM_STS_CMD_READY);
889 
890 		/* Release our (0th) locality. */
891 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
892 		    TPM_ACCESS_ACTIVE_LOCALITY);
893 	} else {
894 		/* Hungry for more? */
895 		sc->sc_stat = tpm_status(sc);
896 		if (!err && (sc->sc_stat & TPM_STS_DATA_EXPECT)) {
897 #ifdef TPM_DEBUG
898 			kprintf("%s: write failed stat=%pb%i\n", __func__,
899 			    TPM_STS_BITS, sc->sc_stat);
900 #endif
901 			rv = EIO;
902 		}
903 
904 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
905 		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
906 	}
907 
908 	return rv;
909 }
910 
911 void
912 tpm_intr(void *v)
913 {
914 	struct tpm_softc *sc = v;
915 	u_int32_t r;
916 #ifdef TPM_DEBUG
917 	static int cnt = 0;
918 #endif
919 
920 	r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS);
921 #ifdef TPM_DEBUG
922 	if (r != 0)
923 		kprintf("%s: int=%pb%i (%d)\n", __func__,
924 		    TPM_INTERRUPT_ENABLE_BITS, r, cnt);
925 	else
926 		cnt++;
927 #endif
928 	if (!(r & (TPM_CMD_READY_INT | TPM_LOCALITY_CHANGE_INT |
929 	    TPM_STS_VALID_INT | TPM_DATA_AVAIL_INT)))
930 		return;
931 
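	/*
	 * Wake whoever is sleeping on the matching channel: sc for
	 * status-valid waits, sc_write for command-ready, sc_read for
	 * data-available and sc_init for locality changes.
	 */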
932 	if (r & TPM_STS_VALID_INT)
933 		wakeup(sc);
934 
935 	if (r & TPM_CMD_READY_INT)
936 		wakeup(sc->sc_write);
937 
938 	if (r & TPM_DATA_AVAIL_INT)
939 		wakeup(sc->sc_read);
940 
941 	if (r & TPM_LOCALITY_CHANGE_INT)
942 		wakeup(sc->sc_init);
943 
944 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS, r);
945 
946 	return;
947 }
948 
949 /* Read single byte using legacy interface. */
950 static inline u_int8_t
951 tpm_legacy_in(bus_space_tag_t iot, bus_space_handle_t ioh, int reg)
952 {
953 	bus_space_write_1(iot, ioh, 0, reg);
954 	return bus_space_read_1(iot, ioh, 1);
955 }
956 
957 #if 0
958 /* Write single byte using legacy interface. */
959 static inline void
960 tpm_legacy_out(bus_space_tag_t iot, bus_space_handle_t ioh, int reg, u_int8_t v)
961 {
962 	bus_space_write_1(iot, ioh, 0, reg);
963 	bus_space_write_1(iot, ioh, 1, v);
964 }
965 #endif
966 
967 /* Probe for TPM using legacy interface. */
968 int
969 tpm_legacy_probe(bus_space_tag_t iot, bus_addr_t iobase)
970 {
971 	bus_space_handle_t ioh;
972 	u_int8_t r, v;
973 	int i, rv = 0;
974 	char id[8];
975 
976 	if (!tpm_enabled || iobase == -1)
977 		return 0;
978 
979 #if 0 /* XXX swildner*/
980 	if (bus_space_map(iot, iobase, 2, 0, &ioh))
981 		return 0;
982 #else
983 	ioh = iobase;
984 #endif
985 
986 	v = bus_space_read_1(iot, ioh, 0);
987 	if (v == 0xff) {
988 		bus_space_unmap(iot, ioh, 2);
989 		return 0;
990 	}
991 	r = bus_space_read_1(iot, ioh, 1);
992 
993 	for (i = sizeof(id); i--; )
994 		id[i] = tpm_legacy_in(iot, ioh, TPM_ID + i);
995 
996 #ifdef TPM_DEBUG
997 	kprintf("%s: %.4s %d.%d.%d.%d\n", __func__,
998 	    &id[4], id[0], id[1], id[2], id[3]);
999 #endif
1000 	/*
1001 	 * The only chips using the legacy interface we are aware of are
1002 	 * by Atmel.  For other chips more signature would have to be added.
1003 	 */
1004 	if (!bcmp(&id[4], "ATML", 4))
1005 		rv = 1;
1006 
1007 	if (!rv) {
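		/* Not an Atmel part; restore the register values saved above. */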
1008 		bus_space_write_1(iot, ioh, 1, r);
1009 		bus_space_write_1(iot, ioh, 0, v);
1010 	}
1011 	bus_space_unmap(iot, ioh, 2);
1012 
1013 	return rv;
1014 }
1015 
1016 /* Setup TPM using legacy interface. */
1017 int
1018 tpm_legacy_init(struct tpm_softc *sc, int irq, const char *name)
1019 {
1020 	char id[8];
1021 	u_int8_t ioh, iol;
1022 	int i;
1023 
1024 #if 0 /* XXX swildner*/
1025 	if ((i = bus_space_map(sc->sc_batm, tpm_enabled, 2, 0, &sc->sc_bahm))) {
1026 		kprintf(": cannot map tpm registers (%d)\n", i);
1027 		tpm_enabled = 0;
1028 		return 1;
1029 	}
1030 #else
1031 	sc->sc_bahm = tpm_enabled;
1032 #endif
1033 
1034 	for (i = sizeof(id); i--; )
1035 		id[i] = tpm_legacy_in(sc->sc_bt, sc->sc_bh, TPM_ID + i);
1036 
1037 	kprintf(": %.4s %d.%d @0x%x\n", &id[4], id[0], id[1], tpm_enabled);
1038 	iol = tpm_enabled & 0xff;
1039 	ioh = tpm_enabled >> 16;
1040 	tpm_enabled = 0;
1041 
1042 	return 0;
1043 }
1044 
1045 /* Start transaction. */
1046 int
1047 tpm_legacy_start(struct tpm_softc *sc, int flag)
1048 {
1049 	struct timeval tv;
1050 	u_int8_t bits, r;
1051 	int to, rv;
1052 
1053 	bits = flag == UIO_READ ? TPM_LEGACY_DA : 0;
1054 	tv.tv_sec = TPM_LEGACY_TMO;
1055 	tv.tv_usec = 0;
1056 	to = tvtohz_high(&tv) / TPM_LEGACY_SLEEP;
1057 	while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
1058 	    (TPM_LEGACY_BUSY|bits)) != bits && to--) {
1059 		rv = tsleep(sc, PCATCH, "legacy_tpm_start",
1060 		    TPM_LEGACY_SLEEP);
1061 		if (rv && rv != EWOULDBLOCK)
1062 			return rv;
1063 	}
1064 
1065 	if ((r & (TPM_LEGACY_BUSY|bits)) != bits)
1066 		return EIO;
1067 
1068 	return 0;
1069 }
1070 
1071 int
1072 tpm_legacy_read(struct tpm_softc *sc, void *buf, int len, size_t *count,
1073     int flags)
1074 {
1075 	u_int8_t *p;
1076 	size_t cnt;
1077 	int to, rv;
1078 
1079 	cnt = rv = 0;
1080 	for (p = buf; !rv && len > 0; len--) {
1081 		for (to = 1000;
1082 		    !(bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1) &
1083 		    TPM_LEGACY_DA); DELAY(1))
1084 			if (!to--)
1085 				return EIO;
1086 
1087 		DELAY(TPM_LEGACY_DELAY);
1088 		*p++ = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 0);
1089 		cnt++;
1090 	}
1091 
1092 	*count = cnt;
1093 	return 0;
1094 }
1095 
1096 int
1097 tpm_legacy_write(struct tpm_softc *sc, void *buf, int len)
1098 {
1099 	u_int8_t *p;
1100 	int n;
1101 
1102 	for (p = buf, n = len; n--; DELAY(TPM_LEGACY_DELAY)) {
1103 		if (!n && len != TPM_BUFSIZ) {
1104 			bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1,
1105 			    TPM_LEGACY_LAST);
1106 			DELAY(TPM_LEGACY_DELAY);
1107 		}
1108 		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 0, *p++);
1109 	}
1110 
1111 	return 0;
1112 }
1113 
1114 /* Finish transaction. */
1115 int
1116 tpm_legacy_end(struct tpm_softc *sc, int flag, int rv)
1117 {
1118 	struct timeval tv;
1119 	u_int8_t r;
1120 	int to;
1121 
1122 	if (rv || flag == UIO_READ)
1123 		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1, TPM_LEGACY_ABRT);
1124 	else {
1125 		tv.tv_sec = TPM_LEGACY_TMO;
1126 		tv.tv_usec = 0;
1127 		to = tvtohz_high(&tv) / TPM_LEGACY_SLEEP;
1128 		while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
1129 		    TPM_LEGACY_BUSY) && to--) {
1130 			rv = tsleep(sc, PCATCH, "legacy_tpm_end",
1131 			    TPM_LEGACY_SLEEP);
1132 			if (rv && rv != EWOULDBLOCK)
1133 				return rv;
1134 		}
1135 
1136 		if (r & TPM_LEGACY_BUSY)
1137 			return EIO;
1138 
1139 		if (r & TPM_LEGACY_RE)
1140 			return EIO;	/* XXX Retry the loop? */
1141 	}
1142 
1143 	return rv;
1144 }
1145 
1146 int
1147 tpmopen(struct dev_open_args *ap)
1148 {
1149 	cdev_t dev = ap->a_head.a_dev;
1150 	struct tpm_softc *sc = TPMSOFTC(dev);
1151 
1152 	if (!sc)
1153 		return ENXIO;
1154 
1155 	if (sc->sc_flags & TPM_OPEN)
1156 		return EBUSY;
1157 
1158 	sc->sc_flags |= TPM_OPEN;
1159 
1160 	return 0;
1161 }
1162 
1163 int
1164 tpmclose(struct dev_close_args *ap)
1165 {
1166 	cdev_t dev = ap->a_head.a_dev;
1167 	struct tpm_softc *sc = TPMSOFTC(dev);
1168 
1169 	if (!sc)
1170 		return ENXIO;
1171 
1172 	if (!(sc->sc_flags & TPM_OPEN))
1173 		return EINVAL;
1174 
1175 	sc->sc_flags &= ~TPM_OPEN;
1176 
1177 	return 0;
1178 }
1179 
1180 int
1181 tpmread(struct dev_read_args *ap)
1182 {
1183 	cdev_t dev = ap->a_head.a_dev;
1184 	struct uio *uio = ap->a_uio;
1185 	struct tpm_softc *sc = TPMSOFTC(dev);
1186 	u_int8_t buf[TPM_BUFSIZ], *p;
1187 	size_t cnt;
1188 	int n, len, rv;
1189 
1190 	if (!sc)
1191 		return ENXIO;
1192 
1193 	crit_enter();
1194 	if ((rv = (sc->sc_start)(sc, UIO_READ))) {
1195 		crit_exit();
1196 		return rv;
1197 	}
1198 
1199 #ifdef TPM_DEBUG
1200 	kprintf("%s: getting header\n", __func__);
1201 #endif
1202 	if ((rv = (sc->sc_read)(sc, buf, TPM_HDRSIZE, &cnt, 0))) {
1203 		(sc->sc_end)(sc, UIO_READ, rv);
1204 		crit_exit();
1205 		return rv;
1206 	}
1207 
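	/*
	 * Bytes 2-5 of the TPM 1.2 response header hold the total
	 * response length (header included) in big-endian order.
	 */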
1208 	len = (buf[2] << 24) | (buf[3] << 16) | (buf[4] << 8) | buf[5];
1209 #ifdef TPM_DEBUG
1210 	kprintf("%s: len %d, io count %zd\n", __func__, len, uio->uio_resid);
1211 #endif
1212 	if (len > uio->uio_resid) {
1213 		rv = EIO;
1214 		(sc->sc_end)(sc, UIO_READ, rv);
1215 #ifdef TPM_DEBUG
1216 		kprintf("%s: bad residual io count 0x%zx\n", __func__,
1217 		    uio->uio_resid);
1218 #endif
1219 		crit_exit();
1220 		return rv;
1221 	}
1222 
1223 	/* Copy out header. */
1224 	if ((rv = uiomove((caddr_t)buf, cnt, uio))) {
1225 		(sc->sc_end)(sc, UIO_READ, rv);
1226 		crit_exit();
1227 		return rv;
1228 	}
1229 
1230 	/* Get remaining part of the answer (if anything is left). */
1231 	for (len -= cnt, p = buf, n = sizeof(buf); len > 0; p = buf, len -= n,
1232 	    n = sizeof(buf)) {
1233 		n = MIN(n, len);
1234 #ifdef TPM_DEBUG
1235 		kprintf("%s: n %d len %d\n", __func__, n, len);
1236 #endif
1237 		if ((rv = (sc->sc_read)(sc, p, n, NULL, TPM_PARAM_SIZE))) {
1238 			(sc->sc_end)(sc, UIO_READ, rv);
1239 			crit_exit();
1240 			return rv;
1241 		}
1242 		p += n;
1243 		if ((rv = uiomove((caddr_t)buf, p - buf, uio))) {
1244 			(sc->sc_end)(sc, UIO_READ, rv);
1245 			crit_exit();
1246 			return rv;
1247 		}
1248 	}
1249 
1250 	rv = (sc->sc_end)(sc, UIO_READ, rv);
1251 	crit_exit();
1252 	return rv;
1253 }
1254 
1255 int
1256 tpmwrite(struct dev_write_args *ap)
1257 {
1258 	cdev_t dev = ap->a_head.a_dev;
1259 	struct uio *uio = ap->a_uio;
1260 	struct tpm_softc *sc = TPMSOFTC(dev);
1261 	u_int8_t buf[TPM_BUFSIZ];
1262 	int n, rv;
1263 
1264 	if (!sc)
1265 		return ENXIO;
1266 
1267 	crit_enter();
1268 
1269 #ifdef TPM_DEBUG
1270 	kprintf("%s: io count %zd\n", __func__, uio->uio_resid);
1271 #endif
1272 
1273 	n = MIN(sizeof(buf), uio->uio_resid);
1274 	if ((rv = uiomove((caddr_t)buf, n, uio))) {
1275 		crit_exit();
1276 		return rv;
1277 	}
1278 
1279 	if ((rv = (sc->sc_start)(sc, UIO_WRITE))) {
1280 		crit_exit();
1281 		return rv;
1282 	}
1283 
1284 	if ((rv = (sc->sc_write(sc, buf, n)))) {
1285 		crit_exit();
1286 		return rv;
1287 	}
1288 
1289 	rv = (sc->sc_end)(sc, UIO_WRITE, rv);
1290 	crit_exit();
1291 	return rv;
1292 }
1293 
1294 int
1295 tpmioctl(struct dev_ioctl_args *ap)
1296 {
1297 	return ENOTTY;
1298 }
1299