xref: /dragonfly/sys/dev/crypto/tpm/tpm.c (revision 279dd846)
1 /*
2  * Copyright (c) 2008, 2009 Michael Shalayeff
3  * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
4  * All rights reserved.
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
15  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
16  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  *
18  * $FreeBSD: src/sys/dev/tpm/tpm.c,v 1.1 2010/08/12 00:16:18 takawata Exp $
19  */
20 
21 /* #define	TPM_DEBUG */
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/kernel.h>
26 #include <sys/malloc.h>
27 #include <sys/proc.h>
28 
29 #include <sys/module.h>
30 #include <sys/conf.h>
31 #include <sys/uio.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/thread2.h>
35 
36 #include <sys/rman.h>
37 
38 #include <machine/md_var.h>
39 
40 #include <bus/isa/isareg.h>
41 #include <bus/isa/isavar.h>
42 
43 #include <dev/crypto/tpm/tpmvar.h>
44 
45 #define	TPM_BUFSIZ	1024
46 
47 #define TPM_HDRSIZE	10
48 
49 #define TPM_PARAM_SIZE	0x0001
50 
51 #define IRQUNK	-1
52 
53 #define	TPM_ACCESS			0x0000	/* access register */
54 #define	TPM_ACCESS_ESTABLISHMENT	0x01	/* establishment */
55 #define	TPM_ACCESS_REQUEST_USE		0x02	/* request using locality */
56 #define	TPM_ACCESS_REQUEST_PENDING	0x04	/* pending request */
57 #define	TPM_ACCESS_SEIZE		0x08	/* request locality seize */
58 #define	TPM_ACCESS_SEIZED		0x10	/* locality has been seized */
59 #define	TPM_ACCESS_ACTIVE_LOCALITY	0x20	/* locality is active */
60 #define	TPM_ACCESS_VALID		0x80	/* bits are valid */
61 #define	TPM_ACCESS_BITS	\
62     "\020\01EST\02REQ\03PEND\04SEIZE\05SEIZED\06ACT\010VALID"
63 
64 #define	TPM_INTERRUPT_ENABLE	0x0008
65 #define	TPM_GLOBAL_INT_ENABLE	0x80000000	/* enable ints */
66 #define	TPM_CMD_READY_INT	0x00000080	/* cmd ready enable */
67 #define	TPM_INT_EDGE_FALLING	0x00000018
68 #define	TPM_INT_EDGE_RISING	0x00000010
69 #define	TPM_INT_LEVEL_LOW	0x00000008
70 #define	TPM_INT_LEVEL_HIGH	0x00000000
71 #define	TPM_LOCALITY_CHANGE_INT	0x00000004	/* locality change enable */
72 #define	TPM_STS_VALID_INT	0x00000002	/* int when TPM_STS_VALID is set */
73 #define	TPM_DATA_AVAIL_INT	0x00000001	/* int when TPM_STS_DATA_AVAIL is set */
74 #define	TPM_INTERRUPT_ENABLE_BITS \
75     "\020\040ENA\010RDY\03LOCH\02STSV\01DRDY"
76 
77 #define	TPM_INT_VECTOR		0x000c	/* 8 bit reg for 4 bit irq vector */
78 #define	TPM_INT_STATUS		0x0010	/* bits are & 0x87 from TPM_INTERRUPT_ENABLE */
79 
80 #define	TPM_INTF_CAPABILITIES		0x0014	/* capability register */
81 #define	TPM_INTF_BURST_COUNT_STATIC	0x0100	/* TPM_STS_BMASK static */
82 #define	TPM_INTF_CMD_READY_INT		0x0080	/* int on ready supported */
83 #define	TPM_INTF_INT_EDGE_FALLING	0x0040	/* falling edge ints supported */
84 #define	TPM_INTF_INT_EDGE_RISING	0x0020	/* rising edge ints supported */
85 #define	TPM_INTF_INT_LEVEL_LOW		0x0010	/* level-low ints supported */
86 #define	TPM_INTF_INT_LEVEL_HIGH		0x0008	/* level-high ints supported */
87 #define	TPM_INTF_LOCALITY_CHANGE_INT	0x0004	/* locality-change int (mb 1) */
88 #define	TPM_INTF_STS_VALID_INT		0x0002	/* TPM_STS_VALID int supported */
89 #define	TPM_INTF_DATA_AVAIL_INT		0x0001	/* TPM_STS_DATA_AVAIL int supported (mb 1) */
90 #define	TPM_CAPSREQ \
91   (TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT|TPM_INTF_INT_LEVEL_LOW)
92 #define	TPM_CAPBITS \
93   "\020\01IDRDY\02ISTSV\03ILOCH\04IHIGH\05ILOW\06IEDGE\07IFALL\010IRDY\011BCST"
94 
95 #define	TPM_STS			0x0018		/* status register */
96 #define TPM_STS_MASK		0x000000ff	/* status bits */
97 #define	TPM_STS_BMASK		0x00ffff00	/* ro io burst size */
98 #define	TPM_STS_VALID		0x00000080	/* ro other bits are valid */
99 #define	TPM_STS_CMD_READY	0x00000040	/* rw chip/signal ready */
100 #define	TPM_STS_GO		0x00000020	/* wo start the command */
101 #define	TPM_STS_DATA_AVAIL	0x00000010	/* ro data available */
102 #define	TPM_STS_DATA_EXPECT	0x00000008	/* ro more data to be written */
103 #define	TPM_STS_RESP_RETRY	0x00000002	/* wo resend the response */
104 #define	TPM_STS_BITS	"\020\010VALID\07RDY\06GO\05DRDY\04EXPECT\02RETRY"
105 
106 #define	TPM_DATA	0x0024
107 #define	TPM_ID		0x0f00
108 #define	TPM_REV		0x0f04
109 #define	TPM_SIZE	0x5000		/* five pages of the above */
110 
111 #define	TPM_ACCESS_TMO	2000		/* 2sec */
112 #define	TPM_READY_TMO	2000		/* 2sec */
113 #define	TPM_READ_TMO	120000		/* 2 minutes */
114 #define TPM_BURST_TMO	2000		/* 2sec */
115 
116 #define	TPM_LEGACY_BUSY	0x01
117 #define	TPM_LEGACY_ABRT	0x01
118 #define	TPM_LEGACY_DA	0x02
119 #define	TPM_LEGACY_RE	0x04
120 #define	TPM_LEGACY_LAST	0x04
121 #define	TPM_LEGACY_BITS	"\020\01BUSY\2DA\3RE\4LAST"
122 #define	TPM_LEGACY_TMO		(2*60)	/* sec */
123 #define	TPM_LEGACY_SLEEP	5	/* ticks */
124 #define	TPM_LEGACY_DELAY	100
125 
126 /* Set when enabling legacy interface in host bridge. */
127 int tpm_enabled;
128 
129 
130 #define	TPMSOFTC(dev) \
131 	((struct tpm_softc *)dev->si_drv1)
132 
133 d_open_t	tpmopen;
134 d_close_t	tpmclose;
135 d_read_t	tpmread;
136 d_write_t	tpmwrite;
137 d_ioctl_t	tpmioctl;
138 
139 static struct dev_ops tpm_ops = {
140 	{ "tpm", 0, 0 },
141 	.d_open =	tpmopen,
142 	.d_close =	tpmclose,
143 	.d_read =	tpmread,
144 	.d_write =	tpmwrite,
145 	.d_ioctl =	tpmioctl,
146 };
147 
148 const struct {
149 	u_int32_t devid;
150 	char name[32];
151 	int flags;
152 #define TPM_DEV_NOINTS	0x0001
153 } tpm_devs[] = {
154 	{ 0x000615d1, "IFX SLD 9630 TT 1.1", 0 },
155 	{ 0x000b15d1, "IFX SLB 9635 TT 1.2", 0 },
156 	{ 0x100214e4, "Broadcom BCM0102", TPM_DEV_NOINTS },
157 	{ 0x00fe1050, "WEC WPCT200", 0 },
158 	{ 0x687119fa, "SNS SSX35", 0 },
159 	{ 0x2e4d5453, "STM ST19WP18", 0 },
160 	{ 0x32021114, "ATML 97SC3203", TPM_DEV_NOINTS },
161 	{ 0x10408086, "INTEL INTC0102", 0 },
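	/* Terminator; unknown chips match here and run without interrupts. */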
162 	{ 0, "", TPM_DEV_NOINTS },
163 };
164 
165 int tpm_tis12_irqinit(struct tpm_softc *, int, int);
166 int tpm_tis12_init(struct tpm_softc *, int, const char *);
167 int tpm_tis12_start(struct tpm_softc *, int);
168 int tpm_tis12_read(struct tpm_softc *, void *, int, size_t *, int);
169 int tpm_tis12_write(struct tpm_softc *, void *, int);
170 int tpm_tis12_end(struct tpm_softc *, int, int);
171 
172 void tpm_intr(void *);
173 
174 int tpm_waitfor_poll(struct tpm_softc *, u_int8_t, int, void *);
175 int tpm_waitfor_int(struct tpm_softc *, u_int8_t, int, void *, int);
176 int tpm_waitfor(struct tpm_softc *, u_int8_t, int, void *);
177 int tpm_request_locality(struct tpm_softc *, int);
178 int tpm_getburst(struct tpm_softc *);
179 u_int8_t tpm_status(struct tpm_softc *);
180 int tpm_tmotohz(int);
181 
182 int tpm_legacy_probe(bus_space_tag_t, bus_addr_t);
183 int tpm_legacy_init(struct tpm_softc *, int, const char *);
184 int tpm_legacy_start(struct tpm_softc *, int);
185 int tpm_legacy_read(struct tpm_softc *, void *, int, size_t *, int);
186 int tpm_legacy_write(struct tpm_softc *, void *, int);
187 int tpm_legacy_end(struct tpm_softc *, int, int);
188 
189 /*
190  * FreeBSD specific code for probing and attaching TPM to device tree.
191  */
192 #if 0
193 static void
194 tpm_identify(driver_t *driver, device_t parent)
195 {
196 	BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "tpm", 0);
197 }
198 #endif
199 
200 
201 int
202 tpm_attach(device_t dev)
203 {
204 	struct tpm_softc *sc = device_get_softc(dev);
205 	int irq;
206 
207 	sc->mem_rid = 0;
208 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
209 	    RF_ACTIVE);
210 	if (sc->mem_res == NULL)
211 		return ENXIO;
212 
213 	sc->sc_bt = rman_get_bustag(sc->mem_res);
214 	sc->sc_bh = rman_get_bushandle(sc->mem_res);
215 
216 	sc->irq_rid = 0;
217 	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
218 	    RF_ACTIVE | RF_SHAREABLE);
219 	if (sc->irq_res != NULL)
220 		irq = rman_get_start(sc->irq_res);
221 	else
222 		irq = IRQUNK;
223 
224 	/* In the case of a PnP probe this may do some initialization. */
225 	tpm_tis12_probe(sc->sc_bt, sc->sc_bh);
226 
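	/* Choose the legacy Atmel interface if it probes, otherwise TIS 1.2. */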
227 	if (tpm_legacy_probe(sc->sc_bt, sc->sc_bh)) {
228 		sc->sc_init = tpm_legacy_init;
229 		sc->sc_start = tpm_legacy_start;
230 		sc->sc_read = tpm_legacy_read;
231 		sc->sc_write = tpm_legacy_write;
232 		sc->sc_end = tpm_legacy_end;
233 	} else {
234 		sc->sc_init = tpm_tis12_init;
235 		sc->sc_start = tpm_tis12_start;
236 		sc->sc_read = tpm_tis12_read;
237 		sc->sc_write = tpm_tis12_write;
238 		sc->sc_end = tpm_tis12_end;
239 	}
240 
241 	kprintf("%s", device_get_name(dev));
242 	if ((sc->sc_init)(sc, irq, "tpm")) {
243 		tpm_detach(dev);
244 		return ENXIO;
245 	}
246 
247 	if (sc->sc_init == tpm_tis12_init && sc->irq_res != NULL &&
248 	    bus_setup_intr(dev, sc->irq_res, 0,
249 	    tpm_intr, sc, &sc->intr_cookie, NULL) != 0) {
250 		tpm_detach(dev);
251 		kprintf(": cannot establish interrupt\n");
252 		return 1;
253 	}
254 
255 	sc->sc_cdev = make_dev(&tpm_ops, device_get_unit(dev),
256 			    UID_ROOT, GID_WHEEL, 0600, "tpm");
257 	sc->sc_cdev->si_drv1 = sc;
258 
259 	return 0;
260 }
261 
262 int
263 tpm_detach(device_t dev)
264 {
265 	struct tpm_softc *sc = device_get_softc(dev);
266 
267 	if (sc->intr_cookie) {
268 		bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
269 	}
270 
271 	if (sc->mem_res) {
272 		bus_release_resource(dev, SYS_RES_MEMORY,
273 				     sc->mem_rid, sc->mem_res);
274 	}
275 
276 	if (sc->irq_res) {
277 		bus_release_resource(dev, SYS_RES_IRQ,
278 				     sc->irq_rid, sc->irq_res);
279 	}
280 	if (sc->sc_cdev) {
281 		destroy_dev(sc->sc_cdev);
282 	}
283 
284 	return 0;
285 }
286 
287 /* Probe TPM using TIS 1.2 interface. */
288 int
289 tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
290 {
291 	u_int32_t r;
292 	u_int8_t save, reg;
293 
294 	r = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITIES);
295 	if (r == 0xffffffff)
296 		return 0;
297 
298 #ifdef TPM_DEBUG
299 	kprintf("tpm: caps=%b\n", r, TPM_CAPBITS);
300 #endif
301 	if ((r & TPM_CAPSREQ) != TPM_CAPSREQ ||
302 	    !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) {
303 #ifdef TPM_DEBUG
304 		kprintf("tpm: caps too low (caps=%b)\n", r, TPM_CAPBITS);
305 #endif
306 		return 0;
307 	}
308 
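	/* Request locality 0; a functional TIS chip reports it valid and active. */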
309 	save = bus_space_read_1(bt, bh, TPM_ACCESS);
310 	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);
311 	reg = bus_space_read_1(bt, bh, TPM_ACCESS);
312 	if ((reg & TPM_ACCESS_VALID) && (reg & TPM_ACCESS_ACTIVE_LOCALITY) &&
313 	    bus_space_read_4(bt, bh, TPM_ID) != 0xffffffff)
314 		return 1;
315 
316 	bus_space_write_1(bt, bh, TPM_ACCESS, save);
317 	return 0;
318 }
319 
320 /*
321  * Setup interrupt vector if one is provided and interrupts are known to
322  * work on that particular chip.
323  */
324 int
325 tpm_tis12_irqinit(struct tpm_softc *sc, int irq, int idx)
326 {
327 	u_int32_t r;
328 
329 	if ((irq == IRQUNK) || (tpm_devs[idx].flags & TPM_DEV_NOINTS)) {
330 		sc->sc_vector = IRQUNK;
331 		return 0;
332 	}
333 
334 	/* Ack and disable all interrupts. */
335 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
336 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
337 	    ~TPM_GLOBAL_INT_ENABLE);
338 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS,
339 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS));
340 
341 	/* Program interrupt vector. */
342 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_INT_VECTOR, irq);
343 	sc->sc_vector = irq;
344 
345 	/* Program interrupt type. */
346 	if (sc->sc_capabilities & TPM_INTF_INT_EDGE_RISING)
347 		r = TPM_INT_EDGE_RISING;
348 	else if (sc->sc_capabilities & TPM_INTF_INT_LEVEL_HIGH)
349 		r = TPM_INT_LEVEL_HIGH;
350 	else
351 		r = TPM_INT_LEVEL_LOW;
352 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, r);
353 
354 	return 0;
355 }
356 
357 /* Setup TPM using TIS 1.2 interface. */
358 int
359 tpm_tis12_init(struct tpm_softc *sc, int irq, const char *name)
360 {
361 	u_int32_t r;
362 	int i;
363 
364 	r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTF_CAPABILITIES);
365 #ifdef TPM_DEBUG
366 	kprintf(" caps=%b ", r, TPM_CAPBITS);
367 #endif
368 	if ((r & TPM_CAPSREQ) != TPM_CAPSREQ ||
369 	    !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) {
370 		kprintf(": capabilities too low (caps=%b)\n", r, TPM_CAPBITS);
371 		return 1;
372 	}
373 	sc->sc_capabilities = r;
374 
375 	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
376 	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);
377 
378 	for (i = 0; tpm_devs[i].devid; i++)
379 		if (tpm_devs[i].devid == sc->sc_devid)
380 			break;
381 
382 	if (tpm_devs[i].devid)
383 		kprintf(": %s rev 0x%x\n", tpm_devs[i].name, sc->sc_rev);
384 	else
385 		kprintf(": device 0x%08x rev 0x%x\n", sc->sc_devid, sc->sc_rev);
386 
387 	if (tpm_tis12_irqinit(sc, irq, i))
388 		return 1;
389 
390 	if (tpm_request_locality(sc, 0))
391 		return 1;
392 
393 	/* Abort whatever it thought it was doing. */
394 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
395 
396 	return 0;
397 }
398 
399 int
400 tpm_request_locality(struct tpm_softc *sc, int l)
401 {
402 	u_int32_t r;
403 	int to, rv;
404 
405 	if (l != 0)
406 		return EINVAL;
407 
408 	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
409 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
410 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
411 		return 0;
412 
413 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
414 	    TPM_ACCESS_REQUEST_USE);
415 
416 	to = tpm_tmotohz(TPM_ACCESS_TMO);
417 
418 	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
419 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
420 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
421 		rv = tsleep(sc->sc_init, PCATCH, "tpm_locality", 1);
422 		if (rv &&  rv != EWOULDBLOCK) {
423 #ifdef TPM_DEBUG
424 			kprintf("%s: interrupted %d\n", __func__, rv);
425 #endif
426 			return rv;
427 		}
428 	}
429 
430 	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
431 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
432 #ifdef TPM_DEBUG
433 		kprintf("%s: access %b\n", __func__, r, TPM_ACCESS_BITS);
434 #endif
435 		return EBUSY;
436 	}
437 
438 	return 0;
439 }
440 
441 int
442 tpm_getburst(struct tpm_softc *sc)
443 {
444 	int burst, to, rv;
445 
446 	to = tpm_tmotohz(TPM_BURST_TMO);
447 
448 	burst = 0;
449 	while (burst == 0 && to--) {
450 		/*
451 		 * Burst count has to be read from bits 8 to 23 without
452 		 * touching any other bits, eg. the actual status bits 0
453 		 * to 7.
454 		 */
455 		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
456 		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
457 		    << 8;
458 #ifdef TPM_DEBUG
459 		kprintf("%s: read %d\n", __func__, burst);
460 #endif
461 		if (burst)
462 			return burst;
463 
464 		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
465 		if (rv && rv != EWOULDBLOCK) {
466 			return 0;
467 		}
468 	}
469 
470 	return 0;
471 }
472 
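/* Return the low status byte; the burst-count bits are masked off. */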
473 u_int8_t
474 tpm_status(struct tpm_softc *sc)
475 {
476 	u_int8_t status;
477 
478 	status = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
479 	    TPM_STS_MASK;
480 
481 	return status;
482 }
483 
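/* Convert a timeout in milliseconds to kernel clock ticks. */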
484 int
485 tpm_tmotohz(int tmo)
486 {
487 	struct timeval tv;
488 
489 	tv.tv_sec = tmo / 1000;
490 	tv.tv_usec = 1000 * (tmo % 1000);
491 
492 	return tvtohz_high(&tv);
493 }
494 
495 /* Save TPM state on suspend. */
496 int
497 tpm_suspend(device_t dev)
498 {
499 	struct tpm_softc *sc = device_get_softc(dev);
500 	int why = 1;
501 	u_int8_t command[] = {
502 	    0, 193,		/* TPM_TAG_RQU_COMMAND */
503 	    0, 0, 0, 10,	/* Length in bytes */
504 	    0, 0, 0, 156	/* TPM_ORD_SaveStates */
505 	};
506 
507 	/*
508 	 * Power down:  We have to issue the SaveStates command.
509 	 */
510 	sc->sc_write(sc, &command, sizeof(command));
511 	sc->sc_read(sc, &command, sizeof(command), NULL, TPM_HDRSIZE);
512 #ifdef TPM_DEBUG
513 	kprintf("%s: power down: %d -> %d\n", __func__, sc->sc_suspend, why);
514 #endif
515 	sc->sc_suspend = why;
516 
517 	return 0;
518 }
519 
520 /*
521  * Handle resume event.  Actually nothing to do as the BIOS is supposed
522  * to restore the previously saved state.
523  */
524 int
525 tpm_resume(device_t dev)
526 {
527 	struct tpm_softc *sc = device_get_softc(dev);
528 	int why = 0;
529 
530 #ifdef TPM_DEBUG
531 	kprintf("%s: resume: %d -> %d\n", __func__, sc->sc_suspend, why);
532 #endif
533 	sc->sc_suspend = why;
534 
535 	return 0;
536 }
537 
538 /* Wait for given status bits using polling. */
539 int
540 tpm_waitfor_poll(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c)
541 {
542 	int rv;
543 
544 	/*
545 	 * Poll until either the requested condition or a time out is
546 	 * met.
547 	 */
548 	while (((sc->sc_stat = tpm_status(sc)) & mask) != mask && tmo--) {
549 		rv = tsleep(c, PCATCH, "tpm_poll", 1);
550 		if (rv && rv != EWOULDBLOCK) {
551 #ifdef TPM_DEBUG
552 			kprintf("%s: interrupted %d\n", __func__, rv);
553 #endif
554 			return rv;
555 		}
556 	}
557 
558 	return 0;
559 }
560 
561 /* Wait for given status bits using interrupts. */
562 int
563 tpm_waitfor_int(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c,
564     int inttype)
565 {
566 	int rv, to;
567 
568 	/* Poll and return when condition is already met. */
569 	sc->sc_stat = tpm_status(sc);
570 	if ((sc->sc_stat & mask) == mask)
571 		return 0;
572 
573 	/*
574 	 * Enable interrupt on tpm chip.  Note that interrupts on our
575 	 * level (SPL_TTY) are disabled (see tpm{read,write} et al) and
576 	 * will not be delivered to the cpu until we call tsleep(9) below.
577 	 */
578 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
579 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) |
580 	    inttype);
581 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
582 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) |
583 	    TPM_GLOBAL_INT_ENABLE);
584 
585 	/*
586 	 * Poll once more to remedy the race between previous polling
587 	 * and enabling interrupts on the tpm chip.
588 	 */
589 	sc->sc_stat = tpm_status(sc);
590 	if ((sc->sc_stat & mask) == mask) {
591 		rv = 0;
592 		goto out;
593 	}
594 
595 	to = tpm_tmotohz(tmo);
596 #ifdef TPM_DEBUG
597 	kprintf("%s: sleeping for %d ticks on %p\n", __func__, to, c);
598 #endif
599 	/*
600 	 * tsleep(9) enables interrupts on the cpu and returns after
601 	 * wake up with interrupts disabled again.  Note that interrupts
602 	 * generated by the tpm chip while being at SPL_TTY are not lost
603 	 * but held and delivered as soon as the cpu goes below SPL_TTY.
604 	 */
605 	rv = tsleep(c, PCATCH, "tpm_intr", to);
606 
607 	sc->sc_stat = tpm_status(sc);
608 #ifdef TPM_DEBUG
609 	kprintf("%s: woke up with rv %d stat %b\n", __func__, rv,
610 	    sc->sc_stat, TPM_STS_BITS);
611 #endif
612 	if ((sc->sc_stat & mask) == mask)
613 		rv = 0;
614 
615 	/* Disable interrupts on tpm chip again. */
616 out:	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
617 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
618 	    ~TPM_GLOBAL_INT_ENABLE);
619 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE,
620 	    bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) &
621 	    ~inttype);
622 
623 	return rv;
624 }
625 
626 /*
627  * Wait on given status bits, uses interrupts where possible, otherwise polls.
628  */
629 int
630 tpm_waitfor(struct tpm_softc *sc, u_int8_t b0, int tmo, void *c)
631 {
632 	u_int8_t b;
633 	int re, to, rv;
634 
635 #ifdef TPM_DEBUG
636 	kprintf("%s: b0 %b\n", __func__, b0, TPM_STS_BITS);
637 #endif
638 
639 	/*
640 	 * If possible, use interrupts, otherwise poll.
641 	 *
642 	 * We use interrupts for TPM_STS_VALID and TPM_STS_DATA_AVAIL (if
643 	 * the tpm chip supports them) as waiting for those can take
644 	 * really long.  The other TPM_STS* are not needed very often
645 	 * so we do not support them.
646 	 */
647 	if (sc->sc_vector != IRQUNK) {
648 		b = b0;
649 
650 		/*
651 		 * Wait for data ready.  This interrupt only occurs
652 		 * when both TPM_STS_VALID and TPM_STS_DATA_AVAIL are asserted.
653 		 * Thus we don't have to bother with TPM_STS_VALID
654 		 * separately and can just return.
655 		 *
656 		 * This only holds for interrupts!  When using polling
657 		 * both flags have to be waited for, see below.
658 		 */
659 		if ((b & TPM_STS_DATA_AVAIL) && (sc->sc_capabilities &
660 		    TPM_INTF_DATA_AVAIL_INT))
661 			return tpm_waitfor_int(sc, b, tmo, c,
662 			    TPM_DATA_AVAIL_INT);
663 
664 		/* Wait for status valid bit. */
665 		if ((b & TPM_STS_VALID) && (sc->sc_capabilities &
666 		    TPM_INTF_STS_VALID_INT)) {
667 			rv = tpm_waitfor_int(sc, b, tmo, c, TPM_STS_VALID_INT);
668 			if (rv != 0)
669 				return rv;
670 			else
671 				b = b0 & ~TPM_STS_VALID;
672 		}
673 
674 		/*
675 		 * When all flags are taken care of, return.  Otherwise
676 		 * use polling for e.g. TPM_STS_CMD_READY.
677 		 */
678 		if (b == 0)
679 			return 0;
680 	}
681 
682 	re = 3;
683 restart:
684 	/*
685 	 * If requested, wait for TPM_STS_VALID before dealing with
686 	 * any other flag.  E.g. when both TPM_STS_DATA_AVAIL and TPM_STS_VALID
687 	 * are requested, wait for the latter first.
688 	 */
689 	b = b0;
690 	if (b0 & TPM_STS_VALID)
691 		b = TPM_STS_VALID;
692 
693 	to = tpm_tmotohz(tmo);
694 again:
695 	if ((rv = tpm_waitfor_poll(sc, b, to, c)) != 0)
696 		return rv;
697 
698 	if ((b & sc->sc_stat) == TPM_STS_VALID) {
699 		/* Now wait for other flags. */
700 		b = b0 & ~TPM_STS_VALID;
701 		to++;
702 		goto again;
703 	}
704 
705 	if ((sc->sc_stat & b) != b) {
706 #ifdef TPM_DEBUG
707 		kprintf("%s: timeout: stat=%b b=%b\n", __func__,
708 		    sc->sc_stat, TPM_STS_BITS, b, TPM_STS_BITS);
709 #endif
710 		if (re-- && (b0 & TPM_STS_VALID)) {
711 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
712 			    TPM_STS_RESP_RETRY);
713 			goto restart;
714 		}
715 		return EIO;
716 	}
717 
718 	return 0;
719 }
720 
721 /* Start transaction. */
722 int
723 tpm_tis12_start(struct tpm_softc *sc, int flag)
724 {
725 	int rv;
726 
727 	if (flag == UIO_READ) {
728 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
729 		    TPM_READ_TMO, sc->sc_read);
730 		return rv;
731 	}
732 
733 	/* Own our (0th) locality. */
734 	if ((rv = tpm_request_locality(sc, 0)) != 0)
735 		return rv;
736 
737 	sc->sc_stat = tpm_status(sc);
738 	if (sc->sc_stat & TPM_STS_CMD_READY) {
739 #ifdef TPM_DEBUG
740 		kprintf("%s: UIO_WRITE status %b\n", __func__, sc->sc_stat,
741 		   TPM_STS_BITS);
742 #endif
743 		return 0;
744 	}
745 
746 #ifdef TPM_DEBUG
747 	kprintf("%s: UIO_WRITE readying chip\n", __func__);
748 #endif
749 
750 	/* Abort previous and restart. */
751 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
752 	if ((rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
753 	    sc->sc_write))) {
754 #ifdef TPM_DEBUG
755 		kprintf("%s: UIO_WRITE readying failed %d\n", __func__, rv);
756 #endif
757 		return rv;
758 	}
759 
760 #ifdef TPM_DEBUG
761 	kprintf("%s: UIO_WRITE readying done\n", __func__);
762 #endif
763 
764 	return 0;
765 }
766 
767 int
768 tpm_tis12_read(struct tpm_softc *sc, void *buf, int len, size_t *count,
769     int flags)
770 {
771 	u_int8_t *p = buf;
772 	size_t cnt;
773 	int rv, n, bcnt;
774 
775 #ifdef TPM_DEBUG
776 	kprintf("%s: len %d\n", __func__, len);
777 #endif
778 	cnt = 0;
779 	while (len > 0) {
780 		if ((rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
781 		    TPM_READ_TMO, sc->sc_read)))
782 			return rv;
783 
784 		bcnt = tpm_getburst(sc);
785 		n = MIN(len, bcnt);
786 #ifdef TPM_DEBUG
787 		kprintf("%s: fetching %d, burst is %d\n", __func__, n, bcnt);
788 #endif
789 		for (; n--; len--) {
790 			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
791 			cnt++;
792 		}
793 
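		/* Without TPM_PARAM_SIZE, stop once the 6-byte tag/length prefix is in. */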
794 		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
795 			break;
796 	}
797 #ifdef TPM_DEBUG
798 	kprintf("%s: read %zd bytes, len %d\n", __func__, cnt, len);
799 #endif
800 
801 	if (count)
802 		*count = cnt;
803 
804 	return 0;
805 }
806 
807 int
808 tpm_tis12_write(struct tpm_softc *sc, void *buf, int len)
809 {
810 	u_int8_t *p = buf;
811 	size_t cnt;
812 	int rv, r;
813 
814 #ifdef TPM_DEBUG
815 	kprintf("%s: sc %p buf %p len %d\n", __func__, sc, buf, len);
816 #endif
817 
818 	if ((rv = tpm_request_locality(sc, 0)) != 0)
819 		return rv;
820 
821 	cnt = 0;
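	/* Push out all but the final byte in burst-sized chunks. */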
822 	while (cnt < len - 1) {
823 		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
824 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
825 			cnt++;
826 		}
827 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
828 #ifdef TPM_DEBUG
829 			kprintf("%s: failed burst rv %d\n", __func__, rv);
830 #endif
831 			return rv;
832 		}
833 		sc->sc_stat = tpm_status(sc);
834 		if (!(sc->sc_stat & TPM_STS_DATA_EXPECT)) {
835 #ifdef TPM_DEBUG
836 			kprintf("%s: failed rv %d stat=%b\n", __func__, rv,
837 			    sc->sc_stat, TPM_STS_BITS);
838 #endif
839 			return EIO;
840 		}
841 	}
842 
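	/* Write the final byte; the chip should then stop expecting data. */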
843 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
844 	cnt++;
845 
846 	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
847 #ifdef TPM_DEBUG
848 		kprintf("%s: failed last byte rv %d\n", __func__, rv);
849 #endif
850 		return rv;
851 	}
852 	if ((sc->sc_stat & TPM_STS_DATA_EXPECT) != 0) {
853 #ifdef TPM_DEBUG
854 		kprintf("%s: failed rv %d stat=%b\n", __func__, rv,
855 		    sc->sc_stat, TPM_STS_BITS);
856 #endif
857 		return EIO;
858 	}
859 
860 #ifdef TPM_DEBUG
861 	kprintf("%s: wrote %zd byte\n", __func__, cnt);
862 #endif
863 
864 	return 0;
865 }
866 
867 /* Finish transaction. */
868 int
869 tpm_tis12_end(struct tpm_softc *sc, int flag, int err)
870 {
871 	int rv = 0;
872 
873 	if (flag == UIO_READ) {
874 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
875 		    sc->sc_read)))
876 			return rv;
877 
878 		/* Still more data? */
879 		sc->sc_stat = tpm_status(sc);
880 		if (!err && ((sc->sc_stat & TPM_STS_DATA_AVAIL) == TPM_STS_DATA_AVAIL)) {
881 #ifdef TPM_DEBUG
882 			kprintf("%s: read failed stat=%b\n", __func__,
883 			    sc->sc_stat, TPM_STS_BITS);
884 #endif
885 			rv = EIO;
886 		}
887 
888 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
889 		    TPM_STS_CMD_READY);
890 
891 		/* Release our (0th) locality. */
892 		bus_space_write_1(sc->sc_bt, sc->sc_bh,TPM_ACCESS,
893 		    TPM_ACCESS_ACTIVE_LOCALITY);
894 	} else {
895 		/* Hungry for more? */
896 		sc->sc_stat = tpm_status(sc);
897 		if (!err && (sc->sc_stat & TPM_STS_DATA_EXPECT)) {
898 #ifdef TPM_DEBUG
899 			kprintf("%s: write failed stat=%b\n", __func__,
900 			    sc->sc_stat, TPM_STS_BITS);
901 #endif
902 			rv = EIO;
903 		}
904 
905 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
906 		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
907 	}
908 
909 	return rv;
910 }
911 
912 void
913 tpm_intr(void *v)
914 {
915 	struct tpm_softc *sc = v;
916 	u_int32_t r;
917 #ifdef TPM_DEBUG
918 	static int cnt = 0;
919 #endif
920 
921 	r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS);
922 #ifdef TPM_DEBUG
923 	if (r != 0)
924 		kprintf("%s: int=%b (%d)\n", __func__, r,
925 		    TPM_INTERRUPT_ENABLE_BITS, cnt);
926 	else
927 		cnt++;
928 #endif
929 	if (!(r & (TPM_CMD_READY_INT | TPM_LOCALITY_CHANGE_INT |
930 	    TPM_STS_VALID_INT | TPM_DATA_AVAIL_INT)))
931 		return;
932 
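	/* Wake the tsleep() channel matching each asserted interrupt cause. */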
933 	if (r & TPM_STS_VALID_INT)
934 		wakeup(sc);
935 
936 	if (r & TPM_CMD_READY_INT)
937 		wakeup(sc->sc_write);
938 
939 	if (r & TPM_DATA_AVAIL_INT)
940 		wakeup(sc->sc_read);
941 
942 	if (r & TPM_LOCALITY_CHANGE_INT)
943 		wakeup(sc->sc_init);
944 
945 	bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS, r);
946 
947 	return;
948 }
949 
950 /* Read single byte using legacy interface. */
951 static inline u_int8_t
952 tpm_legacy_in(bus_space_tag_t iot, bus_space_handle_t ioh, int reg)
953 {
954 	bus_space_write_1(iot, ioh, 0, reg);
955 	return bus_space_read_1(iot, ioh, 1);
956 }
957 
958 /* Write single byte using legacy interface. */
959 static inline void
960 tpm_legacy_out(bus_space_tag_t iot, bus_space_handle_t ioh, int reg, u_int8_t v)
961 {
962 	bus_space_write_1(iot, ioh, 0, reg);
963 	bus_space_write_1(iot, ioh, 1, v);
964 }
965 
966 /* Probe for TPM using legacy interface. */
967 int
968 tpm_legacy_probe(bus_space_tag_t iot, bus_addr_t iobase)
969 {
970 	bus_space_handle_t ioh;
971 	u_int8_t r, v;
972 	int i, rv = 0;
973 	char id[8];
974 
975 	if (!tpm_enabled || iobase == -1)
976 		return 0;
977 
978 #if 0 /* XXX swildner */
979 	if (bus_space_map(iot, iobase, 2, 0, &ioh))
980 		return 0;
981 #else
982 	ioh = iobase;
983 #endif
984 
985 	v = bus_space_read_1(iot, ioh, 0);
986 	if (v == 0xff) {
987 		bus_space_unmap(iot, ioh, 2);
988 		return 0;
989 	}
990 	r = bus_space_read_1(iot, ioh, 1);
991 
992 	for (i = sizeof(id); i--; )
993 		id[i] = tpm_legacy_in(iot, ioh, TPM_ID + i);
994 
995 #ifdef TPM_DEBUG
996 	kprintf("%s: %.4s %d.%d.%d.%d\n", __func__,
997 	    &id[4], id[0], id[1], id[2], id[3]);
998 #endif
999 	/*
1000 	 * The only chips using the legacy interface we are aware of are
1001 	 * by Atmel.  For other chips more signatures would have to be added.
1002 	 */
1003 	if (!bcmp(&id[4], "ATML", 4))
1004 		rv = 1;
1005 
1006 	if (!rv) {
1007 		bus_space_write_1(iot, ioh, r, 1);
1008 		bus_space_write_1(iot, ioh, v, 0);
1009 	}
1010 	bus_space_unmap(iot, ioh, 2);
1011 
1012 	return rv;
1013 }
1014 
1015 /* Setup TPM using legacy interface. */
1016 int
1017 tpm_legacy_init(struct tpm_softc *sc, int irq, const char *name)
1018 {
1019 	char id[8];
1020 	u_int8_t ioh, iol;
1021 	int i;
1022 
1023 #if 0 /* XXX swildner */
1024 	if ((i = bus_space_map(sc->sc_batm, tpm_enabled, 2, 0, &sc->sc_bahm))) {
1025 		kprintf(": cannot map tpm registers (%d)\n", i);
1026 		tpm_enabled = 0;
1027 		return 1;
1028 	}
1029 #else
1030 	sc->sc_bahm = tpm_enabled;
1031 #endif
1032 
1033 	for (i = sizeof(id); i--; )
1034 		id[i] = tpm_legacy_in(sc->sc_bt, sc->sc_bh, TPM_ID + i);
1035 
1036 	kprintf(": %.4s %d.%d @0x%x\n", &id[4], id[0], id[1], tpm_enabled);
1037 	iol = tpm_enabled & 0xff;
1038 	ioh = tpm_enabled >> 16;
1039 	tpm_enabled = 0;
1040 
1041 	return 0;
1042 }
1043 
1044 /* Start transaction. */
1045 int
1046 tpm_legacy_start(struct tpm_softc *sc, int flag)
1047 {
1048 	struct timeval tv;
1049 	u_int8_t bits, r;
1050 	int to, rv;
1051 
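	/* Reads wait for data-available and not busy; writes wait for the chip to go idle. */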
1052 	bits = flag == UIO_READ ? TPM_LEGACY_DA : 0;
1053 	tv.tv_sec = TPM_LEGACY_TMO;
1054 	tv.tv_usec = 0;
1055 	to = tvtohz_high(&tv) / TPM_LEGACY_SLEEP;
1056 	while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
1057 	    (TPM_LEGACY_BUSY|bits)) != bits && to--) {
1058 		rv = tsleep(sc, PCATCH, "legacy_tpm_start",
1059 		    TPM_LEGACY_SLEEP);
1060 		if (rv && rv != EWOULDBLOCK)
1061 			return rv;
1062 	}
1063 
1064 	if ((r & (TPM_LEGACY_BUSY|bits)) != bits)
1065 		return EIO;
1066 
1067 	return 0;
1068 }
1069 
1070 int
1071 tpm_legacy_read(struct tpm_softc *sc, void *buf, int len, size_t *count,
1072     int flags)
1073 {
1074 	u_int8_t *p;
1075 	size_t cnt;
1076 	int to, rv;
1077 
1078 	cnt = rv = 0;
1079 	for (p = buf; !rv && len > 0; len--) {
1080 		for (to = 1000;
1081 		    !(bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1) &
1082 		    TPM_LEGACY_DA); DELAY(1))
1083 			if (!to--)
1084 				return EIO;
1085 
1086 		DELAY(TPM_LEGACY_DELAY);
1087 		*p++ = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 0);
1088 		cnt++;
1089 	}
1090 
1091 	*count = cnt;
1092 	return 0;
1093 }
1094 
1095 int
1096 tpm_legacy_write(struct tpm_softc *sc, void *buf, int len)
1097 {
1098 	u_int8_t *p;
1099 	int n;
1100 
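	/* Flag the final byte with TPM_LEGACY_LAST unless a full buffer is sent. */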
1101 	for (p = buf, n = len; n--; DELAY(TPM_LEGACY_DELAY)) {
1102 		if (!n && len != TPM_BUFSIZ) {
1103 			bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1,
1104 			    TPM_LEGACY_LAST);
1105 			DELAY(TPM_LEGACY_DELAY);
1106 		}
1107 		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 0, *p++);
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 /* Finish transaction. */
1114 int
1115 tpm_legacy_end(struct tpm_softc *sc, int flag, int rv)
1116 {
1117 	struct timeval tv;
1118 	u_int8_t r;
1119 	int to;
1120 
1121 	if (rv || flag == UIO_READ)
1122 		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1, TPM_LEGACY_ABRT);
1123 	else {
1124 		tv.tv_sec = TPM_LEGACY_TMO;
1125 		tv.tv_usec = 0;
1126 		to = tvtohz_high(&tv) / TPM_LEGACY_SLEEP;
1127 		while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
1128 		    TPM_LEGACY_BUSY) && to--) {
1129 			rv = tsleep(sc, PCATCH, "legacy_tpm_end",
1130 			    TPM_LEGACY_SLEEP);
1131 			if (rv && rv != EWOULDBLOCK)
1132 				return rv;
1133 		}
1134 
1135 		if (r & TPM_LEGACY_BUSY)
1136 			return EIO;
1137 
1138 		if (r & TPM_LEGACY_RE)
1139 			return EIO;	/* XXX Retry the loop? */
1140 	}
1141 
1142 	return rv;
1143 }
1144 
1145 int
1146 tpmopen(struct dev_open_args *ap)
1147 {
1148 	cdev_t dev = ap->a_head.a_dev;
1149 	struct tpm_softc *sc = TPMSOFTC(dev);
1150 
1151 	if (!sc)
1152 		return ENXIO;
1153 
1154 	if (sc->sc_flags & TPM_OPEN)
1155 		return EBUSY;
1156 
1157 	sc->sc_flags |= TPM_OPEN;
1158 
1159 	return 0;
1160 }
1161 
1162 int
1163 tpmclose(struct dev_close_args *ap)
1164 {
1165 	cdev_t dev = ap->a_head.a_dev;
1166 	struct tpm_softc *sc = TPMSOFTC(dev);
1167 
1168 	if (!sc)
1169 		return ENXIO;
1170 
1171 	if (!(sc->sc_flags & TPM_OPEN))
1172 		return EINVAL;
1173 
1174 	sc->sc_flags &= ~TPM_OPEN;
1175 
1176 	return 0;
1177 }
1178 
1179 int
1180 tpmread(struct dev_read_args *ap)
1181 {
1182 	cdev_t dev = ap->a_head.a_dev;
1183 	struct uio *uio = ap->a_uio;
1184 	struct tpm_softc *sc = TPMSOFTC(dev);
1185 	u_int8_t buf[TPM_BUFSIZ], *p;
1186 	size_t cnt;
1187 	int n, len, rv;
1188 
1189 	if (!sc)
1190 		return ENXIO;
1191 
1192 	crit_enter();
1193 	if ((rv = (sc->sc_start)(sc, UIO_READ))) {
1194 		crit_exit();
1195 		return rv;
1196 	}
1197 
1198 #ifdef TPM_DEBUG
1199 	kprintf("%s: getting header\n", __func__);
1200 #endif
1201 	if ((rv = (sc->sc_read)(sc, buf, TPM_HDRSIZE, &cnt, 0))) {
1202 		(sc->sc_end)(sc, UIO_READ, rv);
1203 		crit_exit();
1204 		return rv;
1205 	}
1206 
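	/* Bytes 2-5 of the header hold the total response length (big endian). */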
1207 	len = (buf[2] << 24) | (buf[3] << 16) | (buf[4] << 8) | buf[5];
1208 #ifdef TPM_DEBUG
1209 	kprintf("%s: len %d, io count %zd\n", __func__, len, uio->uio_resid);
1210 #endif
1211 	if (len > uio->uio_resid) {
1212 		rv = EIO;
1213 		(sc->sc_end)(sc, UIO_READ, rv);
1214 #ifdef TPM_DEBUG
1215 		kprintf("%s: bad residual io count 0x%zx\n", __func__,
1216 		    uio->uio_resid);
1217 #endif
1218 		crit_exit();
1219 		return rv;
1220 	}
1221 
1222 	/* Copy out header. */
1223 	if ((rv = uiomove((caddr_t)buf, cnt, uio))) {
1224 		(sc->sc_end)(sc, UIO_READ, rv);
1225 		crit_exit();
1226 		return rv;
1227 	}
1228 
1229 	/* Get remaining part of the answer (if anything is left). */
1230 	for (len -= cnt, p = buf, n = sizeof(buf); len > 0; p = buf, len -= n,
1231 	    n = sizeof(buf)) {
1232 		n = MIN(n, len);
1233 #ifdef TPM_DEBUG
1234 		kprintf("%s: n %d len %d\n", __func__, n, len);
1235 #endif
1236 		if ((rv = (sc->sc_read)(sc, p, n, NULL, TPM_PARAM_SIZE))) {
1237 			(sc->sc_end)(sc, UIO_READ, rv);
1238 			crit_exit();
1239 			return rv;
1240 		}
1241 		p += n;
1242 		if ((rv = uiomove((caddr_t)buf, p - buf, uio))) {
1243 			(sc->sc_end)(sc, UIO_READ, rv);
1244 			crit_exit();
1245 			return rv;
1246 		}
1247 	}
1248 
1249 	rv = (sc->sc_end)(sc, UIO_READ, rv);
1250 	crit_exit();
1251 	return rv;
1252 }
1253 
1254 int
1255 tpmwrite(struct dev_write_args *ap)
1256 {
1257 	cdev_t dev = ap->a_head.a_dev;
1258 	struct uio *uio = ap->a_uio;
1259 	struct tpm_softc *sc = TPMSOFTC(dev);
1260 	u_int8_t buf[TPM_BUFSIZ];
1261 	int n, rv;
1262 
1263 	if (!sc)
1264 		return ENXIO;
1265 
1266 	crit_enter();
1267 
1268 #ifdef TPM_DEBUG
1269 	kprintf("%s: io count %zd\n", __func__, uio->uio_resid);
1270 #endif
1271 
1272 	n = MIN(sizeof(buf), uio->uio_resid);
1273 	if ((rv = uiomove((caddr_t)buf, n, uio))) {
1274 		crit_exit();
1275 		return rv;
1276 	}
1277 
1278 	if ((rv = (sc->sc_start)(sc, UIO_WRITE))) {
1279 		crit_exit();
1280 		return rv;
1281 	}
1282 
1283 	if ((rv = (sc->sc_write(sc, buf, n)))) {
1284 		crit_exit();
1285 		return rv;
1286 	}
1287 
1288 	rv = (sc->sc_end)(sc, UIO_WRITE, rv);
1289 	crit_exit();
1290 	return rv;
1291 }
1292 
1293 int
1294 tpmioctl(struct dev_ioctl_args *ap)
1295 {
1296 	return ENOTTY;
1297 }
1298