xref: /dragonfly/sys/dev/misc/ipmi/ipmi.c (revision 7d3e9a5b)
1 /*-
2  * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: head/sys/dev/ipmi/ipmi.c 257421 2013-10-31 05:13:53Z glebius $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bus.h>
32 #include <sys/condvar.h>
33 #include <sys/conf.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/rman.h>
38 #include <sys/sysctl.h>
39 #include <sys/wdog.h>
40 #include <sys/device.h>
41 #include <sys/devfs.h>
42 
43 #ifdef LOCAL_MODULE
44 #include <ipmi.h>
45 #include <ipmivars.h>
46 #else
47 #include <sys/ipmi.h>
48 #include <dev/misc/ipmi/ipmivars.h>
49 #endif
50 
51 #ifdef IPMB
52 static int ipmi_ipmb_checksum(u_char *, int);
53 static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
54      u_char, u_char *, int);
55 #endif
56 
57 static d_ioctl_t ipmi_ioctl;
58 static d_kqfilter_t ipmi_kqfilter;
59 static d_open_t ipmi_open;
60 static d_priv_dtor_t ipmi_dtor;
61 
62 static void ipmi_filter_detach(struct knote *);
63 static int ipmi_filter_read(struct knote *, long);
64 
65 int ipmi_attached = 0;
66 
67 static int on = 1;
68 static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
69     "IPMI driver parameters");
70 SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
71 	&on, 0, "Enable the IPMI driver");
72 
73 static struct dev_ops ipmi_ops = {
74 	{ "ipmi", 0, D_MPSAFE },
75 	.d_open =	ipmi_open,
76 	.d_ioctl =	ipmi_ioctl,
77 	.d_kqfilter =	ipmi_kqfilter,
78 };
79 
80 static int ipmi_watchdog_sysctl_enable(SYSCTL_HANDLER_ARGS);
81 static int ipmi_watchdog_sysctl_period(SYSCTL_HANDLER_ARGS);
82 
83 static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
84 
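/* devfs open handler: allocate per-descriptor state and register ipmi_dtor to tear it down. */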
85 static int
86 ipmi_open(struct dev_open_args *ap)
87 {
88 	struct file *fp = ap->a_fpp ? *ap->a_fpp : NULL;
89 	cdev_t cdev = ap->a_head.a_dev;
90 	struct ipmi_device *dev;
91 	struct ipmi_softc *sc;
92 	int error;
93 
94 	if (!on)
95 		return (ENOENT);
96 
97 	/* Initialize the per file descriptor data. */
98 	dev = kmalloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
99 	error = devfs_set_cdevpriv(fp, dev, ipmi_dtor);
100 	if (error) {
101 		kfree(dev, M_IPMI);
102 		return (error);
103 	}
104 
105 	sc = cdev->si_drv1;
106 	TAILQ_INIT(&dev->ipmi_completed_requests);
107 	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
108 	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
109 	dev->ipmi_softc = sc;
110 	IPMI_LOCK(sc);
111 	sc->ipmi_opened++;
112 	IPMI_UNLOCK(sc);
113 
114 	return (0);
115 }
116 
117 static struct filterops ipmi_filterops = {
118 	FILTEROP_ISFD | FILTEROP_MPSAFE,
119 	NULL,
120 	ipmi_filter_detach,
121 	ipmi_filter_read
122 };
123 
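/* kqueue attach handler: only EVFILT_READ is supported. */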
124 static int
125 ipmi_kqfilter(struct dev_kqfilter_args *ap)
126 {
127 	cdev_t cdev = ap->a_head.a_dev;
128 	struct file *fp = ap->a_fp;
129 	struct knote *kn = ap->a_kn;
130 	struct ipmi_softc *sc = cdev->si_drv1;
131 	struct ipmi_device *dev;
132 	struct klist *klist;
133 
134 	ap->a_result = 0;
135 
136 	switch(kn->kn_filter) {
137 	case EVFILT_READ:
138 		if (devfs_get_cdevpriv(fp, (void **)&dev))
139 			return EOPNOTSUPP;
140 		kn->kn_fop = &ipmi_filterops;
141 		kn->kn_hook = (caddr_t)dev;
142 		break;
143 	default:
144 		ap->a_result = EOPNOTSUPP;
145 		return (0);
146 	}
147 
148 	klist = &sc->ipmi_kq.ki_note;
149 	knote_insert(klist, kn);
150 
151 	return (0);
152 }
153 
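/* Remove the knote from the softc's note list on detach. */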
154 static void
155 ipmi_filter_detach(struct knote *kn)
156 {
157 	struct ipmi_device *dev = (struct ipmi_device *)kn->kn_hook;
158 	struct ipmi_softc *sc = dev->ipmi_softc;
159 	struct klist *klist;
160 
161 	klist = &sc->ipmi_kq.ki_note;
162 	knote_remove(klist, kn);
163 }
164 
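/* Report readable when a completed request is queued; flag EV_ERROR if none are outstanding. */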
165 static int
166 ipmi_filter_read(struct knote *kn, long hint)
167 {
168 	struct ipmi_device *dev = (struct ipmi_device *)kn->kn_hook;
169 	struct ipmi_softc *sc = dev->ipmi_softc;
170 	int ret = 0;
171 
172 	IPMI_LOCK(sc);
173 	if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
174 		ret = 1;
175 	if (dev->ipmi_requests == 0)
176 		kn->kn_flags |= EV_ERROR;
177 	IPMI_UNLOCK(sc);
178 
179 	return (ret);
180 }
181 
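/* Free every request on this descriptor's completed queue. */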
182 static void
183 ipmi_purge_completed_requests(struct ipmi_device *dev)
184 {
185 	struct ipmi_request *req;
186 
187 	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
188 		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
189 		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
190 		dev->ipmi_requests--;
191 		ipmi_free_request(req);
192 	}
193 }
194 
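/* cdevpriv destructor: discard pending requests, drain any still in the interface driver, then free the per-descriptor state. */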
195 static void
196 ipmi_dtor(void *arg)
197 {
198 	struct ipmi_request *req, *nreq;
199 	struct ipmi_device *dev;
200 	struct ipmi_softc *sc;
201 
202 	dev = arg;
203 	sc = dev->ipmi_softc;
204 
205 	IPMI_LOCK(sc);
206 	if (dev->ipmi_requests) {
207 		/* Throw away any pending requests for this device. */
208 		TAILQ_FOREACH_MUTABLE(req, &sc->ipmi_pending_requests, ir_link,
209 		    nreq) {
210 			if (req->ir_owner == dev) {
211 				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
212 				    ir_link);
213 				dev->ipmi_requests--;
214 				ipmi_free_request(req);
215 			}
216 		}
217 
218 		/* Throw away any pending completed requests for this device. */
219 		ipmi_purge_completed_requests(dev);
220 
221 		/*
222 		 * If we still have outstanding requests, they must be stuck
223 		 * in an interface driver, so wait for those to drain.
224 		 */
225 		dev->ipmi_closing = 1;
226 		while (dev->ipmi_requests > 0) {
227 			lksleep(&dev->ipmi_requests, &sc->ipmi_lock, 0,
228 			    "ipmidrain", 0);
229 			ipmi_purge_completed_requests(dev);
230 		}
231 	}
232 	sc->ipmi_opened--;
233 	IPMI_UNLOCK(sc);
234 
235 	/* Cleanup. */
236 	kfree(dev, M_IPMI);
237 }
238 
239 #ifdef IPMB
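/* 2's-complement checksum over 'len' bytes, as used in IPMB message headers and payloads. */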
240 static int
241 ipmi_ipmb_checksum(u_char *data, int len)
242 {
243 	u_char sum = 0;
244 
245 	for (; len; len--) {
246 		sum += *data++;
247 	}
248 	return (-sum);
249 }
250 
251 /* XXX: Needs work */
252 static int
253 ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
254     u_char command, u_char seq, u_char *data, int data_len)
255 {
256 	struct ipmi_softc *sc = device_get_softc(dev);
257 	struct ipmi_request *req;
258 	u_char slave_addr = 0x52;
259 	int error;
260 
261 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
262 	    IPMI_SEND_MSG, data_len + 8, 0);
263 	req->ir_request[0] = channel;
264 	req->ir_request[1] = slave_addr;
265 	req->ir_request[2] = IPMI_ADDR(netfn, 0);
266 	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
267 	req->ir_request[4] = sc->ipmi_address;
268 	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
269 	req->ir_request[6] = command;
270 
271 	bcopy(data, &req->ir_request[7], data_len);
272 	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
273 	    data_len + 3);
274 
275 	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
276 	error = req->ir_error;
277 	ipmi_free_request(req);
278 
279 	return (error);
280 }
281 
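/* Service a BMC attention: read the message flags and fetch/discard any pending message. */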
282 static int
283 ipmi_handle_attn(struct ipmi_softc *sc)
284 {
285 	struct ipmi_request *req;
286 	int error;
287 
288 	device_printf(sc->ipmi_dev, "BMC has a message\n");
289 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
290 	    IPMI_GET_MSG_FLAGS, 0, 1);
291 
292 	ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
293 
294 	if (req->ir_error == 0 && req->ir_compcode == 0) {
295 		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
296 			device_printf(sc->ipmi_dev, "message buffer full\n");
297 		}
298 		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
299 			device_printf(sc->ipmi_dev,
300 			    "watchdog about to go off\n");
301 		}
302 		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
303 			ipmi_free_request(req);
304 
305 			req = ipmi_alloc_driver_request(
306 			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
307 			    16);
308 			ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
309 			device_printf(sc->ipmi_dev, "throw out message ");
310 			dump_buf((u_char *)req->ir_reply, 16);
311 		}
312 	}
313 	error = req->ir_error;
314 	ipmi_free_request(req);
315 
316 	return (error);
317 }
318 #endif
319 
320 #ifdef IPMICTL_SEND_COMMAND_32
321 #define	PTRIN(p)	((void *)(uintptr_t)(p))
322 #define	PTROUT(p)	((uintptr_t)(p))
323 #endif
324 
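/* ioctl handler for the ipmi character device, including 32-bit compat thunking. */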
325 static int
326 ipmi_ioctl(struct dev_ioctl_args *ap)
327 {
328 	struct file *fp = ap->a_fp;
329 	cdev_t cdev = ap->a_head.a_dev;
330 	u_long cmd = ap->a_cmd;
331 	caddr_t data = ap->a_data;
332 	struct ipmi_softc *sc;
333 	struct ipmi_device *dev;
334 	struct ipmi_request *kreq;
335 	struct ipmi_req *req = (struct ipmi_req *)data;
336 	struct ipmi_recv *recv = (struct ipmi_recv *)data;
337 	struct ipmi_addr addr;
338 #ifdef IPMICTL_SEND_COMMAND_32
339 	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
340 	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
341 	union {
342 		struct ipmi_req req;
343 		struct ipmi_recv recv;
344 	} thunk32;
345 #endif
346 	int error, len;
347 
348 	error = devfs_get_cdevpriv(fp, (void **)&dev);
349 	if (error)
350 		return (error);
351 
352 	sc = cdev->si_drv1;
353 
354 #ifdef IPMICTL_SEND_COMMAND_32
355 	/* Convert 32-bit structures to native. */
356 	switch (cmd) {
357 	case IPMICTL_SEND_COMMAND_32:
358 		req = &thunk32.req;
359 		req->addr = PTRIN(req32->addr);
360 		req->addr_len = req32->addr_len;
361 		req->msgid = req32->msgid;
362 		req->msg.netfn = req32->msg.netfn;
363 		req->msg.cmd = req32->msg.cmd;
364 		req->msg.data_len = req32->msg.data_len;
365 		req->msg.data = PTRIN(req32->msg.data);
366 		break;
367 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
368 	case IPMICTL_RECEIVE_MSG_32:
369 		recv = &thunk32.recv;
370 		recv->addr = PTRIN(recv32->addr);
371 		recv->addr_len = recv32->addr_len;
372 		recv->msg.data_len = recv32->msg.data_len;
373 		recv->msg.data = PTRIN(recv32->msg.data);
374 		break;
375 	}
376 #endif
377 
378 	switch (cmd) {
379 #ifdef IPMICTL_SEND_COMMAND_32
380 	case IPMICTL_SEND_COMMAND_32:
381 #endif
382 	case IPMICTL_SEND_COMMAND:
383 		/*
384 		 * XXX: Need to add proper handling of this.
385 		 */
386 		error = copyin(req->addr, &addr, sizeof(addr));
387 		if (error)
388 			return (error);
389 
390 		IPMI_LOCK(sc);
391 		/* Discard any stale requests left on the completed queue. */
392 		/* XXX: This seems odd. */
393 		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
394 			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
395 			    ir_link);
396 			dev->ipmi_requests--;
397 			ipmi_free_request(kreq);
398 		}
399 		IPMI_UNLOCK(sc);
400 
401 		kreq = ipmi_alloc_request(dev, req->msgid,
402 		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
403 		    req->msg.data_len, IPMI_MAX_RX);
404 		error = copyin(req->msg.data, kreq->ir_request,
405 		    req->msg.data_len);
406 		if (error) {
407 			ipmi_free_request(kreq);
408 			return (error);
409 		}
410 		IPMI_LOCK(sc);
411 		dev->ipmi_requests++;
412 		error = sc->ipmi_enqueue_request(sc, kreq);
413 		IPMI_UNLOCK(sc);
414 		if (error)
415 			return (error);
416 		break;
417 #ifdef IPMICTL_SEND_COMMAND_32
418 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
419 	case IPMICTL_RECEIVE_MSG_32:
420 #endif
421 	case IPMICTL_RECEIVE_MSG_TRUNC:
422 	case IPMICTL_RECEIVE_MSG:
423 		error = copyin(recv->addr, &addr, sizeof(addr));
424 		if (error)
425 			return (error);
426 
427 		IPMI_LOCK(sc);
428 		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
429 		if (kreq == NULL) {
430 			IPMI_UNLOCK(sc);
431 			return (EAGAIN);
432 		}
433 		addr.channel = IPMI_BMC_CHANNEL;
434 		/* XXX */
435 		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
436 		recv->msgid = kreq->ir_msgid;
437 		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
438 		recv->msg.cmd = kreq->ir_command;
439 		error = kreq->ir_error;
440 		if (error) {
441 			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
442 			    ir_link);
443 			dev->ipmi_requests--;
444 			IPMI_UNLOCK(sc);
445 			ipmi_free_request(kreq);
446 			return (error);
447 		}
448 		len = kreq->ir_replylen + 1;
449 		if (recv->msg.data_len < len &&
450 		    (cmd == IPMICTL_RECEIVE_MSG
451 #ifdef IPMICTL_RECEIVE_MSG_32
452 		     || cmd == IPMICTL_RECEIVE_MSG_32
453 #endif
454 		    )) {
455 			IPMI_UNLOCK(sc);
456 			return (EMSGSIZE);
457 		}
458 		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
459 		dev->ipmi_requests--;
460 		IPMI_UNLOCK(sc);
461 		len = min(recv->msg.data_len, len);
462 		recv->msg.data_len = len;
463 		error = copyout(&addr, recv->addr, sizeof(addr));
464 		if (error == 0)
465 			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
466 		if (error == 0)
467 			error = copyout(kreq->ir_reply, recv->msg.data + 1,
468 			    len - 1);
469 		ipmi_free_request(kreq);
470 		if (error)
471 			return (error);
472 		break;
473 	case IPMICTL_SET_MY_ADDRESS_CMD:
474 		IPMI_LOCK(sc);
475 		dev->ipmi_address = *(int*)data;
476 		IPMI_UNLOCK(sc);
477 		break;
478 	case IPMICTL_GET_MY_ADDRESS_CMD:
479 		IPMI_LOCK(sc);
480 		*(int*)data = dev->ipmi_address;
481 		IPMI_UNLOCK(sc);
482 		break;
483 	case IPMICTL_SET_MY_LUN_CMD:
484 		IPMI_LOCK(sc);
485 		dev->ipmi_lun = *(int*)data & 0x3;
486 		IPMI_UNLOCK(sc);
487 		break;
488 	case IPMICTL_GET_MY_LUN_CMD:
489 		IPMI_LOCK(sc);
490 		*(int*)data = dev->ipmi_lun;
491 		IPMI_UNLOCK(sc);
492 		break;
493 	case IPMICTL_SET_GETS_EVENTS_CMD:
494 		/*
495 		device_printf(sc->ipmi_dev,
496 		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
497 		*/
498 		break;
499 	case IPMICTL_REGISTER_FOR_CMD:
500 	case IPMICTL_UNREGISTER_FOR_CMD:
501 		return (EOPNOTSUPP);
502 	default:
503 		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
504 		return (ENOIOCTL);
505 	}
506 
507 #ifdef IPMICTL_SEND_COMMAND_32
508 	/* Update changed fields in 32-bit structures. */
509 	switch (cmd) {
510 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
511 	case IPMICTL_RECEIVE_MSG_32:
512 		recv32->recv_type = recv->recv_type;
513 		recv32->msgid = recv->msgid;
514 		recv32->msg.netfn = recv->msg.netfn;
515 		recv32->msg.cmd = recv->msg.cmd;
516 		recv32->msg.data_len = recv->msg.data_len;
517 		break;
518 	}
519 #endif
520 	return (0);
521 }
522 
523 /*
524  * Request management.
525  */
526 
527 /* Allocate a new request with request and reply buffers. */
528 struct ipmi_request *
529 ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
530     uint8_t command, size_t requestlen, size_t replylen)
531 {
532 	struct ipmi_request *req;
533 
534 	req = kmalloc(sizeof(struct ipmi_request) + requestlen + replylen,
535 	    M_IPMI, M_WAITOK | M_ZERO);
536 	req->ir_owner = dev;
537 	req->ir_msgid = msgid;
538 	req->ir_addr = addr;
539 	req->ir_command = command;
540 	if (requestlen) {
541 		req->ir_request = (char *)&req[1];
542 		req->ir_requestlen = requestlen;
543 	}
544 	if (replylen) {
545 		req->ir_reply = (char *)&req[1] + requestlen;
546 		req->ir_replybuflen = replylen;
547 	}
548 	return (req);
549 }
550 
551 /* Free a request no longer in use. */
552 void
553 ipmi_free_request(struct ipmi_request *req)
554 {
555 
556 	kfree(req, M_IPMI);
557 }
558 
559 /* Store a processed request on the appropriate completion queue. */
560 void
561 ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
562 {
563 	struct ipmi_device *dev;
564 
565 	IPMI_LOCK_ASSERT(sc);
566 
567 	/*
568 	 * Anonymous requests (from inside the driver) always have a
569 	 * waiter that we awaken.
570 	 */
571 	if (req->ir_owner == NULL)
572 		wakeup(req);
573 	else {
574 		dev = req->ir_owner;
575 		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
576 		KNOTE(&sc->ipmi_kq.ki_note, 0);
577 		if (dev->ipmi_closing)
578 			wakeup(&dev->ipmi_requests);
579 	}
580 }
581 
582 /* Enqueue an internal driver request and wait until it is completed. */
583 int
584 ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
585     int timo)
586 {
587 	int error;
588 
589 	IPMI_LOCK(sc);
590 	error = sc->ipmi_enqueue_request(sc, req);
591 	if (error == 0)
592 		error = lksleep(req, &sc->ipmi_lock, 0, "ipmireq", timo);
593 	if (error == 0)
594 		error = req->ir_error;
595 	IPMI_UNLOCK(sc);
596 	return (error);
597 }
598 
599 /*
600  * Helper routine for polled system interfaces that use
601  * ipmi_polled_enqueue_request() to queue requests.  This request
602  * ipmi_polled_enqueue_request() to queue requests.  This routine
603  * waits until there is a pending request and then returns the first
604  */
605 struct ipmi_request *
606 ipmi_dequeue_request(struct ipmi_softc *sc)
607 {
608 	struct ipmi_request *req;
609 
610 	IPMI_LOCK_ASSERT(sc);
611 
612 	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
613 		cv_wait(&sc->ipmi_request_added, &sc->ipmi_lock);
614 	if (sc->ipmi_detaching)
615 		return (NULL);
616 
617 	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
618 	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
619 	return (req);
620 }
621 
622 /* Default implementation of ipmi_enqueue_request() for polled interfaces. */
623 int
624 ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
625 {
626 	IPMI_LOCK_ASSERT(sc);
627 
628 	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
629 
630 	cv_signal(&sc->ipmi_request_added);
631 	return (0);
632 }
633 
634 /*
635  * BMC watchdog control: arm the timer for 'sec' seconds, or disable it when sec is zero.
636  */
637 static int
638 ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
639 {
640 	struct ipmi_request *req;
641 	int error;
642 
643 	if (sec > 0xffff / 10)
644 		return (EINVAL);
645 
646 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
647 	    IPMI_SET_WDOG, 6, 0);
648 
649 	if (sec) {
650 		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
651 		    | IPMI_SET_WD_TIMER_SMS_OS;
652 		req->ir_request[1] = IPMI_SET_WD_ACTION_RESET;
653 		req->ir_request[2] = 0;
654 		req->ir_request[3] = 0;	/* Timer use */
655 		req->ir_request[4] = (sec * 10) & 0xff;
656 		req->ir_request[5] = (sec * 10) >> 8;
657 	} else {
658 		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
659 		req->ir_request[1] = 0;
660 		req->ir_request[2] = 0;
661 		req->ir_request[3] = 0;	/* Timer use */
662 		req->ir_request[4] = 0;
663 		req->ir_request[5] = 0;
664 	}
665 	error = ipmi_submit_driver_request(sc, req, 0);
666 	if (error)
667 		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
668 	else if (sec) {
669 		ipmi_free_request(req);
670 
671 		req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
672 		    IPMI_RESET_WDOG, 0, 0);
673 
674 		error = ipmi_submit_driver_request(sc, req, 0);
675 		if (error)
676 			device_printf(sc->ipmi_dev,
677 			    "Failed to reset watchdog\n");
678 	}
679 
680 	ipmi_free_request(req);
681 	return (error);
682 	/*
683 	dump_watchdog(sc);
684 	*/
685 }
686 
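/* Callout handler: re-arm the BMC watchdog and reschedule itself while a period is configured. */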
687 static void
688 ipmi_watchdog(void *arg)
689 {
690 	struct ipmi_softc *sc = (struct ipmi_softc *)arg;
691 	int e;
692 
693 	if (sc->ipmi_wdog_period) {
694 		e = ipmi_set_watchdog(sc, sc->ipmi_wdog_period + 1);
695 
696 		if (e == 0)
697 			sc->ipmi_watchdog_active = 1;
698 		else
699 			ipmi_set_watchdog(sc, 0);
700 
701 		callout_reset(&sc->ipmi_watchdog,
702 			      sc->ipmi_wdog_period * hz,
703 			      &ipmi_watchdog, (void *)sc);
704 	}
705 }
706 
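/* Deferred attach run from the config intrhook: initialize state, probe the BMC, probe the watchdog and create the character device. */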
707 static void
708 ipmi_startup(void *arg)
709 {
710 	struct ipmi_softc *sc = arg;
711 	struct ipmi_request *req;
712 	device_t dev;
713 	int error, i;
714 
715 	config_intrhook_disestablish(&sc->ipmi_ich);
716 	dev = sc->ipmi_dev;
717 
718 	/* Initialize interface-independent state. */
719 	lockinit(&sc->ipmi_lock, device_get_nameunit(dev), 0, LK_CANRECURSE);
720 	cv_init(&sc->ipmi_request_added, "ipmireq");
721 	TAILQ_INIT(&sc->ipmi_pending_requests);
722 
723 	/* Initialize interface-dependent state. */
724 	error = sc->ipmi_startup(sc);
725 	if (error) {
726 		device_printf(dev, "Failed to initialize interface: %d\n",
727 		    error);
728 		return;
729 	}
730 
731 	/* Send a GET_DEVICE_ID request. */
732 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
733 	    IPMI_GET_DEVICE_ID, 0, 15);
734 
735 	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
736 	if (error == EWOULDBLOCK) {
737 		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
738 		ipmi_free_request(req);
739 		return;
740 	} else if (error) {
741 		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
742 		ipmi_free_request(req);
743 		return;
744 	} else if (req->ir_compcode != 0) {
745 		device_printf(dev,
746 		    "Bad completion code for GET_DEVICE_ID: %d\n",
747 		    req->ir_compcode);
748 		ipmi_free_request(req);
749 		return;
750 	} else if (req->ir_replylen < 5) {
751 		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
752 		    req->ir_replylen);
753 		ipmi_free_request(req);
754 		return;
755 	}
756 
757 	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
758 	    "version %d.%d\n",
759 	     req->ir_reply[1] & 0x0f,
760 	     req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
761 	     req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);
762 
763 	ipmi_free_request(req);
764 
765 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
766 	    IPMI_CLEAR_FLAGS, 1, 0);
767 
768 	ipmi_submit_driver_request(sc, req, 0);
769 
770 	/* XXX: Magic numbers */
771 	if (req->ir_compcode == 0xc0) {
772 		device_printf(dev, "Clear flags is busy\n");
773 	}
774 	if (req->ir_compcode == 0xc1) {
775 		device_printf(dev, "Clear flags illegal\n");
776 	}
777 	ipmi_free_request(req);
778 
779 	for (i = 0; i < 8; i++) {
780 		req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
781 		    IPMI_GET_CHANNEL_INFO, 1, 0);
782 		req->ir_request[0] = i;
783 
784 		ipmi_submit_driver_request(sc, req, 0);
785 
786 		if (req->ir_compcode != 0) {
787 			ipmi_free_request(req);
788 			break;
789 		}
790 		ipmi_free_request(req);
791 	}
792 	device_printf(dev, "Number of channels %d\n", i);
793 
794 	/* probe for watchdog */
795 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
796 	    IPMI_GET_WDOG, 0, 0);
797 
798 	ipmi_submit_driver_request(sc, req, 0);
799 
800 	if (req->ir_compcode == 0x00) {
801 		struct sysctl_ctx_list *ctx;
802 		struct sysctl_oid *tree;
803 		struct sysctl_oid_list *child;
804 
805 		device_printf(dev, "Attached watchdog\n");
806 		/* register the watchdog event handler */
807 		/* XXX profmakx: our wdog driver holds a spinlock while
808 		   running the watchdog function, but since the ipmi watchdog
809 		   function sleeps, this doesn't work. Hack something with
810 		   a callout */
811 		callout_init(&sc->ipmi_watchdog);
812 		sc->ipmi_wdog_enable = 0;
813 		sc->ipmi_wdog_period = 30;
814 
815 		ctx = device_get_sysctl_ctx(dev);
816 		tree = device_get_sysctl_tree(dev);
817 		child = SYSCTL_CHILDREN(tree);
818 
819 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "watchdog_enable",
820 			    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
821 			    ipmi_watchdog_sysctl_enable, "I",
822 			    "ipmi watchdog enable");
823 		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "watchdog_period",
824 			    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
825 			    ipmi_watchdog_sysctl_period, "I",
826 			    "ipmi watchdog period");
827 
828 	}
829 	ipmi_free_request(req);
830 
831 	sc->ipmi_cdev = make_dev(&ipmi_ops, device_get_unit(dev),
832 	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
833 	if (sc->ipmi_cdev == NULL) {
834 		device_printf(dev, "Failed to create cdev\n");
835 		return;
836 	}
837 	sc->ipmi_cdev->si_drv1 = sc;
838 }
839 
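/* Interface-independent attach: set up the optional interrupt and schedule ipmi_startup() via a config intrhook. */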
840 int
841 ipmi_attach(device_t dev)
842 {
843 	struct ipmi_softc *sc = device_get_softc(dev);
844 	int error;
845 
846 	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
847 		error = bus_setup_intr(dev, sc->ipmi_irq_res, 0,
848 		    sc->ipmi_intr, sc, &sc->ipmi_irq, NULL);
849 		if (error) {
850 			device_printf(dev, "can't set up interrupt\n");
851 			return (error);
852 		}
853 	}
854 
855 	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
856 	sc->ipmi_ich.ich_func = ipmi_startup;
857 	sc->ipmi_ich.ich_arg = sc;
858 	sc->ipmi_ich.ich_desc = "ipmi";
859 	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
860 		device_printf(dev, "can't establish configuration hook\n");
861 		return (ENOMEM);
862 	}
863 
864 	ipmi_attached = 1;
865 	return (0);
866 }
867 
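/* Detach: refuse while the device is open; otherwise stop the watchdog, shut down the backend kthread and release resources. */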
868 int
869 ipmi_detach(device_t dev)
870 {
871 	struct ipmi_softc *sc;
872 
873 	sc = device_get_softc(dev);
874 
875 	/* Fail if there are any open handles. */
876 	IPMI_LOCK(sc);
877 	if (sc->ipmi_opened) {
878 		IPMI_UNLOCK(sc);
879 		return (EBUSY);
880 	}
881 	IPMI_UNLOCK(sc);
882 	if (sc->ipmi_cdev)
883 		destroy_dev(sc->ipmi_cdev);
884 
885 	/* Detach from watchdog handling and turn off watchdog. */
886 	callout_cancel(&sc->ipmi_watchdog);
887 	ipmi_set_watchdog(sc, 0);
888 
889 	/* XXX: should use shutdown callout I think. */
890 	/* If the backend uses a kthread, shut it down. */
891 	IPMI_LOCK(sc);
892 	sc->ipmi_detaching = 1;
893 	if (sc->ipmi_kthread) {
894 		cv_broadcast(&sc->ipmi_request_added);
895 		lksleep(sc->ipmi_kthread, &sc->ipmi_lock, 0, "ipmi_wait", 0);
896 	}
897 	IPMI_UNLOCK(sc);
900 
901 	ipmi_release_resources(dev);
902 	lockuninit(&sc->ipmi_lock);
903 	return (0);
904 }
905 
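/* Release the IRQ and I/O resources claimed by the interface attachment. */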
906 void
907 ipmi_release_resources(device_t dev)
908 {
909 	struct ipmi_softc *sc;
910 	int i;
911 
912 	sc = device_get_softc(dev);
913 	if (sc->ipmi_irq)
914 		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
915 	if (sc->ipmi_irq_res)
916 		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
917 		    sc->ipmi_irq_res);
918 	for (i = 0; i < MAX_RES; i++)
919 		if (sc->ipmi_io_res[i])
920 			bus_release_resource(dev, sc->ipmi_io_type,
921 			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
922 }
923 
924 devclass_t ipmi_devclass;
925 
926 /* XXX: Why? */
927 static void
928 ipmi_unload(void *arg)
929 {
930 	device_t *	devs;
931 	int		count;
932 	int		i;
933 
934 	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
935 		return;
936 	for (i = 0; i < count; i++)
937 		device_delete_child(device_get_parent(devs[i]), devs[i]);
938 	kfree(devs, M_TEMP);
939 }
940 SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
941 
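/* sysctl handler: enable or disable the periodic watchdog callout and BMC timer. */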
942 static int
943 ipmi_watchdog_sysctl_enable(SYSCTL_HANDLER_ARGS)
944 {
945 	struct ipmi_softc *sc;
946 	int enable;
947 	int error;
948 
949 	sc = oidp->oid_arg1;
950 
951 	IPMI_LOCK(sc);
952 	enable = sc->ipmi_wdog_enable;
953 	IPMI_UNLOCK(sc);
954 
955 	error = sysctl_handle_int(oidp, &enable, 0, req);
956 	if (error || req->newptr == NULL)
957 		return error;
958 
959 	IPMI_LOCK(sc);
960 	sc->ipmi_wdog_enable = enable;
961 	IPMI_UNLOCK(sc);
962 
963 	if (sc->ipmi_wdog_enable == 0) {
964 		callout_stop(&sc->ipmi_watchdog);
965 		ipmi_set_watchdog(sc, 0);
966 	} else {
967 		callout_reset(&sc->ipmi_watchdog,
968 			      sc->ipmi_wdog_period * hz,
969 			      &ipmi_watchdog, (void *)sc);
970 		ipmi_set_watchdog(sc, sc->ipmi_wdog_period + 1);
971 	}
972 	return 0;
973 }
974 
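/* sysctl handler: set the watchdog refresh period in seconds. */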
975 static int
976 ipmi_watchdog_sysctl_period(SYSCTL_HANDLER_ARGS)
977 {
978 	struct ipmi_softc *sc;
979 	int error;
980 	int period;
981 
982 	sc = oidp->oid_arg1;
983 
984 	IPMI_LOCK(sc);
985 	period = sc->ipmi_wdog_period;
986 	IPMI_UNLOCK(sc);
987 
988 	error = sysctl_handle_int(oidp, &period, 30, req);
989 
990 	if (error || req->newptr == NULL)
991 		return error;
992 
993 	IPMI_LOCK(sc);
994 	sc->ipmi_wdog_period = period;
995 	IPMI_UNLOCK(sc);
996 
997 	return 0;
998 }
999 
1000 #ifdef IMPI_DEBUG
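/* Hex/ASCII dump of a buffer (at most 256 bytes) for debugging. */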
1001 static void
1002 dump_buf(u_char *data, int len)
1003 {
1004 	char buf[20];
1005 	char line[1024];
1006 	char temp[30];
1007 	int count = 0;
1008 	int i=0;
1009 
1010 	kprintf("Address %p len %d\n", data, len);
1011 	if (len > 256)
1012 		len = 256;
1013 	line[0] = '\000';
1014 	for (; len > 0; len--, data++) {
1015 		ksprintf(temp, "%02x ", *data);
1016 		strcat(line, temp);
1017 		if (*data >= ' ' && *data <= '~')
1018 			buf[count] = *data;
1019 		else if (*data >= 'A' && *data <= 'Z')
1020 			buf[count] = *data;
1021 		else
1022 			buf[count] = '.';
1023 		if (++count == 16) {
1024 			buf[count] = '\000';
1025 			count = 0;
1026 			kprintf("  %3x  %s %s\n", i, line, buf);
1027 			i+=16;
1028 			line[0] = '\000';
1029 		}
1030 	}
1031 	buf[count] = '\000';
1032 
1033 	for (; count != 16; count++) {
1034 		strcat(line, "   ");
1035 	}
1036 	kprintf("  %3x  %s %s\n", i, line, buf);
1037 }
1038 #endif
1039