xref: /freebsd/sys/dev/ipmi/ipmi.c (revision aa0a1e58)
1 /*-
2  * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/condvar.h>
34 #include <sys/conf.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/poll.h>
39 #include <sys/rman.h>
40 #include <sys/selinfo.h>
41 #include <sys/sysctl.h>
42 #include <sys/watchdog.h>
43 
44 #ifdef LOCAL_MODULE
45 #include <ipmi.h>
46 #include <ipmivars.h>
47 #else
48 #include <sys/ipmi.h>
49 #include <dev/ipmi/ipmivars.h>
50 #endif
51 
52 #ifdef IPMB
53 static int ipmi_ipmb_checksum(u_char *, int);
54 static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
55      u_char, u_char *, int);
56 #endif
57 
58 static d_ioctl_t ipmi_ioctl;
59 static d_poll_t ipmi_poll;
60 static d_open_t ipmi_open;
61 static void ipmi_dtor(void *arg);
62 
63 int ipmi_attached = 0;
64 
65 static int on = 1;
66 SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0, "IPMI driver parameters");
67 SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
68 	&on, 0, "Enable the ipmi character device");
69 
70 static struct cdevsw ipmi_cdevsw = {
71 	.d_version =    D_VERSION,
72 	.d_open =	ipmi_open,
73 	.d_ioctl =	ipmi_ioctl,
74 	.d_poll =	ipmi_poll,
75 	.d_name =	"ipmi",
76 };
77 
78 MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
79 
80 static int
81 ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
82 {
83 	struct ipmi_device *dev;
84 	struct ipmi_softc *sc;
85 	int error;
86 
87 	if (!on)
88 		return (ENOENT);
89 
90 	/* Initialize the per file descriptor data. */
91 	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
92 	error = devfs_set_cdevpriv(dev, ipmi_dtor);
93 	if (error) {
94 		free(dev, M_IPMI);
95 		return (error);
96 	}
97 
98 	sc = cdev->si_drv1;
99 	TAILQ_INIT(&dev->ipmi_completed_requests);
100 	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
101 	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
102 	dev->ipmi_softc = sc;
103 	IPMI_LOCK(sc);
104 	sc->ipmi_opened++;
105 	IPMI_UNLOCK(sc);
106 
107 	return (0);
108 }
109 
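/*
 * poll(2) handler: report the descriptor readable once a completed
 * request is queued for it, and POLLERR if the caller polls with no
 * requests outstanding.
 */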
110 static int
111 ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
112 {
113 	struct ipmi_device *dev;
114 	struct ipmi_softc *sc;
115 	int revents = 0;
116 
117 	if (devfs_get_cdevpriv((void **)&dev))
118 		return (0);
119 
120 	sc = cdev->si_drv1;
121 	IPMI_LOCK(sc);
122 	if (poll_events & (POLLIN | POLLRDNORM)) {
123 		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
124 		    revents |= poll_events & (POLLIN | POLLRDNORM);
125 		if (dev->ipmi_requests == 0)
126 		    revents |= POLLERR;
127 	}
128 
129 	if (revents == 0) {
130 		if (poll_events & (POLLIN | POLLRDNORM))
131 			selrecord(td, &dev->ipmi_select);
132 	}
133 	IPMI_UNLOCK(sc);
134 
135 	return (revents);
136 }
137 
138 static void
139 ipmi_purge_completed_requests(struct ipmi_device *dev)
140 {
141 	struct ipmi_request *req;
142 
143 	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
144 		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
145 		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
146 		dev->ipmi_requests--;
147 		ipmi_free_request(req);
148 	}
149 }
150 
151 static void
152 ipmi_dtor(void *arg)
153 {
154 	struct ipmi_request *req, *nreq;
155 	struct ipmi_device *dev;
156 	struct ipmi_softc *sc;
157 
158 	dev = arg;
159 	sc = dev->ipmi_softc;
160 
161 	IPMI_LOCK(sc);
162 	if (dev->ipmi_requests) {
163 		/* Throw away any pending requests for this device. */
164 		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
165 		    nreq) {
166 			if (req->ir_owner == dev) {
167 				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
168 				    ir_link);
169 				dev->ipmi_requests--;
170 				ipmi_free_request(req);
171 			}
172 		}
173 
174 		/* Throw away any pending completed requests for this device. */
175 		ipmi_purge_completed_requests(dev);
176 
177 		/*
178 		 * If we still have outstanding requests, they must be stuck
179 		 * in an interface driver, so wait for those to drain.
180 		 */
181 		dev->ipmi_closing = 1;
182 		while (dev->ipmi_requests > 0) {
183 			msleep(&dev->ipmi_requests, &sc->ipmi_lock, PWAIT,
184 			    "ipmidrain", 0);
185 			ipmi_purge_completed_requests(dev);
186 		}
187 	}
188 	sc->ipmi_opened--;
189 	IPMI_UNLOCK(sc);
190 
191 	/* Cleanup. */
192 	free(dev, M_IPMI);
193 }
194 
195 #ifdef IPMB
196 static int
197 ipmi_ipmb_checksum(u_char *data, int len)
198 {
199 	u_char sum = 0;
200 
201 	for (; len; len--) {
202 		sum += *data++;
203 	}
204 	return (-sum);
205 }
206 
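/*
 * Frame an encapsulated IPMB request inside a Send Message command.  The
 * request body is: channel, responder address, netFn/rsLUN, a checksum of
 * the two preceding bytes, requester address, rqSeq/rqLUN, command, data,
 * and a final checksum covering everything from the requester address on.
 * Both checksums are two's-complement sums as computed by
 * ipmi_ipmb_checksum().
 */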
207 /* XXX: Needs work */
208 static int
209 ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
210     u_char command, u_char seq, u_char *data, int data_len)
211 {
212 	struct ipmi_softc *sc = device_get_softc(dev);
213 	struct ipmi_request *req;
214 	u_char slave_addr = 0x52;
215 	int error;
216 
217 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
218 	    IPMI_SEND_MSG, data_len + 8, 0);
219 	req->ir_request[0] = channel;
220 	req->ir_request[1] = slave_addr;
221 	req->ir_request[2] = IPMI_ADDR(netfn, 0);
222 	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
223 	req->ir_request[4] = sc->ipmi_address;
224 	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
225 	req->ir_request[6] = command;
226 
227 	bcopy(data, &req->ir_request[7], data_len);
228 	req->ir_request[data_len + 7] =
229 	    ipmi_ipmb_checksum(&req->ir_request[4], data_len + 3);
230 
231 	ipmi_submit_driver_request(sc, req, 0);
232 	error = req->ir_error;
233 	ipmi_free_request(req);
234 
235 	return (error);
236 }
237 
238 static int
239 ipmi_handle_attn(struct ipmi_softc *sc)
240 {
241 	struct ipmi_request *req;
242 	int error;
243 
244 	device_printf(sc->ipmi_dev, "BMC has a message\n");
245 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
246 	    IPMI_GET_MSG_FLAGS, 0, 1);
247 
248 	ipmi_submit_driver_request(sc, req, 0);
249 
250 	if (req->ir_error == 0 && req->ir_compcode == 0) {
251 		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
252 			device_printf(sc->ipmi_dev, "message buffer full\n");
253 		}
254 		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
255 			device_printf(sc->ipmi_dev,
256 			    "watchdog about to go off\n");
257 		}
258 		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
259 			ipmi_free_request(req);
260 
261 			req = ipmi_alloc_driver_request(
262 			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
263 			    16);
264 			ipmi_submit_driver_request(sc, req, 0);
265 			device_printf(sc->ipmi_dev, "throw out message ");
266 			dump_buf(req->ir_reply, 16);
267 		}
268 	}
269 	error = req->ir_error;
270 	ipmi_free_request(req);
271 
272 	return (error);
273 }
274 #endif
275 
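/* Translate user pointers embedded in the 32-bit compat ioctl structures. */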
276 #ifdef IPMICTL_SEND_COMMAND_32
277 #define	PTRIN(p)	((void *)(uintptr_t)(p))
278 #define	PTROUT(p)	((uintptr_t)(p))
279 #endif
280 
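/*
 * Userland talks to the driver through /dev/ipmiN using the ioctls below.
 * A rough sketch of a consumer (illustrative only, not compiled here, and
 * it assumes the usual struct ipmi_req/ipmi_recv/ipmi_system_interface_addr
 * definitions from <sys/ipmi.h>):
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = { 0 };
 *	struct ipmi_recv recv = { 0 };
 *	unsigned char data[256];
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *
 *	req.addr = (unsigned char *)&addr;
 *	req.addr_len = sizeof(addr);
 *	req.msgid = 1;
 *	req.msg.netfn = 0x06;		(App network function)
 *	req.msg.cmd = 0x01;		(Get Device ID)
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 *	Once poll(2) reports the descriptor readable, fetch the reply;
 *	data[0] is the completion code and the reply body follows it.
 *
 *	recv.addr = (unsigned char *)&addr;
 *	recv.addr_len = sizeof(addr);
 *	recv.msg.data = data;
 *	recv.msg.data_len = sizeof(data);
 *	ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 */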
281 static int
282 ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
283     int flags, struct thread *td)
284 {
285 	struct ipmi_softc *sc;
286 	struct ipmi_device *dev;
287 	struct ipmi_request *kreq;
288 	struct ipmi_req *req = (struct ipmi_req *)data;
289 	struct ipmi_recv *recv = (struct ipmi_recv *)data;
290 	struct ipmi_addr addr;
291 #ifdef IPMICTL_SEND_COMMAND_32
292 	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
293 	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
294 	union {
295 		struct ipmi_req req;
296 		struct ipmi_recv recv;
297 	} thunk32;
298 #endif
299 	int error, len;
300 
301 	error = devfs_get_cdevpriv((void **)&dev);
302 	if (error)
303 		return (error);
304 
305 	sc = cdev->si_drv1;
306 
307 #ifdef IPMICTL_SEND_COMMAND_32
308 	/* Convert 32-bit structures to native. */
309 	switch (cmd) {
310 	case IPMICTL_SEND_COMMAND_32:
311 		req = &thunk32.req;
312 		req->addr = PTRIN(req32->addr);
313 		req->addr_len = req32->addr_len;
314 		req->msgid = req32->msgid;
315 		req->msg.netfn = req32->msg.netfn;
316 		req->msg.cmd = req32->msg.cmd;
317 		req->msg.data_len = req32->msg.data_len;
318 		req->msg.data = PTRIN(req32->msg.data);
319 		break;
320 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
321 	case IPMICTL_RECEIVE_MSG_32:
322 		recv = &thunk32.recv;
323 		recv->addr = PTRIN(recv32->addr);
324 		recv->addr_len = recv32->addr_len;
325 		recv->msg.data_len = recv32->msg.data_len;
326 		recv->msg.data = PTRIN(recv32->msg.data);
327 		break;
328 	}
329 #endif
330 
331 	switch (cmd) {
332 #ifdef IPMICTL_SEND_COMMAND_32
333 	case IPMICTL_SEND_COMMAND_32:
334 #endif
335 	case IPMICTL_SEND_COMMAND:
336 		/*
337 		 * XXX: Need to add proper handling of this.
338 		 */
339 		error = copyin(req->addr, &addr, sizeof(addr));
340 		if (error)
341 			return (error);
342 
343 		IPMI_LOCK(sc);
344 		/* clear out old stuff in queue of stuff done */
345 		/* XXX: This seems odd. */
346 		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
347 			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
348 			    ir_link);
349 			dev->ipmi_requests--;
350 			ipmi_free_request(kreq);
351 		}
352 		IPMI_UNLOCK(sc);
353 
354 		kreq = ipmi_alloc_request(dev, req->msgid,
355 		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
356 		    req->msg.data_len, IPMI_MAX_RX);
357 		error = copyin(req->msg.data, kreq->ir_request,
358 		    req->msg.data_len);
359 		if (error) {
360 			ipmi_free_request(kreq);
361 			return (error);
362 		}
363 		IPMI_LOCK(sc);
364 		dev->ipmi_requests++;
365 		error = sc->ipmi_enqueue_request(sc, kreq);
366 		IPMI_UNLOCK(sc);
367 		if (error)
368 			return (error);
369 		break;
370 #ifdef IPMICTL_SEND_COMMAND_32
371 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
372 	case IPMICTL_RECEIVE_MSG_32:
373 #endif
374 	case IPMICTL_RECEIVE_MSG_TRUNC:
375 	case IPMICTL_RECEIVE_MSG:
376 		error = copyin(recv->addr, &addr, sizeof(addr));
377 		if (error)
378 			return (error);
379 
380 		IPMI_LOCK(sc);
381 		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
382 		if (kreq == NULL) {
383 			IPMI_UNLOCK(sc);
384 			return (EAGAIN);
385 		}
386 		addr.channel = IPMI_BMC_CHANNEL;
387 		/* XXX */
388 		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
389 		recv->msgid = kreq->ir_msgid;
390 		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
391 		recv->msg.cmd = kreq->ir_command;
392 		error = kreq->ir_error;
393 		if (error) {
394 			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
395 			    ir_link);
396 			dev->ipmi_requests--;
397 			IPMI_UNLOCK(sc);
398 			ipmi_free_request(kreq);
399 			return (error);
400 		}
401 		len = kreq->ir_replylen + 1;
402 		if (recv->msg.data_len < len &&
403 		    (cmd == IPMICTL_RECEIVE_MSG
404 #ifdef IPMICTL_RECEIVE_MSG_32
405 		     || cmd == IPMICTL_RECEIVE_MSG_32
406 #endif
407 		    )) {
408 			IPMI_UNLOCK(sc);
409 			return (EMSGSIZE);
410 		}
411 		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
412 		dev->ipmi_requests--;
413 		IPMI_UNLOCK(sc);
414 		len = min(recv->msg.data_len, len);
415 		recv->msg.data_len = len;
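		/*
		 * The first byte copied out to the caller is the BMC
		 * completion code; the reply body follows it.
		 */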
416 		error = copyout(&addr, recv->addr, sizeof(addr));
417 		if (error == 0)
418 			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
419 		if (error == 0)
420 			error = copyout(kreq->ir_reply, recv->msg.data + 1,
421 			    len - 1);
422 		ipmi_free_request(kreq);
423 		if (error)
424 			return (error);
425 		break;
426 	case IPMICTL_SET_MY_ADDRESS_CMD:
427 		IPMI_LOCK(sc);
428 		dev->ipmi_address = *(int*)data;
429 		IPMI_UNLOCK(sc);
430 		break;
431 	case IPMICTL_GET_MY_ADDRESS_CMD:
432 		IPMI_LOCK(sc);
433 		*(int*)data = dev->ipmi_address;
434 		IPMI_UNLOCK(sc);
435 		break;
436 	case IPMICTL_SET_MY_LUN_CMD:
437 		IPMI_LOCK(sc);
438 		dev->ipmi_lun = *(int*)data & 0x3;
439 		IPMI_UNLOCK(sc);
440 		break;
441 	case IPMICTL_GET_MY_LUN_CMD:
442 		IPMI_LOCK(sc);
443 		*(int*)data = dev->ipmi_lun;
444 		IPMI_UNLOCK(sc);
445 		break;
446 	case IPMICTL_SET_GETS_EVENTS_CMD:
447 		/*
448 		device_printf(sc->ipmi_dev,
449 		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
450 		*/
451 		break;
452 	case IPMICTL_REGISTER_FOR_CMD:
453 	case IPMICTL_UNREGISTER_FOR_CMD:
454 		return (EOPNOTSUPP);
455 	default:
456 		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
457 		return (ENOIOCTL);
458 	}
459 
460 #ifdef IPMICTL_SEND_COMMAND_32
461 	/* Update changed fields in 32-bit structures. */
462 	switch (cmd) {
463 	case IPMICTL_RECEIVE_MSG_TRUNC_32:
464 	case IPMICTL_RECEIVE_MSG_32:
465 		recv32->recv_type = recv->recv_type;
466 		recv32->msgid = recv->msgid;
467 		recv32->msg.netfn = recv->msg.netfn;
468 		recv32->msg.cmd = recv->msg.cmd;
469 		recv32->msg.data_len = recv->msg.data_len;
470 		break;
471 	}
472 #endif
473 	return (0);
474 }
475 
476 /*
477  * Request management.
478  */
479 
480 /* Allocate a new request with request and reply buffers. */
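/*
 * The structure and both buffers come from a single allocation: the
 * request buffer starts at &req[1] and the reply buffer follows it.
 */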
481 struct ipmi_request *
482 ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
483     uint8_t command, size_t requestlen, size_t replylen)
484 {
485 	struct ipmi_request *req;
486 
487 	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
488 	    M_IPMI, M_WAITOK | M_ZERO);
489 	req->ir_owner = dev;
490 	req->ir_msgid = msgid;
491 	req->ir_addr = addr;
492 	req->ir_command = command;
493 	if (requestlen) {
494 		req->ir_request = (char *)&req[1];
495 		req->ir_requestlen = requestlen;
496 	}
497 	if (replylen) {
498 		req->ir_reply = (char *)&req[1] + requestlen;
499 		req->ir_replybuflen = replylen;
500 	}
501 	return (req);
502 }
503 
504 /* Free a request no longer in use. */
505 void
506 ipmi_free_request(struct ipmi_request *req)
507 {
508 
509 	free(req, M_IPMI);
510 }
511 
512 /* Store a processed request on the appropriate completion queue. */
513 void
514 ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
515 {
516 	struct ipmi_device *dev;
517 
518 	IPMI_LOCK_ASSERT(sc);
519 
520 	/*
521 	 * Anonymous requests (from inside the driver) always have a
522 	 * waiter that we awaken.
523 	 */
524 	if (req->ir_owner == NULL)
525 		wakeup(req);
526 	else {
527 		dev = req->ir_owner;
528 		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
529 		selwakeup(&dev->ipmi_select);
530 		if (dev->ipmi_closing)
531 			wakeup(&dev->ipmi_requests);
532 	}
533 }
534 
535 /* Enqueue an internal driver request and wait until it is completed. */
536 int
537 ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
538     int timo)
539 {
540 	int error;
541 
542 	IPMI_LOCK(sc);
543 	error = sc->ipmi_enqueue_request(sc, req);
544 	if (error == 0)
545 		error = msleep(req, &sc->ipmi_lock, 0, "ipmireq", timo);
546 	if (error == 0)
547 		error = req->ir_error;
548 	IPMI_UNLOCK(sc);
549 	return (error);
550 }
551 
552 /*
553  * Helper routine for polled system interfaces that use
554  * ipmi_polled_enqueue_request() to queue requests.  This request
555  * waits until there is a pending request and then returns the first
556  * request.  If the driver is shutting down, it returns NULL.
557  */
558 struct ipmi_request *
559 ipmi_dequeue_request(struct ipmi_softc *sc)
560 {
561 	struct ipmi_request *req;
562 
563 	IPMI_LOCK_ASSERT(sc);
564 
565 	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
566 		cv_wait(&sc->ipmi_request_added, &sc->ipmi_lock);
567 	if (sc->ipmi_detaching)
568 		return (NULL);
569 
570 	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
571 	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
572 	return (req);
573 }
574 
575 /* Default implementation of ipmi_enqueue_request() for polled interfaces. */
576 int
577 ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
578 {
579 
580 	IPMI_LOCK_ASSERT(sc);
581 
582 	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
583 	cv_signal(&sc->ipmi_request_added);
584 	return (0);
585 }
586 
587 /*
588  * Watchdog event handler.
589  */
590 
591 static int
592 ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
593 {
594 	struct ipmi_request *req;
595 	int error;
596 
597 	if (sec > 0xffff / 10)
598 		return (EINVAL);
599 
600 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
601 	    IPMI_SET_WDOG, 6, 0);
602 
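	/*
	 * Set Watchdog Timer request body: timer use, timer actions,
	 * pre-timeout interval, timer-use expiration flags to clear, and a
	 * 16-bit initial countdown in 100 ms units (hence sec * 10 and the
	 * 0xffff / 10 limit above).
	 */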
603 	if (sec) {
604 		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
605 		    | IPMI_SET_WD_TIMER_SMS_OS;
606 		req->ir_request[1] = IPMI_SET_WD_ACTION_RESET;
607 		req->ir_request[2] = 0;
608 		req->ir_request[3] = 0;	/* Timer use expiration flags */
609 		req->ir_request[4] = (sec * 10) & 0xff;
610 		req->ir_request[5] = (sec * 10) >> 8;
611 	} else {
612 		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
613 		req->ir_request[1] = 0;
614 		req->ir_request[2] = 0;
615 		req->ir_request[3] = 0;	/* Timer use expiration flags */
616 		req->ir_request[4] = 0;
617 		req->ir_request[5] = 0;
618 	}
619 
620 	error = ipmi_submit_driver_request(sc, req, 0);
621 	if (error)
622 		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
623 	else if (sec) {
624 		ipmi_free_request(req);
625 
626 		req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
627 		    IPMI_RESET_WDOG, 0, 0);
628 
629 		error = ipmi_submit_driver_request(sc, req, 0);
630 		if (error)
631 			device_printf(sc->ipmi_dev,
632 			    "Failed to reset watchdog\n");
633 	}
634 
635 	ipmi_free_request(req);
636 	return (error);
637 	/*
638 	dump_watchdog(sc);
639 	*/
640 }
641 
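/*
 * watchdog(9) encodes the timeout as a power of two in nanoseconds in the
 * low WD_INTERVAL bits of cmd; convert it to whole seconds, clamping
 * sub-second timeouts to one second, before programming the BMC.
 */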
642 static void
643 ipmi_wd_event(void *arg, unsigned int cmd, int *error)
644 {
645 	struct ipmi_softc *sc = arg;
646 	unsigned int timeout;
647 	int e;
648 
649 	cmd &= WD_INTERVAL;
650 	if (cmd > 0 && cmd <= 63) {
651 		timeout = ((uint64_t)1 << cmd) / 1000000000;
652 		if (timeout == 0)
653 			timeout = 1;
654 		e = ipmi_set_watchdog(sc, timeout);
655 		if (e == 0)
656 			*error = 0;
657 		else
658 			(void)ipmi_set_watchdog(sc, 0);
659 	} else {
660 		e = ipmi_set_watchdog(sc, 0);
661 		if (e != 0 && cmd == 0)
662 			*error = EOPNOTSUPP;
663 	}
664 }
665 
666 static void
667 ipmi_startup(void *arg)
668 {
669 	struct ipmi_softc *sc = arg;
670 	struct ipmi_request *req;
671 	device_t dev;
672 	int error, i;
673 
674 	config_intrhook_disestablish(&sc->ipmi_ich);
675 	dev = sc->ipmi_dev;
676 
677 	/* Initialize interface-independent state. */
678 	mtx_init(&sc->ipmi_lock, device_get_nameunit(dev), "ipmi", MTX_DEF);
679 	cv_init(&sc->ipmi_request_added, "ipmireq");
680 	TAILQ_INIT(&sc->ipmi_pending_requests);
681 
682 	/* Initialize interface-dependent state. */
683 	error = sc->ipmi_startup(sc);
684 	if (error) {
685 		device_printf(dev, "Failed to initialize interface: %d\n",
686 		    error);
687 		return;
688 	}
689 
690 	/* Send a GET_DEVICE_ID request. */
691 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
692 	    IPMI_GET_DEVICE_ID, 0, 15);
693 
694 	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
695 	if (error == EWOULDBLOCK) {
696 		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
697 		ipmi_free_request(req);
698 		return;
699 	} else if (error) {
700 		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
701 		ipmi_free_request(req);
702 		return;
703 	} else if (req->ir_compcode != 0) {
704 		device_printf(dev,
705 		    "Bad completion code for GET_DEVICE_ID: %d\n",
706 		    req->ir_compcode);
707 		ipmi_free_request(req);
708 		return;
709 	} else if (req->ir_replylen < 5) {
710 		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
711 		    req->ir_replylen);
712 		ipmi_free_request(req);
713 		return;
714 	}
715 
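	/*
	 * Get Device ID reply (after the completion code): byte 0 device ID,
	 * byte 1 device revision, byte 2 firmware major revision, byte 3
	 * firmware minor revision (BCD), byte 4 IPMI version (BCD: low
	 * nibble major, high nibble minor).
	 */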
716 	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d, "
717 	    "version %d.%d\n",
718 	     req->ir_reply[1] & 0x0f,
719 	     req->ir_reply[2] & 0x0f, req->ir_reply[3],
720 	     req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);
721 
722 	ipmi_free_request(req);
723 
724 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
725 	    IPMI_CLEAR_FLAGS, 1, 0);
726 
727 	ipmi_submit_driver_request(sc, req, 0);
728 
729 	/* XXX: Magic numbers */
730 	if (req->ir_compcode == 0xc0) {
731 		device_printf(dev, "Clear flags is busy\n");
732 	}
733 	if (req->ir_compcode == 0xc1) {
734 		device_printf(dev, "Clear flags illegal\n");
735 	}
736 	ipmi_free_request(req);
737 
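	/*
	 * Count the available channels by issuing Get Channel Info for
	 * channels 0-7 until the BMC rejects one.
	 */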
738 	for (i = 0; i < 8; i++) {
739 		req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
740 		    IPMI_GET_CHANNEL_INFO, 1, 0);
741 		req->ir_request[0] = i;
742 
743 		ipmi_submit_driver_request(sc, req, 0);
744 
745 		if (req->ir_compcode != 0) {
746 			ipmi_free_request(req);
747 			break;
748 		}
749 		ipmi_free_request(req);
750 	}
751 	device_printf(dev, "Number of channels %d\n", i);
752 
753 	/* probe for watchdog */
754 	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
755 	    IPMI_GET_WDOG, 0, 0);
756 
757 	ipmi_submit_driver_request(sc, req, 0);
758 
759 	if (req->ir_compcode == 0x00) {
760 		device_printf(dev, "Attached watchdog\n");
761 		/* register the watchdog event handler */
762 		sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(watchdog_list,
763 		    ipmi_wd_event, sc, 0);
764 	}
765 	ipmi_free_request(req);
766 
767 	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
768 	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
769 	if (sc->ipmi_cdev == NULL) {
770 		device_printf(dev, "Failed to create cdev\n");
771 		return;
772 	}
773 	sc->ipmi_cdev->si_drv1 = sc;
774 }
775 
776 int
777 ipmi_attach(device_t dev)
778 {
779 	struct ipmi_softc *sc = device_get_softc(dev);
780 	int error;
781 
782 	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
783 		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
784 		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
785 		if (error) {
786 			device_printf(dev, "can't set up interrupt\n");
787 			return (error);
788 		}
789 	}
790 
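	/*
	 * Defer the rest of the setup to a config_intrhook so that
	 * ipmi_startup() can sleep waiting on the BMC once interrupts and
	 * timers are available.
	 */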
791 	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
792 	sc->ipmi_ich.ich_func = ipmi_startup;
793 	sc->ipmi_ich.ich_arg = sc;
794 	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
795 		device_printf(dev, "can't establish configuration hook\n");
796 		return (ENOMEM);
797 	}
798 
799 	ipmi_attached = 1;
800 	return (0);
801 }
802 
803 int
804 ipmi_detach(device_t dev)
805 {
806 	struct ipmi_softc *sc;
807 
808 	sc = device_get_softc(dev);
809 
810 	/* Fail if there are any open handles. */
811 	IPMI_LOCK(sc);
812 	if (sc->ipmi_opened) {
813 		IPMI_UNLOCK(sc);
814 		return (EBUSY);
815 	}
816 	IPMI_UNLOCK(sc);
817 	if (sc->ipmi_cdev)
818 		destroy_dev(sc->ipmi_cdev);
819 
820 	/* Detach from watchdog handling and turn off watchdog. */
821 	if (sc->ipmi_watchdog_tag) {
822 		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
823 		ipmi_set_watchdog(sc, 0);
824 	}
825 
826 	/* XXX: should use shutdown callout I think. */
827 	/* If the backend uses a kthread, shut it down. */
828 	IPMI_LOCK(sc);
829 	sc->ipmi_detaching = 1;
830 	if (sc->ipmi_kthread) {
831 		cv_broadcast(&sc->ipmi_request_added);
832 		msleep(sc->ipmi_kthread, &sc->ipmi_lock, 0, "ipmi_wait", 0);
833 	}
834 	IPMI_UNLOCK(sc);
835 	if (sc->ipmi_irq)
836 		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
837 
838 	ipmi_release_resources(dev);
839 	mtx_destroy(&sc->ipmi_lock);
840 	return (0);
841 }
842 
843 void
844 ipmi_release_resources(device_t dev)
845 {
846 	struct ipmi_softc *sc;
847 	int i;
848 
849 	sc = device_get_softc(dev);
850 	if (sc->ipmi_irq)
851 		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
852 	if (sc->ipmi_irq_res)
853 		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
854 		    sc->ipmi_irq_res);
855 	for (i = 0; i < MAX_RES; i++)
856 		if (sc->ipmi_io_res[i])
857 			bus_release_resource(dev, sc->ipmi_io_type,
858 			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
859 }
860 
861 devclass_t ipmi_devclass;
862 
863 /* XXX: Why? */
864 static void
865 ipmi_unload(void *arg)
866 {
867 	device_t *	devs;
868 	int		count;
869 	int		i;
870 
871 	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
872 		return;
873 	for (i = 0; i < count; i++)
874 		device_delete_child(device_get_parent(devs[i]), devs[i]);
875 	free(devs, M_TEMP);
876 }
877 SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
878 
879 #ifdef IMPI_DEBUG
880 static void
881 dump_buf(u_char *data, int len)
882 {
883 	char buf[20];
884 	char line[1024];
885 	char temp[30];
886 	int count = 0;
887 	int i=0;
888 
889 	printf("Address %p len %d\n", data, len);
890 	if (len > 256)
891 		len = 256;
892 	line[0] = '\000';
893 	for (; len > 0; len--, data++) {
894 		sprintf(temp, "%02x ", *data);
895 		strcat(line, temp);
896 		if (*data >= ' ' && *data <= '~')
897 			buf[count] = *data;
898 		else if (*data >= 'A' && *data <= 'Z')
899 			buf[count] = *data;
900 		else
901 			buf[count] = '.';
902 		if (++count == 16) {
903 			buf[count] = '\000';
904 			count = 0;
905 			printf("  %3x  %s %s\n", i, line, buf);
906 			i+=16;
907 			line[0] = '\000';
908 		}
909 	}
910 	buf[count] = '\000';
911 
912 	for (; count != 16; count++) {
913 		strcat(line, "   ");
914 	}
915 	printf("  %3x  %s %s\n", i, line, buf);
916 }
917 #endif
918