// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

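/*
 * Per-open-file state.  Each open of the device binds the file to one
 * IPMI interface via an ipmi_user; messages received for that user are
 * queued on recv_msgs until userspace reads them.
 */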
struct ipmi_file_private
{
	struct ipmi_user     *user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex	     recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

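/*
 * Receive callback from the IPMI message handler.  Queue the message
 * and, if the queue was previously empty, wake up poll/select waiters
 * and notify async readers with SIGIO.
 */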
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	was_empty = list_empty(&priv->recv_msgs);
	list_add_tail(&msg->link, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}
}

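/* Report the device readable whenever the receive queue is non-empty. */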
static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&priv->recv_msgs))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;

	return fasync_helper(fd, file, on, &priv->fasync_queue);
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

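/*
 * Opening minor N attaches the file to IPMI interface N by creating a
 * new IPMI user with our receive handler.  Retry parameters start out
 * at the message handler's low-level defaults.
 */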
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &priv->user);
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	return rv;
}

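/*
 * Tear down the IPMI user for this file and free any messages still
 * sitting on its receive queue.
 */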
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

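/*
 * Copy a request from userspace and submit it to the message handler.
 * The message data is bounced through a kernel buffer sized for the
 * largest possible message (IPMI_MAX_MSG_LENGTH).
 */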
static int handle_send_req(struct ipmi_user *user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int              rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/*
	 * From here out we cannot return, we must jump to "out" for
	 * error exits to free msgdata.
	 */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

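/*
 * Pull the oldest message off this file's receive queue and copy it to
 * userspace through the supplied copyout routine.  On any error the
 * message is put back at the head of the queue so it is not lost.
 */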
static int handle_recv(struct ipmi_file_private *priv,
			bool trunc, struct ipmi_recv *rsp,
			int (*copyout)(struct ipmi_recv *, void __user *),
			void __user *to)
{
	int              addr_len;
	struct list_head *entry;
	struct ipmi_recv_msg  *msg;
	unsigned long    flags;
	int rv = 0, rv2 = 0;

	/*
	 * We claim a mutex because we don't want two users getting
	 * something from the queue at a time.  Since we have to release
	 * the spinlock before we can copy the data to the user, it's
	 * possible another user will grab something from the queue, too.
	 * Then the messages might get out of order if something fails
	 * and the message gets put back onto the queue.  This mutex
	 * prevents that problem.
	 */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (list_empty(&(priv->recv_msgs))) {
		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len) {
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			rv2 = -EMSGSIZE;
			if (trunc)
				msg->msg.data_len = rsp->msg.data_len;
			else
				goto recv_putback_on_err;
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv = copyout(rsp, to);
	if (rv)
		goto recv_putback_on_err;

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	return rv2;

recv_putback_on_err:
	/*
	 * If we got an error, put the message back onto the head of the
	 * queue.
	 */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	list_add(entry, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}

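/*
 * Main ioctl dispatcher; the command codes and structures come from
 * <linux/ipmi.h>.  A rough, illustrative userspace sketch of sending a
 * command (fill in req.addr, req.addr_len, req.msgid and req.msg
 * first; fd is an open /dev/ipmiN):
 *
 *	struct ipmi_req req;
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 */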
static long ipmi_ioctl(struct file   *file,
		       unsigned int  cmd,
		       unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv      rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec   val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		return ipmi_set_my_address(priv->user, val.channel, val.value);
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		mutex_unlock(&priv->recv_mutex);
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		mutex_lock(&priv->recv_mutex);
		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}

	default:
		rv = -ENOTTY;
		break;
	}

	return rv;
}

#ifdef CONFIG_COMPAT
/*
 * The following supports 32-bit compatible ioctls on 64-bit kernels,
 * allowing 32-bit applications to run against a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
		struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);
	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch (cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;
		struct compat_ipmi_req r32;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		return handle_send_req(priv->user, &rp,
				       retries, retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv   recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				 cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				 &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

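/*
 * SMI watcher callback: a new IPMI interface has appeared, so create
 * the matching ipmi<n> device and remember it on reg_list.
 */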
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

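/*
 * SMI watcher callback: an interface went away, so drop it from
 * reg_list and destroy its device.
 */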
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

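/*
 * Module init: create the "ipmi" class, register the character device
 * (a dynamic major unless ipmi_major is set), and register the SMI
 * watcher so per-interface devices are created and destroyed as
 * interfaces come and go.
 */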
static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	pr_info("ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		pr_err("ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		pr_err("ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		pr_warn("ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");