[PATCH] make more file_operation structs static
drivers/char/ipmi/ipmi_devintf.c
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

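/*
 * Per-open-file state: each open of the device gets its own IPMI user,
 * its own queue of received messages (recv_msgs, protected by
 * recv_msg_lock) and its own default retry settings for sends.
 */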
struct ipmi_file_private
{
        ipmi_user_t          user;
        spinlock_t           recv_msg_lock;
        struct list_head     recv_msgs;
        struct file          *file;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t    wait;
        struct mutex         recv_mutex;
        int                  default_retries;
        unsigned int         default_retry_time_ms;
};

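/*
 * Receive callback registered through ipmi_hndlrs: the message handler
 * calls this when a message arrives for our user.  Queue the message and
 * wake up any poll/select or SIGIO (fasync) waiters.
 */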
static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int was_empty;
        unsigned long flags;

        spin_lock_irqsave(&(priv->recv_msg_lock), flags);

        was_empty = list_empty(&(priv->recv_msgs));
        list_add_tail(&(msg->link), &(priv->recv_msgs));

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }

        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        unsigned int mask = 0;
        unsigned long flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&(priv->recv_msgs)))
                mask |= (POLLIN | POLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;
        int result;

        result = fasync_helper(fd, file, on, &priv->fasync_queue);

        return (result);
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};

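/*
 * open() allocates the per-file state and creates an IPMI user on the
 * interface selected by the device minor number; release() destroys the
 * user again.
 */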
static int ipmi_open(struct inode *inode, struct file *file)
{
        int if_num = iminor(inode);
        int rv;
        struct ipmi_file_private *priv;


        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->file = file;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &(priv->user));
        if (rv) {
                kfree(priv);
                return rv;
        }

        file->private_data = priv;

        spin_lock_init(&(priv->recv_msg_lock));
        INIT_LIST_HEAD(&(priv->recv_msgs));
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

        return 0;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        int rv;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        ipmi_fasync(-1, file, 0);

        /* FIXME - free the messages in the list. */
        kfree(priv);

        return 0;
}

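/*
 * Common send path for the plain and "settime" send ioctls: copy the
 * destination address and message payload in from user space, then
 * submit the request with ipmi_request_settime().
 */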
static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
        int rv;
        struct ipmi_addr addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return, we must jump to "out" for
           error exits to free msgdata. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len))
                {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
 out:
        kfree(msg.data);
        return rv;
}

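/*
 * Main ioctl dispatcher, wired up as .ioctl in ipmi_fops below.  Each
 * IPMICTL_* command copies its argument structure from user space, acts
 * on it and leaves the result in rv.
 */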
static int ipmi_ioctl(struct inode  *inode,
                      struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
        int rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd)
        {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req,
                                     priv->default_retries,
                                     priv->default_retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv rsp;
                int addr_len;
                struct list_head *entry;
                struct ipmi_recv_msg *msg;
                unsigned long flags;


                rv = 0;
                if (copy_from_user(&rsp, arg, sizeof(rsp))) {
                        rv = -EFAULT;
                        break;
                }

                /* We claim a mutex because we don't want two
                   users getting something from the queue at a time.
                   Since we have to release the spinlock before we can
                   copy the data to the user, it's possible another
                   user will grab something from the queue, too. Then
                   the messages might get out of order if something
                   fails and the message gets put back onto the
                   queue. This mutex prevents that problem. */
                mutex_lock(&priv->recv_mutex);

                /* Grab the message off the list. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                if (list_empty(&(priv->recv_msgs))) {
                        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                        rv = -EAGAIN;
                        goto recv_err;
                }
                entry = priv->recv_msgs.next;
                msg = list_entry(entry, struct ipmi_recv_msg, link);
                list_del(entry);
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

                addr_len = ipmi_addr_length(msg->addr.addr_type);
                if (rsp.addr_len < addr_len)
                {
                        rv = -EINVAL;
                        goto recv_putback_on_err;
                }

                if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp.addr_len = addr_len;

                rsp.recv_type = msg->recv_type;
                rsp.msgid = msg->msgid;
                rsp.msg.netfn = msg->msg.netfn;
                rsp.msg.cmd = msg->msg.cmd;

                if (msg->msg.data_len > 0) {
                        if (rsp.msg.data_len < msg->msg.data_len) {
                                rv = -EMSGSIZE;
                                if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
                                        msg->msg.data_len = rsp.msg.data_len;
                                } else {
                                        goto recv_putback_on_err;
                                }
                        }

                        if (copy_to_user(rsp.msg.data,
                                         msg->msg.data,
                                         msg->msg.data_len))
                        {
                                rv = -EFAULT;
                                goto recv_putback_on_err;
                        }
                        rsp.msg.data_len = msg->msg.data_len;
                } else {
                        rsp.msg.data_len = 0;
                }

                if (copy_to_user(arg, &rsp, sizeof(rsp))) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }

                mutex_unlock(&priv->recv_mutex);
                ipmi_free_recv_msg(msg);
                break;

        recv_putback_on_err:
                /* If we got an error, put the message back onto
                   the head of the queue. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                list_add(entry, &(priv->recv_msgs));
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                mutex_unlock(&priv->recv_mutex);
                break;

        recv_err:
                mutex_unlock(&priv->recv_mutex);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                return ipmi_set_my_address(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }
        }

        return rv;
}

#ifdef CONFIG_COMPAT

/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

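/*
 * 32-bit layouts of the user-space structures: pointers are carried as
 * compat_uptr_t and longs as compat_long_t so the structure sizes match
 * what a 32-bit application passes in.
 */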
struct compat_ipmi_msg {
        u8            netfn;
        u8            cmd;
        u16           data_len;
        compat_uptr_t data;
};

struct compat_ipmi_req {
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
        compat_int_t           recv_type;
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req req;
        compat_int_t           retries;
        compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->netfn, &p32->netfn) ||
            __get_user(p64->cmd, &p32->cmd) ||
            __get_user(p64->data_len, &p32->data_len) ||
            __get_user(tmp, &p32->data))
                return -EFAULT;
        p64->data = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->netfn, &p32->netfn) ||
            __put_user(p64->cmd, &p32->cmd) ||
            __put_user(p64->data_len, &p32->data_len))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req __user *p32)
{

        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime __user *p32)
{
        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            get_compat_ipmi_req(&p64->req, &p32->req) ||
            __get_user(p64->retries, &p32->retries) ||
            __get_user(p64->retry_time_ms, &p32->retry_time_ms))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->recv_type, &p32->recv_type) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->recv_type, &p32->recv_type) ||
            __put_user(p64->addr_len, &p32->addr_len) ||
            __put_user(p64->msgid, &p32->msgid) ||
            put_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        int rc;
        struct ipmi_file_private *priv = filep->private_data;

        switch(cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;

                if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &rp,
                                       priv->default_retries,
                                       priv->default_retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;

                if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &sp.req,
                                       sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv __user *precv64;
                struct ipmi_recv recv64;

                if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                precv64 = compat_alloc_user_space(sizeof(recv64));
                if (copy_to_user(precv64, &recv64, sizeof(recv64)))
                        return -EFAULT;

                rc = ipmi_ioctl(filep->f_dentry->d_inode, filep,
                                ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
                                 ? IPMICTL_RECEIVE_MSG
                                 : IPMICTL_RECEIVE_MSG_TRUNC),
                                (unsigned long) precv64);
                if (rc != 0)
                        return rc;

                if (copy_from_user(&recv64, precv64, sizeof(recv64)))
                        return -EFAULT;

                if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                return rc;
        }
        default:
                return ipmi_ioctl(filep->f_dentry->d_inode, filep, cmd, arg);
        }
}
#endif

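/*
 * The file_operations table referred to by the patch title: it can be
 * static because nothing outside this file uses it; it is only passed
 * to register_chrdev() below.
 */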
static const struct file_operations ipmi_fops = {
        .owner        = THIS_MODULE,
        .ioctl        = ipmi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_ipmi_ioctl,
#endif
        .open         = ipmi_open,
        .release      = ipmi_release,
        .fasync       = ipmi_fasync,
        .poll         = ipmi_poll,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major = 0;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device. Setting it to -1 will disable the"
                 " interface. Other values will set the major device number"
                 " to that value.");

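/*
 * Illustrative use of the parameter (assuming the module is built as
 * ipmi_devintf.ko):
 *
 *      modprobe ipmi_devintf ipmi_major=42
 *
 * With the default of 0 the major number is allocated dynamically by
 * register_chrdev() in init_ipmi_devintf() below.
 */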
/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t            dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

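/*
 * ipmi_smi_watcher callbacks: when an IPMI interface appears, create the
 * matching "ipmi%d" class device and remember it on reg_list so it can
 * be torn down again; when the interface goes away, remove it.
 */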
static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                printk(KERN_ERR "ipmi_devintf: Unable to create the"
                       " ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        class_device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

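/*
 * Module init: create the "ipmi" class, register the character device
 * (picking up a dynamically allocated major if ipmi_major is 0), then
 * register the SMI watcher so device nodes are created and removed as
 * interfaces come and go.
 */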
static __init int init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        printk(KERN_INFO "ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                printk(KERN_ERR "ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0) {
                ipmi_major = rv;
        }

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                printk(KERN_WARNING "ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

static __exit void cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;
        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                class_device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
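
/*
 * Illustrative user-space sketch (not part of the driver; error handling
 * is omitted, and the "/dev/ipmi0" node name and the constants taken
 * from <linux/ipmi.h> are assumptions about a standard setup).  It sends
 * a Get Device ID request (netfn 0x06, command 0x01) to the BMC through
 * this interface and reads the response back:
 *
 *      #include <linux/ipmi.h>
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <poll.h>
 *
 *      static int get_device_id(void)
 *      {
 *              struct ipmi_system_interface_addr si = {
 *                      .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *                      .channel   = IPMI_BMC_CHANNEL,
 *              };
 *              struct ipmi_addr raddr;
 *              unsigned char data[256];
 *              struct ipmi_req req = { 0 };
 *              struct ipmi_recv recv = { 0 };
 *              struct pollfd pfd;
 *              int fd = open("/dev/ipmi0", O_RDWR);
 *
 *              req.addr         = (unsigned char *) &si;
 *              req.addr_len     = sizeof(si);
 *              req.msgid        = 1;
 *              req.msg.netfn    = 0x06;
 *              req.msg.cmd      = 0x01;
 *              req.msg.data     = NULL;
 *              req.msg.data_len = 0;
 *              ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 *              pfd.fd = fd;
 *              pfd.events = POLLIN;
 *              poll(&pfd, 1, -1);
 *
 *              recv.addr         = (unsigned char *) &raddr;
 *              recv.addr_len     = sizeof(raddr);
 *              recv.msg.data     = data;
 *              recv.msg.data_len = sizeof(data);
 *              return ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 *      }
 */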