[PATCH] IPMI: pass sysfs name from lower level driver
deliverable/linux.git: drivers/char/ipmi/ipmi_msghandler.c
1 /*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48
49 #define PFX "IPMI message handler: "
50
51 #define IPMI_DRIVER_VERSION "39.0"
52
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
55
56 static int initialized = 0;
57
58 #ifdef CONFIG_PROC_FS
59 static struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
61
62 #define MAX_EVENTS_IN_QUEUE 25
63
64 /* Don't let a message sit in a queue forever, always time it with at least
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
67
68
69 /*
70 * The main "user" data structure.
71 */
72 struct ipmi_user
73 {
74 struct list_head link;
75
76 /* Set to "0" when the user is destroyed. */
77 int valid;
78
79 struct kref refcount;
80
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
83 void *handler_data;
84
85 /* The interface this user is bound to. */
86 ipmi_smi_t intf;
87
88 /* Does this user receive IPMI events? */
89 int gets_events;
90 };
91
92 struct cmd_rcvr
93 {
94 struct list_head link;
95
96 ipmi_user_t user;
97 unsigned char netfn;
98 unsigned char cmd;
99 unsigned int chans;
100
101 /*
102 * This is used to form a linked list during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
109 };
110
111 struct seq_table
112 {
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
115
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
119
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
123 long seqid;
124
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
129 };
130
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
133 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
134
135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
136 do { \
137 seq = ((msgid >> 26) & 0x3f); \
138 seqid = (msgid & 0x3fffff); \
139 } while (0)
140
141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
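/*
 * A minimal sketch (illustration only, kept out of the build) of how a
 * sequence table slot and its seqid round-trip through a msgid.  Note
 * that STORE_SEQ_IN_MSGID keeps 26 bits of the seqid while
 * GET_SEQ_FROM_MSGID and NEXT_SEQID keep only 22, so seqids effectively
 * wrap at 22 bits.
 */
#if 0
static void example_msgid_roundtrip(void)
{
	unsigned char seq = 17;		/* index into seq_table (0..63) */
	long seqid = 12345;		/* per-slot sequence id */
	long msgid = STORE_SEQ_IN_MSGID(seq, seqid);
	unsigned char seq2;
	unsigned long seqid2;

	GET_SEQ_FROM_MSGID(msgid, seq2, seqid2);
	/* seq2 == 17 and seqid2 == 12345 here. */
}
#endif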
142
143 struct ipmi_channel
144 {
145 unsigned char medium;
146 unsigned char protocol;
147
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
151
152 /* My LUN. This should generally stay the SMS LUN, but just in
153 case... */
154 unsigned char lun;
155 };
156
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
159 {
160 char *name;
161 struct ipmi_proc_entry *next;
162 };
163 #endif
164
165 struct bmc_device
166 {
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
170 int guid_set;
171
172 struct kref refcount;
173
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
185 };
186
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
189 struct ipmi_smi
190 {
191 /* What interface number are we? */
192 int intf_num;
193
194 struct kref refcount;
195
196 /* Used for a list of interfaces. */
197 struct list_head link;
198
199 /* The list of upper layers that are using me. seq_lock
200 * protects this. */
201 struct list_head users;
202
203 /* Used for wake ups at startup. */
204 wait_queue_head_t waitq;
205
206 struct bmc_device *bmc;
207 char *my_dev_name;
208 char *sysfs_name;
209
210 /* This is the lower-layer's sender routine. */
211 struct ipmi_smi_handlers *handlers;
212 void *send_info;
213
214 #ifdef CONFIG_PROC_FS
215 /* A list of proc entries for this interface. Additions to and
216 removals from this list are protected by proc_entry_lock, since
217 entries may be added and removed by different threads. */
218 spinlock_t proc_entry_lock;
219 struct ipmi_proc_entry *proc_entries;
220 #endif
221
222 /* Driver-model device for the system interface. */
223 struct device *si_dev;
224
225 /* A table of sequence numbers for this interface. We use the
226 sequence numbers for IPMB messages that go out of the
227 interface to match them up with their responses. A routine
228 is called periodically to time the items in this list. */
229 spinlock_t seq_lock;
230 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
231 int curr_seq;
232
233 /* Messages that were delayed for some reason (out of memory,
234 for instance), will go in here to be processed later in a
235 periodic timer interrupt. */
236 spinlock_t waiting_msgs_lock;
237 struct list_head waiting_msgs;
238
239 /* The list of command receivers that are registered for commands
240 on this interface. */
241 struct mutex cmd_rcvrs_mutex;
242 struct list_head cmd_rcvrs;
243
244 /* Events that were queued because no one was there to receive
245 them. */
246 spinlock_t events_lock; /* For dealing with event stuff. */
247 struct list_head waiting_events;
248 unsigned int waiting_events_count; /* How many events in queue? */
249
250 /* The event receiver for my BMC, only really used at panic
251 shutdown as a place to store this. */
252 unsigned char event_receiver;
253 unsigned char event_receiver_lun;
254 unsigned char local_sel_device;
255 unsigned char local_event_generator;
256
257 /* A cheap hack, if this is non-null and a message to an
258 interface comes in with a NULL user, call this routine with
259 it. Note that the message will still be freed by the
260 caller. This only works on the system interface. */
261 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
262
263 /* When we are scanning the channels for an SMI, this will
264 tell which channel we are scanning. */
265 int curr_channel;
266
267 /* Channel information */
268 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
269
270 /* Proc FS stuff. */
271 struct proc_dir_entry *proc_dir;
272 char proc_dir_name[10];
273
274 spinlock_t counter_lock; /* For making counters atomic. */
275
276 /* Commands we got that were invalid. */
277 unsigned int sent_invalid_commands;
278
279 /* Commands we sent to the MC. */
280 unsigned int sent_local_commands;
281 /* Responses from the MC that were delivered to a user. */
282 unsigned int handled_local_responses;
283 /* Responses from the MC that were not delivered to a user. */
284 unsigned int unhandled_local_responses;
285
286 /* Commands we sent out to the IPMB bus. */
287 unsigned int sent_ipmb_commands;
288 /* Commands sent on the IPMB that had errors on the SEND CMD */
289 unsigned int sent_ipmb_command_errs;
290 /* Each retransmit increments this count. */
291 unsigned int retransmitted_ipmb_commands;
292 /* When a message times out (runs out of retransmits) this is
293 incremented. */
294 unsigned int timed_out_ipmb_commands;
295
296 /* This is like above, but for broadcasts. Broadcasts are
297 *not* included in the above count (they are expected to
298 time out). */
299 unsigned int timed_out_ipmb_broadcasts;
300
301 /* Responses I have sent to the IPMB bus. */
302 unsigned int sent_ipmb_responses;
303
304 /* The response was delivered to the user. */
305 unsigned int handled_ipmb_responses;
306 /* The response had invalid data in it. */
307 unsigned int invalid_ipmb_responses;
308 /* The response didn't have anyone waiting for it. */
309 unsigned int unhandled_ipmb_responses;
310
311 /* Commands we sent out on the LAN interface. */
312 unsigned int sent_lan_commands;
313 /* Commands sent on the LAN that had errors on the SEND CMD */
314 unsigned int sent_lan_command_errs;
315 /* Each retransmit increments this count. */
316 unsigned int retransmitted_lan_commands;
317 /* When a message times out (runs out of retransmits) this is
318 incremented. */
319 unsigned int timed_out_lan_commands;
320
321 /* Responses I have sent out on the LAN interface. */
322 unsigned int sent_lan_responses;
323
324 /* The response was delivered to the user. */
325 unsigned int handled_lan_responses;
326 /* The response had invalid data in it. */
327 unsigned int invalid_lan_responses;
328 /* The response didn't have anyone waiting for it. */
329 unsigned int unhandled_lan_responses;
330
331 /* The command was delivered to the user. */
332 unsigned int handled_commands;
333 /* The command had invalid data in it. */
334 unsigned int invalid_commands;
335 /* The command didn't have anyone waiting for it. */
336 unsigned int unhandled_commands;
337
338 /* Invalid data in an event. */
339 unsigned int invalid_events;
340 /* Events that were received with the proper format. */
341 unsigned int events;
342 };
343 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
344
345 /**
346 * The driver model view of the IPMI messaging driver.
347 */
348 static struct device_driver ipmidriver = {
349 .name = "ipmi",
350 .bus = &platform_bus_type
351 };
352 static DEFINE_MUTEX(ipmidriver_mutex);
353
354 static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
355 static DEFINE_MUTEX(ipmi_interfaces_mutex);
356
357 /* List of watchers that want to know when smi's are added and
358 deleted. */
359 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
360 static DECLARE_RWSEM(smi_watchers_sem);
361
362
363 static void free_recv_msg_list(struct list_head *q)
364 {
365 struct ipmi_recv_msg *msg, *msg2;
366
367 list_for_each_entry_safe(msg, msg2, q, link) {
368 list_del(&msg->link);
369 ipmi_free_recv_msg(msg);
370 }
371 }
372
373 static void free_smi_msg_list(struct list_head *q)
374 {
375 struct ipmi_smi_msg *msg, *msg2;
376
377 list_for_each_entry_safe(msg, msg2, q, link) {
378 list_del(&msg->link);
379 ipmi_free_smi_msg(msg);
380 }
381 }
382
383 static void clean_up_interface_data(ipmi_smi_t intf)
384 {
385 int i;
386 struct cmd_rcvr *rcvr, *rcvr2;
387 struct list_head list;
388
389 free_smi_msg_list(&intf->waiting_msgs);
390 free_recv_msg_list(&intf->waiting_events);
391
392 /* Wholesale remove all the entries from the list in the
393 * interface and wait for RCU to know that none are in use. */
394 mutex_lock(&intf->cmd_rcvrs_mutex);
395 list_add_rcu(&list, &intf->cmd_rcvrs);
396 list_del_rcu(&intf->cmd_rcvrs);
397 mutex_unlock(&intf->cmd_rcvrs_mutex);
398 synchronize_rcu();
399
400 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
401 kfree(rcvr);
402
403 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
404 if ((intf->seq_table[i].inuse)
405 && (intf->seq_table[i].recv_msg))
406 {
407 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
408 }
409 }
410 }
411
412 static void intf_free(struct kref *ref)
413 {
414 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
415
416 clean_up_interface_data(intf);
417 kfree(intf);
418 }
419
420 struct watcher_entry {
421 struct list_head link;
422 int intf_num;
    struct device *si_dev;
423 };
424
425 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
426 {
427 ipmi_smi_t intf;
428 struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
429 struct watcher_entry *e, *e2;
430
431 mutex_lock(&ipmi_interfaces_mutex);
432
433 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
434 if (intf->intf_num == -1)
435 continue;
436 e = kmalloc(sizeof(*e), GFP_KERNEL);
437 if (!e)
438 goto out_err;
439 e->intf_num = intf->intf_num;
    e->si_dev = intf->si_dev;
440 list_add_tail(&e->link, &to_deliver);
441 }
442
443 down_write(&smi_watchers_sem);
444 list_add(&(watcher->link), &smi_watchers);
445 up_write(&smi_watchers_sem);
446
447 mutex_unlock(&ipmi_interfaces_mutex);
448
449 list_for_each_entry_safe(e, e2, &to_deliver, link) {
450 list_del(&e->link);
451 watcher->new_smi(e->intf_num, e->si_dev);
452 kfree(e);
453 }
454
455
456 return 0;
457
458 out_err:
    mutex_unlock(&ipmi_interfaces_mutex);
459 list_for_each_entry_safe(e, e2, &to_deliver, link) {
460 list_del(&e->link);
461 kfree(e);
462 }
463 return -ENOMEM;
464 }
465
466 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
467 {
468 down_write(&smi_watchers_sem);
469 list_del(&(watcher->link));
470 up_write(&smi_watchers_sem);
471 return 0;
472 }
473
474 static void
475 call_smi_watchers(int i, struct device *dev)
476 {
477 struct ipmi_smi_watcher *w;
478
479 down_read(&smi_watchers_sem);
480 list_for_each_entry(w, &smi_watchers, link) {
481 if (try_module_get(w->owner)) {
482 w->new_smi(i, dev);
483 module_put(w->owner);
484 }
485 }
486 up_read(&smi_watchers_sem);
487 }
488
489 static int
490 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
491 {
492 if (addr1->addr_type != addr2->addr_type)
493 return 0;
494
495 if (addr1->channel != addr2->channel)
496 return 0;
497
498 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
499 struct ipmi_system_interface_addr *smi_addr1
500 = (struct ipmi_system_interface_addr *) addr1;
501 struct ipmi_system_interface_addr *smi_addr2
502 = (struct ipmi_system_interface_addr *) addr2;
503 return (smi_addr1->lun == smi_addr2->lun);
504 }
505
506 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
507 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
508 {
509 struct ipmi_ipmb_addr *ipmb_addr1
510 = (struct ipmi_ipmb_addr *) addr1;
511 struct ipmi_ipmb_addr *ipmb_addr2
512 = (struct ipmi_ipmb_addr *) addr2;
513
514 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
515 && (ipmb_addr1->lun == ipmb_addr2->lun));
516 }
517
518 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
519 struct ipmi_lan_addr *lan_addr1
520 = (struct ipmi_lan_addr *) addr1;
521 struct ipmi_lan_addr *lan_addr2
522 = (struct ipmi_lan_addr *) addr2;
523
524 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
525 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
526 && (lan_addr1->session_handle
527 == lan_addr2->session_handle)
528 && (lan_addr1->lun == lan_addr2->lun));
529 }
530
531 return 1;
532 }
533
534 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
535 {
536 if (len < sizeof(struct ipmi_system_interface_addr)) {
537 return -EINVAL;
538 }
539
540 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
541 if (addr->channel != IPMI_BMC_CHANNEL)
542 return -EINVAL;
543 return 0;
544 }
545
546 if ((addr->channel == IPMI_BMC_CHANNEL)
547 || (addr->channel >= IPMI_MAX_CHANNELS)
548 || (addr->channel < 0))
549 return -EINVAL;
550
551 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
552 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
553 {
554 if (len < sizeof(struct ipmi_ipmb_addr)) {
555 return -EINVAL;
556 }
557 return 0;
558 }
559
560 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
561 if (len < sizeof(struct ipmi_lan_addr)) {
562 return -EINVAL;
563 }
564 return 0;
565 }
566
567 return -EINVAL;
568 }
569
570 unsigned int ipmi_addr_length(int addr_type)
571 {
572 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
573 return sizeof(struct ipmi_system_interface_addr);
574
575 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
576 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
577 {
578 return sizeof(struct ipmi_ipmb_addr);
579 }
580
581 if (addr_type == IPMI_LAN_ADDR_TYPE)
582 return sizeof(struct ipmi_lan_addr);
583
584 return 0;
585 }
586
587 static void deliver_response(struct ipmi_recv_msg *msg)
588 {
589 if (!msg->user) {
590 ipmi_smi_t intf = msg->user_msg_data;
591 unsigned long flags;
592
593 /* Special handling for NULL users. */
594 if (intf->null_user_handler) {
595 intf->null_user_handler(intf, msg);
596 spin_lock_irqsave(&intf->counter_lock, flags);
597 intf->handled_local_responses++;
598 spin_unlock_irqrestore(&intf->counter_lock, flags);
599 } else {
600 /* No handler, so give up. */
601 spin_lock_irqsave(&intf->counter_lock, flags);
602 intf->unhandled_local_responses++;
603 spin_unlock_irqrestore(&intf->counter_lock, flags);
604 }
605 ipmi_free_recv_msg(msg);
606 } else {
607 ipmi_user_t user = msg->user;
608 user->handler->ipmi_recv_hndl(msg, user->handler_data);
609 }
610 }
611
612 /* Find the next sequence number not being used and add the given
613 message with the given timeout to the sequence table. This must be
614 called with the interface's seq_lock held. */
615 static int intf_next_seq(ipmi_smi_t intf,
616 struct ipmi_recv_msg *recv_msg,
617 unsigned long timeout,
618 int retries,
619 int broadcast,
620 unsigned char *seq,
621 long *seqid)
622 {
623 int rv = 0;
624 unsigned int i;
625
626 for (i = intf->curr_seq;
627 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
628 i = (i+1)%IPMI_IPMB_NUM_SEQ)
629 {
630 if (!intf->seq_table[i].inuse)
631 break;
632 }
633
634 if (!intf->seq_table[i].inuse) {
635 intf->seq_table[i].recv_msg = recv_msg;
636
637 /* Start with the maximum timeout, when the send response
638 comes in we will start the real timer. */
639 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
640 intf->seq_table[i].orig_timeout = timeout;
641 intf->seq_table[i].retries_left = retries;
642 intf->seq_table[i].broadcast = broadcast;
643 intf->seq_table[i].inuse = 1;
644 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
645 *seq = i;
646 *seqid = intf->seq_table[i].seqid;
647 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
648 } else {
649 rv = -EAGAIN;
650 }
651
652 return rv;
653 }
654
655 /* Return the receive message for the given sequence number and
656 release the sequence number so it can be reused. Some other data
657 is passed in to be sure the message matches up correctly (to help
658 guard against messages coming in after their timeout and the
659 sequence number being reused). */
660 static int intf_find_seq(ipmi_smi_t intf,
661 unsigned char seq,
662 short channel,
663 unsigned char cmd,
664 unsigned char netfn,
665 struct ipmi_addr *addr,
666 struct ipmi_recv_msg **recv_msg)
667 {
668 int rv = -ENODEV;
669 unsigned long flags;
670
671 if (seq >= IPMI_IPMB_NUM_SEQ)
672 return -EINVAL;
673
674 spin_lock_irqsave(&(intf->seq_lock), flags);
675 if (intf->seq_table[seq].inuse) {
676 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
677
678 if ((msg->addr.channel == channel)
679 && (msg->msg.cmd == cmd)
680 && (msg->msg.netfn == netfn)
681 && (ipmi_addr_equal(addr, &(msg->addr))))
682 {
683 *recv_msg = msg;
684 intf->seq_table[seq].inuse = 0;
685 rv = 0;
686 }
687 }
688 spin_unlock_irqrestore(&(intf->seq_lock), flags);
689
690 return rv;
691 }
692
693
694 /* Start the timer for a specific sequence table entry. */
695 static int intf_start_seq_timer(ipmi_smi_t intf,
696 long msgid)
697 {
698 int rv = -ENODEV;
699 unsigned long flags;
700 unsigned char seq;
701 unsigned long seqid;
702
703
704 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
705
706 spin_lock_irqsave(&(intf->seq_lock), flags);
707 /* We do this verification because the user can be deleted
708 while a message is outstanding. */
709 if ((intf->seq_table[seq].inuse)
710 && (intf->seq_table[seq].seqid == seqid))
711 {
712 struct seq_table *ent = &(intf->seq_table[seq]);
713 ent->timeout = ent->orig_timeout;
714 rv = 0;
715 }
716 spin_unlock_irqrestore(&(intf->seq_lock), flags);
717
718 return rv;
719 }
720
721 /* Got an error for the send message for a specific sequence number. */
722 static int intf_err_seq(ipmi_smi_t intf,
723 long msgid,
724 unsigned int err)
725 {
726 int rv = -ENODEV;
727 unsigned long flags;
728 unsigned char seq;
729 unsigned long seqid;
730 struct ipmi_recv_msg *msg = NULL;
731
732
733 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
734
735 spin_lock_irqsave(&(intf->seq_lock), flags);
736 /* We do this verification because the user can be deleted
737 while a message is outstanding. */
738 if ((intf->seq_table[seq].inuse)
739 && (intf->seq_table[seq].seqid == seqid))
740 {
741 struct seq_table *ent = &(intf->seq_table[seq]);
742
743 ent->inuse = 0;
744 msg = ent->recv_msg;
745 rv = 0;
746 }
747 spin_unlock_irqrestore(&(intf->seq_lock), flags);
748
749 if (msg) {
750 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
751 msg->msg_data[0] = err;
752 msg->msg.netfn |= 1; /* Convert to a response. */
753 msg->msg.data_len = 1;
754 msg->msg.data = msg->msg_data;
755 deliver_response(msg);
756 }
757
758 return rv;
759 }
760
761
762 int ipmi_create_user(unsigned int if_num,
763 struct ipmi_user_hndl *handler,
764 void *handler_data,
765 ipmi_user_t *user)
766 {
767 unsigned long flags;
768 ipmi_user_t new_user;
769 int rv = 0;
770 ipmi_smi_t intf;
771
772 /* There is no module usecount here, because it's not
773 required. Since this can only be used by and called from
774 other modules, they will implicitly use this module, and
775 thus this can't be removed unless the other modules are
776 removed. */
777
778 if (handler == NULL)
779 return -EINVAL;
780
781 /* Make sure the driver is actually initialized, this handles
782 problems with initialization order. */
783 if (!initialized) {
784 rv = ipmi_init_msghandler();
785 if (rv)
786 return rv;
787
788 /* The init code doesn't return an error if it was turned
789 off, but it won't initialize. Check that. */
790 if (!initialized)
791 return -ENODEV;
792 }
793
794 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
795 if (!new_user)
796 return -ENOMEM;
797
798 rcu_read_lock();
799 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
800 if (intf->intf_num == if_num)
801 goto found;
802 }
803 rcu_read_unlock();
804 rv = -EINVAL;
805 goto out_kfree;
806
807 found:
808 /* Note that each existing user holds a refcount to the interface. */
809 kref_get(&intf->refcount);
810 rcu_read_unlock();
811
812 kref_init(&new_user->refcount);
813 new_user->handler = handler;
814 new_user->handler_data = handler_data;
815 new_user->intf = intf;
816 new_user->gets_events = 0;
817
818 if (!try_module_get(intf->handlers->owner)) {
819 rv = -ENODEV;
820 goto out_kref;
821 }
822
823 if (intf->handlers->inc_usecount) {
824 rv = intf->handlers->inc_usecount(intf->send_info);
825 if (rv) {
826 module_put(intf->handlers->owner);
827 goto out_kref;
828 }
829 }
830
831 new_user->valid = 1;
832 spin_lock_irqsave(&intf->seq_lock, flags);
833 list_add_rcu(&new_user->link, &intf->users);
834 spin_unlock_irqrestore(&intf->seq_lock, flags);
835 *user = new_user;
836 return 0;
837
838 out_kref:
839 kref_put(&intf->refcount, intf_free);
840 out_kfree:
841 kfree(new_user);
842 return rv;
843 }
844
845 static void free_user(struct kref *ref)
846 {
847 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
848 kfree(user);
849 }
850
851 int ipmi_destroy_user(ipmi_user_t user)
852 {
853 ipmi_smi_t intf = user->intf;
854 int i;
855 unsigned long flags;
856 struct cmd_rcvr *rcvr;
857 struct cmd_rcvr *rcvrs = NULL;
858
859 user->valid = 0;
860
861 /* Remove the user from the interface's sequence table. */
862 spin_lock_irqsave(&intf->seq_lock, flags);
863 list_del_rcu(&user->link);
864
865 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
866 if (intf->seq_table[i].inuse
867 && (intf->seq_table[i].recv_msg->user == user))
868 {
869 intf->seq_table[i].inuse = 0;
870 }
871 }
872 spin_unlock_irqrestore(&intf->seq_lock, flags);
873
874 /*
875 * Remove the user from the command receiver's table. First
876 * we build a list of everything (not using the standard link,
877 * since other things may be using it till we do
878 * synchronize_rcu()) then free everything in that list.
879 */
880 mutex_lock(&intf->cmd_rcvrs_mutex);
881 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
882 if (rcvr->user == user) {
883 list_del_rcu(&rcvr->link);
884 rcvr->next = rcvrs;
885 rcvrs = rcvr;
886 }
887 }
888 mutex_unlock(&intf->cmd_rcvrs_mutex);
889 synchronize_rcu();
890 while (rcvrs) {
891 rcvr = rcvrs;
892 rcvrs = rcvr->next;
893 kfree(rcvr);
894 }
895
896 module_put(intf->handlers->owner);
897 if (intf->handlers->dec_usecount)
898 intf->handlers->dec_usecount(intf->send_info);
899
900 kref_put(&intf->refcount, intf_free);
901
902 kref_put(&user->refcount, free_user);
903
904 return 0;
905 }
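/*
 * A minimal sketch of how a client module is expected to pair these
 * calls (the handler, interface number, and helper names here are
 * hypothetical, shown for illustration only):
 */
#if 0
static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
{
	/* Consume the response or event, then free it. */
	ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static int example_bind_user(void)
{
	ipmi_user_t user;
	int rv = ipmi_create_user(0, &example_hndl, NULL, &user);

	if (rv)
		return rv;
	/* ... send requests with ipmi_request_settime(user, ...) ... */
	return ipmi_destroy_user(user);
}
#endif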
906
907 void ipmi_get_version(ipmi_user_t user,
908 unsigned char *major,
909 unsigned char *minor)
910 {
911 *major = ipmi_version_major(&user->intf->bmc->id);
912 *minor = ipmi_version_minor(&user->intf->bmc->id);
913 }
914
915 int ipmi_set_my_address(ipmi_user_t user,
916 unsigned int channel,
917 unsigned char address)
918 {
919 if (channel >= IPMI_MAX_CHANNELS)
920 return -EINVAL;
921 user->intf->channels[channel].address = address;
922 return 0;
923 }
924
925 int ipmi_get_my_address(ipmi_user_t user,
926 unsigned int channel,
927 unsigned char *address)
928 {
929 if (channel >= IPMI_MAX_CHANNELS)
930 return -EINVAL;
931 *address = user->intf->channels[channel].address;
932 return 0;
933 }
934
935 int ipmi_set_my_LUN(ipmi_user_t user,
936 unsigned int channel,
937 unsigned char LUN)
938 {
939 if (channel >= IPMI_MAX_CHANNELS)
940 return -EINVAL;
941 user->intf->channels[channel].lun = LUN & 0x3;
942 return 0;
943 }
944
945 int ipmi_get_my_LUN(ipmi_user_t user,
946 unsigned int channel,
947 unsigned char *address)
948 {
949 if (channel >= IPMI_MAX_CHANNELS)
950 return -EINVAL;
951 *address = user->intf->channels[channel].lun;
952 return 0;
953 }
954
955 int ipmi_set_gets_events(ipmi_user_t user, int val)
956 {
957 unsigned long flags;
958 ipmi_smi_t intf = user->intf;
959 struct ipmi_recv_msg *msg, *msg2;
960 struct list_head msgs;
961
962 INIT_LIST_HEAD(&msgs);
963
964 spin_lock_irqsave(&intf->events_lock, flags);
965 user->gets_events = val;
966
967 if (val) {
968 /* Deliver any queued events. */
969 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
970 list_move_tail(&msg->link, &msgs);
971 intf->waiting_events_count = 0;
972 }
973
974 /* Hold the events lock while doing this to preserve order. */
975 list_for_each_entry_safe(msg, msg2, &msgs, link) {
976 msg->user = user;
977 kref_get(&user->refcount);
978 deliver_response(msg);
979 }
980
981 spin_unlock_irqrestore(&intf->events_lock, flags);
982
983 return 0;
984 }
985
986 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
987 unsigned char netfn,
988 unsigned char cmd,
989 unsigned char chan)
990 {
991 struct cmd_rcvr *rcvr;
992
993 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
994 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
995 && (rcvr->chans & (1 << chan)))
996 return rcvr;
997 }
998 return NULL;
999 }
1000
1001 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1002 unsigned char netfn,
1003 unsigned char cmd,
1004 unsigned int chans)
1005 {
1006 struct cmd_rcvr *rcvr;
1007
1008 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1009 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1010 && (rcvr->chans & chans))
1011 return 0;
1012 }
1013 return 1;
1014 }
1015
1016 int ipmi_register_for_cmd(ipmi_user_t user,
1017 unsigned char netfn,
1018 unsigned char cmd,
1019 unsigned int chans)
1020 {
1021 ipmi_smi_t intf = user->intf;
1022 struct cmd_rcvr *rcvr;
1023 int rv = 0;
1024
1025
1026 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1027 if (!rcvr)
1028 return -ENOMEM;
1029 rcvr->cmd = cmd;
1030 rcvr->netfn = netfn;
1031 rcvr->chans = chans;
1032 rcvr->user = user;
1033
1034 mutex_lock(&intf->cmd_rcvrs_mutex);
1035 /* Make sure the command/netfn is not already registered. */
1036 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1037 rv = -EBUSY;
1038 goto out_unlock;
1039 }
1040
1041 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1042
1043 out_unlock:
1044 mutex_unlock(&intf->cmd_rcvrs_mutex);
1045 if (rv)
1046 kfree(rcvr);
1047
1048 return rv;
1049 }
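/*
 * A minimal sketch of the chans argument: it is a bitmask with one bit
 * per channel, so (1 << 0) registers for channel 0 only.  The netfn/cmd
 * values below are placeholders, not a real registration.
 */
#if 0
static int example_register(ipmi_user_t user)
{
	return ipmi_register_for_cmd(user, 0x06 /* netfn */,
				     0x01 /* cmd */, 1 << 0);
}
#endif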
1050
1051 int ipmi_unregister_for_cmd(ipmi_user_t user,
1052 unsigned char netfn,
1053 unsigned char cmd,
1054 unsigned int chans)
1055 {
1056 ipmi_smi_t intf = user->intf;
1057 struct cmd_rcvr *rcvr;
1058 struct cmd_rcvr *rcvrs = NULL;
1059 int i, rv = -ENOENT;
1060
1061 mutex_lock(&intf->cmd_rcvrs_mutex);
1062 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1063 if (((1 << i) & chans) == 0)
1064 continue;
1065 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1066 if (rcvr == NULL)
1067 continue;
1068 if (rcvr->user == user) {
1069 rv = 0;
1070 rcvr->chans &= ~chans;
1071 if (rcvr->chans == 0) {
1072 list_del_rcu(&rcvr->link);
1073 rcvr->next = rcvrs;
1074 rcvrs = rcvr;
1075 }
1076 }
1077 }
1078 mutex_unlock(&intf->cmd_rcvrs_mutex);
1079 synchronize_rcu();
1080 while (rcvrs) {
1081 rcvr = rcvrs;
1082 rcvrs = rcvr->next;
1083 kfree(rcvr);
1084 }
1085 return rv;
1086 }
1087
1088 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1089 {
1090 ipmi_smi_t intf = user->intf;
1091 intf->handlers->set_run_to_completion(intf->send_info, val);
1092 }
1093
1094 static unsigned char
1095 ipmb_checksum(unsigned char *data, int size)
1096 {
1097 unsigned char csum = 0;
1098
1099 for (; size > 0; size--, data++)
1100 csum += *data;
1101
1102 return -csum;
1103 }
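/*
 * A minimal sketch of how ipmb_checksum() is used: the returned value
 * is the 2's complement of the sum, so appending it to the covered
 * bytes makes the total sum zero (mod 256).  Values are hypothetical.
 */
#if 0
static void example_ipmb_checksum(void)
{
	unsigned char hdr[3] = { 0x20, 0x18, 0 };	/* rsSA, netFn/rsLUN */

	hdr[2] = ipmb_checksum(hdr, 2);
	/* (0x20 + 0x18 + hdr[2]) & 0xff == 0 here. */
}
#endif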
1104
1105 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1106 struct kernel_ipmi_msg *msg,
1107 struct ipmi_ipmb_addr *ipmb_addr,
1108 long msgid,
1109 unsigned char ipmb_seq,
1110 int broadcast,
1111 unsigned char source_address,
1112 unsigned char source_lun)
1113 {
1114 int i = broadcast;
1115
1116 /* Format the IPMB header data. */
1117 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1118 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1119 smi_msg->data[2] = ipmb_addr->channel;
1120 if (broadcast)
1121 smi_msg->data[3] = 0;
1122 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1123 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1124 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1125 smi_msg->data[i+6] = source_address;
1126 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1127 smi_msg->data[i+8] = msg->cmd;
1128
1129 /* Now tack on the data to the message. */
1130 if (msg->data_len > 0)
1131 memcpy(&(smi_msg->data[i+9]), msg->data,
1132 msg->data_len);
1133 smi_msg->data_size = msg->data_len + 9;
1134
1135 /* Now calculate the checksum and tack it on. */
1136 smi_msg->data[i+smi_msg->data_size]
1137 = ipmb_checksum(&(smi_msg->data[i+6]),
1138 smi_msg->data_size-6);
1139
1140 /* Add on the checksum size and the offset from the
1141 broadcast. */
1142 smi_msg->data_size += 1 + i;
1143
1144 smi_msg->msgid = msgid;
1145 }
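/*
 * For reference, the non-broadcast SEND MSG payload built above looks
 * like this (offsets into smi_msg->data):
 *
 *   [0] netfn/LUN for the BMC (APP request)   [1] IPMI_SEND_MSG_CMD
 *   [2] channel   [3] rsSA   [4] netFn/rsLUN  [5] checksum 1
 *   [6] rqSA      [7] rqSeq/rqLUN             [8] cmd
 *   [9..] data    [last] checksum 2
 *
 * A broadcast puts a zero at [3] and shifts the rest up by one byte.
 */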
1146
1147 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1148 struct kernel_ipmi_msg *msg,
1149 struct ipmi_lan_addr *lan_addr,
1150 long msgid,
1151 unsigned char ipmb_seq,
1152 unsigned char source_lun)
1153 {
1154 /* Format the IPMB header data. */
1155 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1156 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1157 smi_msg->data[2] = lan_addr->channel;
1158 smi_msg->data[3] = lan_addr->session_handle;
1159 smi_msg->data[4] = lan_addr->remote_SWID;
1160 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1161 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1162 smi_msg->data[7] = lan_addr->local_SWID;
1163 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1164 smi_msg->data[9] = msg->cmd;
1165
1166 /* Now tack on the data to the message. */
1167 if (msg->data_len > 0)
1168 memcpy(&(smi_msg->data[10]), msg->data,
1169 msg->data_len);
1170 smi_msg->data_size = msg->data_len + 10;
1171
1172 /* Now calculate the checksum and tack it on. */
1173 smi_msg->data[smi_msg->data_size]
1174 = ipmb_checksum(&(smi_msg->data[7]),
1175 smi_msg->data_size-7);
1176
1177 /* Add on the checksum size (LAN messages have no broadcast
1178 offset). */
1179 smi_msg->data_size += 1;
1180
1181 smi_msg->msgid = msgid;
1182 }
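/*
 * For reference, the LAN SEND MSG payload built above (offsets into
 * smi_msg->data): [0] netfn/LUN, [1] IPMI_SEND_MSG_CMD, [2] channel,
 * [3] session handle, [4] remote SWID, [5] netFn/LUN, [6] checksum 1,
 * [7] local SWID, [8] rqSeq/LUN, [9] cmd, [10..] data, [last] checksum 2.
 */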
1183
1184 /* Separate from ipmi_request so that the user does not have to be
1185 supplied in certain circumstances (mainly at panic time). If
1186 messages are supplied, they will be freed, even if an error
1187 occurs. */
1188 static int i_ipmi_request(ipmi_user_t user,
1189 ipmi_smi_t intf,
1190 struct ipmi_addr *addr,
1191 long msgid,
1192 struct kernel_ipmi_msg *msg,
1193 void *user_msg_data,
1194 void *supplied_smi,
1195 struct ipmi_recv_msg *supplied_recv,
1196 int priority,
1197 unsigned char source_address,
1198 unsigned char source_lun,
1199 int retries,
1200 unsigned int retry_time_ms)
1201 {
1202 int rv = 0;
1203 struct ipmi_smi_msg *smi_msg;
1204 struct ipmi_recv_msg *recv_msg;
1205 unsigned long flags;
1206
1207
1208 if (supplied_recv) {
1209 recv_msg = supplied_recv;
1210 } else {
1211 recv_msg = ipmi_alloc_recv_msg();
1212 if (recv_msg == NULL) {
1213 return -ENOMEM;
1214 }
1215 }
1216 recv_msg->user_msg_data = user_msg_data;
1217
1218 if (supplied_smi) {
1219 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1220 } else {
1221 smi_msg = ipmi_alloc_smi_msg();
1222 if (smi_msg == NULL) {
1223 ipmi_free_recv_msg(recv_msg);
1224 return -ENOMEM;
1225 }
1226 }
1227
1228 recv_msg->user = user;
1229 if (user)
1230 kref_get(&user->refcount);
1231 recv_msg->msgid = msgid;
1232 /* Store the message to send in the receive message so timeout
1233 responses can get the proper response data. */
1234 recv_msg->msg = *msg;
1235
1236 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1237 struct ipmi_system_interface_addr *smi_addr;
1238
1239 if (msg->netfn & 1) {
1240 /* Responses are not allowed to the SMI. */
1241 rv = -EINVAL;
1242 goto out_err;
1243 }
1244
1245 smi_addr = (struct ipmi_system_interface_addr *) addr;
1246 if (smi_addr->lun > 3) {
1247 spin_lock_irqsave(&intf->counter_lock, flags);
1248 intf->sent_invalid_commands++;
1249 spin_unlock_irqrestore(&intf->counter_lock, flags);
1250 rv = -EINVAL;
1251 goto out_err;
1252 }
1253
1254 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1255
1256 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1257 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1258 || (msg->cmd == IPMI_GET_MSG_CMD)
1259 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1260 {
1261 /* We don't let the user do these, since we manage
1262 the sequence numbers. */
1263 spin_lock_irqsave(&intf->counter_lock, flags);
1264 intf->sent_invalid_commands++;
1265 spin_unlock_irqrestore(&intf->counter_lock, flags);
1266 rv = -EINVAL;
1267 goto out_err;
1268 }
1269
1270 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1271 spin_lock_irqsave(&intf->counter_lock, flags);
1272 intf->sent_invalid_commands++;
1273 spin_unlock_irqrestore(&intf->counter_lock, flags);
1274 rv = -EMSGSIZE;
1275 goto out_err;
1276 }
1277
1278 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1279 smi_msg->data[1] = msg->cmd;
1280 smi_msg->msgid = msgid;
1281 smi_msg->user_data = recv_msg;
1282 if (msg->data_len > 0)
1283 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1284 smi_msg->data_size = msg->data_len + 2;
1285 spin_lock_irqsave(&intf->counter_lock, flags);
1286 intf->sent_local_commands++;
1287 spin_unlock_irqrestore(&intf->counter_lock, flags);
1288 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1289 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1290 {
1291 struct ipmi_ipmb_addr *ipmb_addr;
1292 unsigned char ipmb_seq;
1293 long seqid;
1294 int broadcast = 0;
1295
1296 if (addr->channel >= IPMI_MAX_CHANNELS) {
1297 spin_lock_irqsave(&intf->counter_lock, flags);
1298 intf->sent_invalid_commands++;
1299 spin_unlock_irqrestore(&intf->counter_lock, flags);
1300 rv = -EINVAL;
1301 goto out_err;
1302 }
1303
1304 if (intf->channels[addr->channel].medium
1305 != IPMI_CHANNEL_MEDIUM_IPMB)
1306 {
1307 spin_lock_irqsave(&intf->counter_lock, flags);
1308 intf->sent_invalid_commands++;
1309 spin_unlock_irqrestore(&intf->counter_lock, flags);
1310 rv = -EINVAL;
1311 goto out_err;
1312 }
1313
1314 if (retries < 0) {
1315 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1316 retries = 0; /* Don't retry broadcasts. */
1317 else
1318 retries = 4;
1319 }
1320 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1321 /* Broadcasts add a zero at the beginning of the
1322 message, but are otherwise the same as an IPMB
1323 address. */
1324 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1325 broadcast = 1;
1326 }
1327
1328
1329 /* Default to 1 second retries. */
1330 if (retry_time_ms == 0)
1331 retry_time_ms = 1000;
1332
1333 /* 9 for the header and 1 for the checksum, plus
1334 possibly one for the broadcast. */
1335 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1336 spin_lock_irqsave(&intf->counter_lock, flags);
1337 intf->sent_invalid_commands++;
1338 spin_unlock_irqrestore(&intf->counter_lock, flags);
1339 rv = -EMSGSIZE;
1340 goto out_err;
1341 }
1342
1343 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1344 if (ipmb_addr->lun > 3) {
1345 spin_lock_irqsave(&intf->counter_lock, flags);
1346 intf->sent_invalid_commands++;
1347 spin_unlock_irqrestore(&intf->counter_lock, flags);
1348 rv = -EINVAL;
1349 goto out_err;
1350 }
1351
1352 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1353
1354 if (recv_msg->msg.netfn & 0x1) {
1355 /* It's a response, so use the user's sequence
1356 from msgid. */
1357 spin_lock_irqsave(&intf->counter_lock, flags);
1358 intf->sent_ipmb_responses++;
1359 spin_unlock_irqrestore(&intf->counter_lock, flags);
1360 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1361 msgid, broadcast,
1362 source_address, source_lun);
1363
1364 /* Save the receive message so we can use it
1365 to deliver the response. */
1366 smi_msg->user_data = recv_msg;
1367 } else {
1368 /* It's a command, so get a sequence for it. */
1369
1370 spin_lock_irqsave(&(intf->seq_lock), flags);
1371
1372 spin_lock(&intf->counter_lock);
1373 intf->sent_ipmb_commands++;
1374 spin_unlock(&intf->counter_lock);
1375
1376 /* Create a sequence number with the given
1377 timeout and retry count. */
1378 rv = intf_next_seq(intf,
1379 recv_msg,
1380 retry_time_ms,
1381 retries,
1382 broadcast,
1383 &ipmb_seq,
1384 &seqid);
1385 if (rv) {
1386 /* We have used up all the sequence numbers,
1387 probably, so abort. */
1388 spin_unlock_irqrestore(&(intf->seq_lock),
1389 flags);
1390 goto out_err;
1391 }
1392
1393 /* Store the sequence number in the message,
1394 so that when the send message response
1395 comes back we can start the timer. */
1396 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1397 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1398 ipmb_seq, broadcast,
1399 source_address, source_lun);
1400
1401 /* Copy the message into the recv message data, so we
1402 can retransmit it later if necessary. */
1403 memcpy(recv_msg->msg_data, smi_msg->data,
1404 smi_msg->data_size);
1405 recv_msg->msg.data = recv_msg->msg_data;
1406 recv_msg->msg.data_len = smi_msg->data_size;
1407
1408 /* We don't unlock until here, because we need
1409 to copy the completed message into the
1410 recv_msg before we release the lock.
1411 Otherwise, race conditions may bite us. I
1412 know that's pretty paranoid, but I prefer
1413 to be correct. */
1414 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1415 }
1416 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1417 struct ipmi_lan_addr *lan_addr;
1418 unsigned char ipmb_seq;
1419 long seqid;
1420
1421 if (addr->channel >= IPMI_MAX_CHANNELS) {
1422 spin_lock_irqsave(&intf->counter_lock, flags);
1423 intf->sent_invalid_commands++;
1424 spin_unlock_irqrestore(&intf->counter_lock, flags);
1425 rv = -EINVAL;
1426 goto out_err;
1427 }
1428
1429 if ((intf->channels[addr->channel].medium
1430 != IPMI_CHANNEL_MEDIUM_8023LAN)
1431 && (intf->channels[addr->channel].medium
1432 != IPMI_CHANNEL_MEDIUM_ASYNC))
1433 {
1434 spin_lock_irqsave(&intf->counter_lock, flags);
1435 intf->sent_invalid_commands++;
1436 spin_unlock_irqrestore(&intf->counter_lock, flags);
1437 rv = -EINVAL;
1438 goto out_err;
1439 }
1440
1441 retries = 4;
1442
1443 /* Default to 1 second retries. */
1444 if (retry_time_ms == 0)
1445 retry_time_ms = 1000;
1446
1447 /* 11 for the header and 1 for the checksum. */
1448 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1449 spin_lock_irqsave(&intf->counter_lock, flags);
1450 intf->sent_invalid_commands++;
1451 spin_unlock_irqrestore(&intf->counter_lock, flags);
1452 rv = -EMSGSIZE;
1453 goto out_err;
1454 }
1455
1456 lan_addr = (struct ipmi_lan_addr *) addr;
1457 if (lan_addr->lun > 3) {
1458 spin_lock_irqsave(&intf->counter_lock, flags);
1459 intf->sent_invalid_commands++;
1460 spin_unlock_irqrestore(&intf->counter_lock, flags);
1461 rv = -EINVAL;
1462 goto out_err;
1463 }
1464
1465 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1466
1467 if (recv_msg->msg.netfn & 0x1) {
1468 /* It's a response, so use the user's sequence
1469 from msgid. */
1470 spin_lock_irqsave(&intf->counter_lock, flags);
1471 intf->sent_lan_responses++;
1472 spin_unlock_irqrestore(&intf->counter_lock, flags);
1473 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1474 msgid, source_lun);
1475
1476 /* Save the receive message so we can use it
1477 to deliver the response. */
1478 smi_msg->user_data = recv_msg;
1479 } else {
1480 /* It's a command, so get a sequence for it. */
1481
1482 spin_lock_irqsave(&(intf->seq_lock), flags);
1483
1484 spin_lock(&intf->counter_lock);
1485 intf->sent_lan_commands++;
1486 spin_unlock(&intf->counter_lock);
1487
1488 /* Create a sequence number with the given
1489 timeout and retry count. */
1490 rv = intf_next_seq(intf,
1491 recv_msg,
1492 retry_time_ms,
1493 retries,
1494 0,
1495 &ipmb_seq,
1496 &seqid);
1497 if (rv) {
1498 /* We have used up all the sequence numbers,
1499 probably, so abort. */
1500 spin_unlock_irqrestore(&(intf->seq_lock),
1501 flags);
1502 goto out_err;
1503 }
1504
1505 /* Store the sequence number in the message,
1506 so that when the send message response
1507 comes back we can start the timer. */
1508 format_lan_msg(smi_msg, msg, lan_addr,
1509 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1510 ipmb_seq, source_lun);
1511
1512 /* Copy the message into the recv message data, so we
1513 can retransmit it later if necessary. */
1514 memcpy(recv_msg->msg_data, smi_msg->data,
1515 smi_msg->data_size);
1516 recv_msg->msg.data = recv_msg->msg_data;
1517 recv_msg->msg.data_len = smi_msg->data_size;
1518
1519 /* We don't unlock until here, because we need
1520 to copy the completed message into the
1521 recv_msg before we release the lock.
1522 Otherwise, race conditions may bite us. I
1523 know that's pretty paranoid, but I prefer
1524 to be correct. */
1525 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1526 }
1527 } else {
1528 /* Unknown address type. */
1529 spin_lock_irqsave(&intf->counter_lock, flags);
1530 intf->sent_invalid_commands++;
1531 spin_unlock_irqrestore(&intf->counter_lock, flags);
1532 rv = -EINVAL;
1533 goto out_err;
1534 }
1535
1536 #ifdef DEBUG_MSGING
1537 {
1538 int m;
1539 for (m = 0; m < smi_msg->data_size; m++)
1540 printk(" %2.2x", smi_msg->data[m]);
1541 printk("\n");
1542 }
1543 #endif
1544 intf->handlers->sender(intf->send_info, smi_msg, priority);
1545
1546 return 0;
1547
1548 out_err:
1549 ipmi_free_smi_msg(smi_msg);
1550 ipmi_free_recv_msg(recv_msg);
1551 return rv;
1552 }
1553
1554 static int check_addr(ipmi_smi_t intf,
1555 struct ipmi_addr *addr,
1556 unsigned char *saddr,
1557 unsigned char *lun)
1558 {
1559 if (addr->channel >= IPMI_MAX_CHANNELS)
1560 return -EINVAL;
1561 *lun = intf->channels[addr->channel].lun;
1562 *saddr = intf->channels[addr->channel].address;
1563 return 0;
1564 }
1565
1566 int ipmi_request_settime(ipmi_user_t user,
1567 struct ipmi_addr *addr,
1568 long msgid,
1569 struct kernel_ipmi_msg *msg,
1570 void *user_msg_data,
1571 int priority,
1572 int retries,
1573 unsigned int retry_time_ms)
1574 {
1575 unsigned char saddr, lun;
1576 int rv;
1577
1578 if (!user)
1579 return -EINVAL;
1580 rv = check_addr(user->intf, addr, &saddr, &lun);
1581 if (rv)
1582 return rv;
1583 return i_ipmi_request(user,
1584 user->intf,
1585 addr,
1586 msgid,
1587 msg,
1588 user_msg_data,
1589 NULL, NULL,
1590 priority,
1591 saddr,
1592 lun,
1593 retries,
1594 retry_time_ms);
1595 }
1596
1597 int ipmi_request_supply_msgs(ipmi_user_t user,
1598 struct ipmi_addr *addr,
1599 long msgid,
1600 struct kernel_ipmi_msg *msg,
1601 void *user_msg_data,
1602 void *supplied_smi,
1603 struct ipmi_recv_msg *supplied_recv,
1604 int priority)
1605 {
1606 unsigned char saddr, lun;
1607 int rv;
1608
1609 if (!user)
1610 return -EINVAL;
1611 rv = check_addr(user->intf, addr, &saddr, &lun);
1612 if (rv)
1613 return rv;
1614 return i_ipmi_request(user,
1615 user->intf,
1616 addr,
1617 msgid,
1618 msg,
1619 user_msg_data,
1620 supplied_smi,
1621 supplied_recv,
1622 priority,
1623 saddr,
1624 lun,
1625 -1, 0);
1626 }
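/*
 * A minimal sketch of sending a request to the local BMC through the
 * routines above (a Get Device ID whose response comes back through the
 * user's ipmi_recv_hndl; illustration only):
 */
#if 0
static int example_get_device_id(ipmi_user_t user)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel   = IPMI_BMC_CHANNEL,
		.lun       = 0,
	};
	struct kernel_ipmi_msg msg = {
		.netfn    = IPMI_NETFN_APP_REQUEST,
		.cmd      = IPMI_GET_DEVICE_ID_CMD,
		.data     = NULL,
		.data_len = 0,
	};

	/* -1 retries and 0 retry_time_ms request the default behavior. */
	return ipmi_request_settime(user, (struct ipmi_addr *) &addr,
				    0, &msg, NULL, 0, -1, 0);
}
#endif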
1627
1628 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1629 int count, int *eof, void *data)
1630 {
1631 char *out = (char *) page;
1632 ipmi_smi_t intf = data;
1633 int i;
1634 int rv = 0;
1635
1636 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1637 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1638 out[rv-1] = '\n'; /* Replace the final space with a newline */
1639 out[rv] = '\0';
1640 rv++;
1641 return rv;
1642 }
1643
1644 static int version_file_read_proc(char *page, char **start, off_t off,
1645 int count, int *eof, void *data)
1646 {
1647 char *out = (char *) page;
1648 ipmi_smi_t intf = data;
1649
1650 return sprintf(out, "%d.%d\n",
1651 ipmi_version_major(&intf->bmc->id),
1652 ipmi_version_minor(&intf->bmc->id));
1653 }
1654
1655 static int stat_file_read_proc(char *page, char **start, off_t off,
1656 int count, int *eof, void *data)
1657 {
1658 char *out = (char *) page;
1659 ipmi_smi_t intf = data;
1660
1661 out += sprintf(out, "sent_invalid_commands: %d\n",
1662 intf->sent_invalid_commands);
1663 out += sprintf(out, "sent_local_commands: %d\n",
1664 intf->sent_local_commands);
1665 out += sprintf(out, "handled_local_responses: %d\n",
1666 intf->handled_local_responses);
1667 out += sprintf(out, "unhandled_local_responses: %d\n",
1668 intf->unhandled_local_responses);
1669 out += sprintf(out, "sent_ipmb_commands: %d\n",
1670 intf->sent_ipmb_commands);
1671 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1672 intf->sent_ipmb_command_errs);
1673 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1674 intf->retransmitted_ipmb_commands);
1675 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1676 intf->timed_out_ipmb_commands);
1677 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1678 intf->timed_out_ipmb_broadcasts);
1679 out += sprintf(out, "sent_ipmb_responses: %d\n",
1680 intf->sent_ipmb_responses);
1681 out += sprintf(out, "handled_ipmb_responses: %d\n",
1682 intf->handled_ipmb_responses);
1683 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1684 intf->invalid_ipmb_responses);
1685 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1686 intf->unhandled_ipmb_responses);
1687 out += sprintf(out, "sent_lan_commands: %d\n",
1688 intf->sent_lan_commands);
1689 out += sprintf(out, "sent_lan_command_errs: %d\n",
1690 intf->sent_lan_command_errs);
1691 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1692 intf->retransmitted_lan_commands);
1693 out += sprintf(out, "timed_out_lan_commands: %d\n",
1694 intf->timed_out_lan_commands);
1695 out += sprintf(out, "sent_lan_responses: %d\n",
1696 intf->sent_lan_responses);
1697 out += sprintf(out, "handled_lan_responses: %d\n",
1698 intf->handled_lan_responses);
1699 out += sprintf(out, "invalid_lan_responses: %d\n",
1700 intf->invalid_lan_responses);
1701 out += sprintf(out, "unhandled_lan_responses: %d\n",
1702 intf->unhandled_lan_responses);
1703 out += sprintf(out, "handled_commands: %d\n",
1704 intf->handled_commands);
1705 out += sprintf(out, "invalid_commands: %d\n",
1706 intf->invalid_commands);
1707 out += sprintf(out, "unhandled_commands: %d\n",
1708 intf->unhandled_commands);
1709 out += sprintf(out, "invalid_events: %d\n",
1710 intf->invalid_events);
1711 out += sprintf(out, "events: %d\n",
1712 intf->events);
1713
1714 return (out - ((char *) page));
1715 }
1716
1717 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1718 read_proc_t *read_proc, write_proc_t *write_proc,
1719 void *data, struct module *owner)
1720 {
1721 int rv = 0;
1722 #ifdef CONFIG_PROC_FS
1723 struct proc_dir_entry *file;
1724 struct ipmi_proc_entry *entry;
1725
1726 /* Create a list element. */
1727 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1728 if (!entry)
1729 return -ENOMEM;
1730 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1731 if (!entry->name) {
1732 kfree(entry);
1733 return -ENOMEM;
1734 }
1735 strcpy(entry->name, name);
1736
1737 file = create_proc_entry(name, 0, smi->proc_dir);
1738 if (!file) {
1739 kfree(entry->name);
1740 kfree(entry);
1741 rv = -ENOMEM;
1742 } else {
1743 file->nlink = 1;
1744 file->data = data;
1745 file->read_proc = read_proc;
1746 file->write_proc = write_proc;
1747 file->owner = owner;
1748
1749 spin_lock(&smi->proc_entry_lock);
1750 /* Stick it on the list. */
1751 entry->next = smi->proc_entries;
1752 smi->proc_entries = entry;
1753 spin_unlock(&smi->proc_entry_lock);
1754 }
1755 #endif /* CONFIG_PROC_FS */
1756
1757 return rv;
1758 }
1759
1760 static int add_proc_entries(ipmi_smi_t smi, int num)
1761 {
1762 int rv = 0;
1763
1764 #ifdef CONFIG_PROC_FS
1765 sprintf(smi->proc_dir_name, "%d", num);
1766 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1767 if (!smi->proc_dir)
1768 rv = -ENOMEM;
1769 else {
1770 smi->proc_dir->owner = THIS_MODULE;
1771 }
1772
1773 if (rv == 0)
1774 rv = ipmi_smi_add_proc_entry(smi, "stats",
1775 stat_file_read_proc, NULL,
1776 smi, THIS_MODULE);
1777
1778 if (rv == 0)
1779 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1780 ipmb_file_read_proc, NULL,
1781 smi, THIS_MODULE);
1782
1783 if (rv == 0)
1784 rv = ipmi_smi_add_proc_entry(smi, "version",
1785 version_file_read_proc, NULL,
1786 smi, THIS_MODULE);
1787 #endif /* CONFIG_PROC_FS */
1788
1789 return rv;
1790 }
1791
1792 static void remove_proc_entries(ipmi_smi_t smi)
1793 {
1794 #ifdef CONFIG_PROC_FS
1795 struct ipmi_proc_entry *entry;
1796
1797 spin_lock(&smi->proc_entry_lock);
1798 while (smi->proc_entries) {
1799 entry = smi->proc_entries;
1800 smi->proc_entries = entry->next;
1801
1802 remove_proc_entry(entry->name, smi->proc_dir);
1803 kfree(entry->name);
1804 kfree(entry);
1805 }
1806 spin_unlock(&smi->proc_entry_lock);
1807 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1808 #endif /* CONFIG_PROC_FS */
1809 }
1810
1811 static int __find_bmc_guid(struct device *dev, void *data)
1812 {
1813 unsigned char *id = data;
1814 struct bmc_device *bmc = dev_get_drvdata(dev);
1815 return memcmp(bmc->guid, id, 16) == 0;
1816 }
1817
1818 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1819 unsigned char *guid)
1820 {
1821 struct device *dev;
1822
1823 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1824 if (dev)
1825 return dev_get_drvdata(dev);
1826 else
1827 return NULL;
1828 }
1829
1830 struct prod_dev_id {
1831 unsigned int product_id;
1832 unsigned char device_id;
1833 };
1834
1835 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1836 {
1837 struct prod_dev_id *id = data;
1838 struct bmc_device *bmc = dev_get_drvdata(dev);
1839
1840 return (bmc->id.product_id == id->product_id
1841 && bmc->id.device_id == id->device_id);
1842 }
1843
1844 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1845 struct device_driver *drv,
1846 unsigned int product_id, unsigned char device_id)
1847 {
1848 struct prod_dev_id id = {
1849 .product_id = product_id,
1850 .device_id = device_id,
1851 };
1852 struct device *dev;
1853
1854 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1855 if (dev)
1856 return dev_get_drvdata(dev);
1857 else
1858 return NULL;
1859 }
1860
1861 static ssize_t device_id_show(struct device *dev,
1862 struct device_attribute *attr,
1863 char *buf)
1864 {
1865 struct bmc_device *bmc = dev_get_drvdata(dev);
1866
1867 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1868 }
1869
1870 static ssize_t provides_dev_sdrs_show(struct device *dev,
1871 struct device_attribute *attr,
1872 char *buf)
1873 {
1874 struct bmc_device *bmc = dev_get_drvdata(dev);
1875
1876 return snprintf(buf, 10, "%u\n",
1877 (bmc->id.device_revision & 0x80) >> 7);
1878 }
1879
1880 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1881 char *buf)
1882 {
1883 struct bmc_device *bmc = dev_get_drvdata(dev);
1884
1885 return snprintf(buf, 20, "%u\n",
1886 bmc->id.device_revision & 0x0F);
1887 }
1888
1889 static ssize_t firmware_rev_show(struct device *dev,
1890 struct device_attribute *attr,
1891 char *buf)
1892 {
1893 struct bmc_device *bmc = dev_get_drvdata(dev);
1894
1895 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1896 bmc->id.firmware_revision_2);
1897 }
1898
1899 static ssize_t ipmi_version_show(struct device *dev,
1900 struct device_attribute *attr,
1901 char *buf)
1902 {
1903 struct bmc_device *bmc = dev_get_drvdata(dev);
1904
1905 return snprintf(buf, 20, "%u.%u\n",
1906 ipmi_version_major(&bmc->id),
1907 ipmi_version_minor(&bmc->id));
1908 }
1909
1910 static ssize_t add_dev_support_show(struct device *dev,
1911 struct device_attribute *attr,
1912 char *buf)
1913 {
1914 struct bmc_device *bmc = dev_get_drvdata(dev);
1915
1916 return snprintf(buf, 10, "0x%02x\n",
1917 bmc->id.additional_device_support);
1918 }
1919
1920 static ssize_t manufacturer_id_show(struct device *dev,
1921 struct device_attribute *attr,
1922 char *buf)
1923 {
1924 struct bmc_device *bmc = dev_get_drvdata(dev);
1925
1926 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1927 }
1928
1929 static ssize_t product_id_show(struct device *dev,
1930 struct device_attribute *attr,
1931 char *buf)
1932 {
1933 struct bmc_device *bmc = dev_get_drvdata(dev);
1934
1935 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1936 }
1937
1938 static ssize_t aux_firmware_rev_show(struct device *dev,
1939 struct device_attribute *attr,
1940 char *buf)
1941 {
1942 struct bmc_device *bmc = dev_get_drvdata(dev);
1943
1944 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1945 bmc->id.aux_firmware_revision[3],
1946 bmc->id.aux_firmware_revision[2],
1947 bmc->id.aux_firmware_revision[1],
1948 bmc->id.aux_firmware_revision[0]);
1949 }
1950
1951 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1952 char *buf)
1953 {
1954 struct bmc_device *bmc = dev_get_drvdata(dev);
1955
1956 return snprintf(buf, 100, "%Lx%Lx\n",
1957 (long long) bmc->guid[0],
1958 (long long) bmc->guid[8]);
1959 }
1960
1961 static void remove_files(struct bmc_device *bmc)
1962 {
1963 if (!bmc->dev)
1964 return;
1965
1966 device_remove_file(&bmc->dev->dev,
1967 &bmc->device_id_attr);
1968 device_remove_file(&bmc->dev->dev,
1969 &bmc->provides_dev_sdrs_attr);
1970 device_remove_file(&bmc->dev->dev,
1971 &bmc->revision_attr);
1972 device_remove_file(&bmc->dev->dev,
1973 &bmc->firmware_rev_attr);
1974 device_remove_file(&bmc->dev->dev,
1975 &bmc->version_attr);
1976 device_remove_file(&bmc->dev->dev,
1977 &bmc->add_dev_support_attr);
1978 device_remove_file(&bmc->dev->dev,
1979 &bmc->manufacturer_id_attr);
1980 device_remove_file(&bmc->dev->dev,
1981 &bmc->product_id_attr);
1982
1983 if (bmc->id.aux_firmware_revision_set)
1984 device_remove_file(&bmc->dev->dev,
1985 &bmc->aux_firmware_rev_attr);
1986 if (bmc->guid_set)
1987 device_remove_file(&bmc->dev->dev,
1988 &bmc->guid_attr);
1989 }
1990
1991 static void
1992 cleanup_bmc_device(struct kref *ref)
1993 {
1994 struct bmc_device *bmc;
1995
1996 bmc = container_of(ref, struct bmc_device, refcount);
1997
1998 remove_files(bmc);
1999 if (bmc->dev)
2000 platform_device_unregister(bmc->dev);
2001 kfree(bmc);
2002 }
2003
2004 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2005 {
2006 struct bmc_device *bmc = intf->bmc;
2007
2008 if (intf->sysfs_name) {
2009 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2010 kfree(intf->sysfs_name);
2011 intf->sysfs_name = NULL;
2012 }
2013 if (intf->my_dev_name) {
2014 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2015 kfree(intf->my_dev_name);
2016 intf->my_dev_name = NULL;
2017 }
2018
2019 mutex_lock(&ipmidriver_mutex);
2020 kref_put(&bmc->refcount, cleanup_bmc_device);
2021 intf->bmc = NULL;
2022 mutex_unlock(&ipmidriver_mutex);
2023 }
2024
2025 static int create_files(struct bmc_device *bmc)
2026 {
2027 int err;
2028
2029 bmc->device_id_attr.attr.name = "device_id";
2030 bmc->device_id_attr.attr.owner = THIS_MODULE;
2031 bmc->device_id_attr.attr.mode = S_IRUGO;
2032 bmc->device_id_attr.show = device_id_show;
2033
2034 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2035 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2036 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2037 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2038
2039 bmc->revision_attr.attr.name = "revision";
2040 bmc->revision_attr.attr.owner = THIS_MODULE;
2041 bmc->revision_attr.attr.mode = S_IRUGO;
2042 bmc->revision_attr.show = revision_show;
2043
2044 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2045 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2046 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2047 bmc->firmware_rev_attr.show = firmware_rev_show;
2048
2049 bmc->version_attr.attr.name = "ipmi_version";
2050 bmc->version_attr.attr.owner = THIS_MODULE;
2051 bmc->version_attr.attr.mode = S_IRUGO;
2052 bmc->version_attr.show = ipmi_version_show;
2053
2054 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2055 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2056 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2057 bmc->add_dev_support_attr.show = add_dev_support_show;
2058
2059 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2060 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2061 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2062 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2063
2064 bmc->product_id_attr.attr.name = "product_id";
2065 bmc->product_id_attr.attr.owner = THIS_MODULE;
2066 bmc->product_id_attr.attr.mode = S_IRUGO;
2067 bmc->product_id_attr.show = product_id_show;
2068
2069 bmc->guid_attr.attr.name = "guid";
2070 bmc->guid_attr.attr.owner = THIS_MODULE;
2071 bmc->guid_attr.attr.mode = S_IRUGO;
2072 bmc->guid_attr.show = guid_show;
2073
2074 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2075 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2076 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2077 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2078
2079 err = device_create_file(&bmc->dev->dev,
2080 &bmc->device_id_attr);
2081 if (err) goto out;
2082 err = device_create_file(&bmc->dev->dev,
2083 &bmc->provides_dev_sdrs_attr);
2084 if (err) goto out_devid;
2085 err = device_create_file(&bmc->dev->dev,
2086 &bmc->revision_attr);
2087 if (err) goto out_sdrs;
2088 err = device_create_file(&bmc->dev->dev,
2089 &bmc->firmware_rev_attr);
2090 if (err) goto out_rev;
2091 err = device_create_file(&bmc->dev->dev,
2092 &bmc->version_attr);
2093 if (err) goto out_firm;
2094 err = device_create_file(&bmc->dev->dev,
2095 &bmc->add_dev_support_attr);
2096 if (err) goto out_version;
2097 err = device_create_file(&bmc->dev->dev,
2098 &bmc->manufacturer_id_attr);
2099 if (err) goto out_add_dev;
2100 err = device_create_file(&bmc->dev->dev,
2101 &bmc->product_id_attr);
2102 if (err) goto out_manu;
2103 if (bmc->id.aux_firmware_revision_set) {
2104 err = device_create_file(&bmc->dev->dev,
2105 &bmc->aux_firmware_rev_attr);
2106 if (err) goto out_prod_id;
2107 }
2108 if (bmc->guid_set) {
2109 err = device_create_file(&bmc->dev->dev,
2110 &bmc->guid_attr);
2111 if (err) goto out_aux_firm;
2112 }
2113
2114 return 0;
2115
2116 out_aux_firm:
2117 if (bmc->id.aux_firmware_revision_set)
2118 device_remove_file(&bmc->dev->dev,
2119 &bmc->aux_firmware_rev_attr);
2120 out_prod_id:
2121 device_remove_file(&bmc->dev->dev,
2122 &bmc->product_id_attr);
2123 out_manu:
2124 device_remove_file(&bmc->dev->dev,
2125 &bmc->manufacturer_id_attr);
2126 out_add_dev:
2127 device_remove_file(&bmc->dev->dev,
2128 &bmc->add_dev_support_attr);
2129 out_version:
2130 device_remove_file(&bmc->dev->dev,
2131 &bmc->version_attr);
2132 out_firm:
2133 device_remove_file(&bmc->dev->dev,
2134 &bmc->firmware_rev_attr);
2135 out_rev:
2136 device_remove_file(&bmc->dev->dev,
2137 &bmc->revision_attr);
2138 out_sdrs:
2139 device_remove_file(&bmc->dev->dev,
2140 &bmc->provides_dev_sdrs_attr);
2141 out_devid:
2142 device_remove_file(&bmc->dev->dev,
2143 &bmc->device_id_attr);
2144 out:
2145 return err;
2146 }
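/*
 * Illustrative sketch only, not used by this driver: the repeated
 * attribute setup in create_files() follows one fixed pattern, which
 * could equally be expressed with a small helper like the one below.
 * "init_bmc_attr" is a hypothetical name introduced purely for
 * illustration.
 */
#if 0
static void init_bmc_attr(struct device_attribute *attr, const char *name,
			  ssize_t (*show)(struct device *,
					  struct device_attribute *, char *))
{
	attr->attr.name = name;
	attr->attr.owner = THIS_MODULE;
	attr->attr.mode = S_IRUGO;
	attr->show = show;
	attr->store = NULL;
}
#endif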
2147
2148 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2149 const char *sysfs_name)
2150 {
2151 int rv;
2152 struct bmc_device *bmc = intf->bmc;
2153 struct bmc_device *old_bmc;
2154 int size;
2155 char dummy[1];
2156
2157 mutex_lock(&ipmidriver_mutex);
2158
2159 /*
2160	 * Try to find whether a bmc_device struct already
2161	 * represents the BMC behind this interface.
2162 */
2163 if (bmc->guid_set)
2164 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2165 else
2166 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2167 bmc->id.product_id,
2168 bmc->id.device_id);
2169
2170 /*
2171 * If there is already an bmc_device, free the new one,
2172 * otherwise register the new BMC device
2173 */
2174 if (old_bmc) {
2175 kfree(bmc);
2176 intf->bmc = old_bmc;
2177 bmc = old_bmc;
2178
2179 kref_get(&bmc->refcount);
2180 mutex_unlock(&ipmidriver_mutex);
2181
2182 printk(KERN_INFO
2183 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2184 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2185 bmc->id.manufacturer_id,
2186 bmc->id.product_id,
2187 bmc->id.device_id);
2188 } else {
2189 char name[14];
2190 unsigned char orig_dev_id = bmc->id.device_id;
2191 int warn_printed = 0;
2192
2193 snprintf(name, sizeof(name),
2194 "ipmi_bmc.%4.4x", bmc->id.product_id);
2195
2196 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2197 bmc->id.product_id,
2198 bmc->id.device_id))
2199 {
2200 if (!warn_printed) {
2201 printk(KERN_WARNING PFX
2202 "This machine has two different BMCs"
2203 " with the same product id and device"
2204 " id. This is an error in the"
2205 " firmware, but incrementing the"
2206 " device id to work around the problem."
2207 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2208 bmc->id.product_id, bmc->id.device_id);
2209 warn_printed = 1;
2210 }
2211 bmc->id.device_id++; /* Wraps at 255 */
2212 if (bmc->id.device_id == orig_dev_id) {
2213 printk(KERN_ERR PFX
2214 "Out of device ids!\n");
2215 break;
2216 }
2217 }
2218
2219 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2220 if (!bmc->dev) {
2221 mutex_unlock(&ipmidriver_mutex);
2222 printk(KERN_ERR
2223 "ipmi_msghandler:"
2224 " Unable to allocate platform device\n");
2225 return -ENOMEM;
2226 }
2227 bmc->dev->dev.driver = &ipmidriver;
2228 dev_set_drvdata(&bmc->dev->dev, bmc);
2229 kref_init(&bmc->refcount);
2230
2231 rv = platform_device_add(bmc->dev);
2232 mutex_unlock(&ipmidriver_mutex);
2233 if (rv) {
2234 platform_device_put(bmc->dev);
2235 bmc->dev = NULL;
2236 printk(KERN_ERR
2237 "ipmi_msghandler:"
2238 " Unable to register bmc device: %d\n",
2239 rv);
2240 /* Don't go to out_err, you can only do that if
2241 the device is registered already. */
2242 return rv;
2243 }
2244
2245 rv = create_files(bmc);
2246 if (rv) {
2247 mutex_lock(&ipmidriver_mutex);
2248 platform_device_unregister(bmc->dev);
2249 mutex_unlock(&ipmidriver_mutex);
2250
2251 return rv;
2252 }
2253
2254 printk(KERN_INFO
2255 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2256 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2257 bmc->id.manufacturer_id,
2258 bmc->id.product_id,
2259 bmc->id.device_id);
2260 }
2261
2262 /*
2263 * create symlink from system interface device to bmc device
2264 * and back.
2265 */
2266 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2267 if (!intf->sysfs_name) {
2268 rv = -ENOMEM;
2269 printk(KERN_ERR
2270 "ipmi_msghandler: allocate link to BMC: %d\n",
2271 rv);
2272 goto out_err;
2273 }
2274
2275 rv = sysfs_create_link(&intf->si_dev->kobj,
2276 &bmc->dev->dev.kobj, intf->sysfs_name);
2277 if (rv) {
2278 kfree(intf->sysfs_name);
2279 intf->sysfs_name = NULL;
2280 printk(KERN_ERR
2281 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2282 rv);
2283 goto out_err;
2284 }
2285
2286 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2287 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2288 if (!intf->my_dev_name) {
2289 kfree(intf->sysfs_name);
2290 intf->sysfs_name = NULL;
2291 rv = -ENOMEM;
2292 printk(KERN_ERR
2293 "ipmi_msghandler: allocate link from BMC: %d\n",
2294 rv);
2295 goto out_err;
2296 }
2297 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2298
2299 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2300 intf->my_dev_name);
2301 if (rv) {
2302 kfree(intf->sysfs_name);
2303 intf->sysfs_name = NULL;
2304 kfree(intf->my_dev_name);
2305 intf->my_dev_name = NULL;
2306 printk(KERN_ERR
2307 "ipmi_msghandler:"
2308 " Unable to create symlink to bmc: %d\n",
2309 rv);
2310 goto out_err;
2311 }
2312
2313 return 0;
2314
2315 out_err:
2316 ipmi_bmc_unregister(intf);
2317 return rv;
2318 }
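/*
 * After a successful ipmi_bmc_register() the interface and the BMC are
 * cross-linked in sysfs: a link named after the caller-supplied
 * sysfs_name (for example "bmc", though the actual name is chosen by
 * the lower-level driver) is placed under the interface's si_dev, and a
 * link named "ipmi<ifnum>" (e.g. "ipmi0") is placed under the BMC's
 * platform device, pointing back at the interface.
 */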
2319
2320 static int
2321 send_guid_cmd(ipmi_smi_t intf, int chan)
2322 {
2323 struct kernel_ipmi_msg msg;
2324 struct ipmi_system_interface_addr si;
2325
2326 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2327 si.channel = IPMI_BMC_CHANNEL;
2328 si.lun = 0;
2329
2330 msg.netfn = IPMI_NETFN_APP_REQUEST;
2331 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2332 msg.data = NULL;
2333 msg.data_len = 0;
2334 return i_ipmi_request(NULL,
2335 intf,
2336 (struct ipmi_addr *) &si,
2337 0,
2338 &msg,
2339 intf,
2340 NULL,
2341 NULL,
2342 0,
2343 intf->channels[0].address,
2344 intf->channels[0].lun,
2345 -1, 0);
2346 }
2347
2348 static void
2349 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2350 {
2351 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2352 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2353 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2354 /* Not for me */
2355 return;
2356
2357 if (msg->msg.data[0] != 0) {
2358 		/* Error getting the GUID; the BMC doesn't have one. */
2359 intf->bmc->guid_set = 0;
2360 goto out;
2361 }
2362
2363 if (msg->msg.data_len < 17) {
2364 intf->bmc->guid_set = 0;
2365 printk(KERN_WARNING PFX
2366 "guid_handler: The GUID response from the BMC was too"
2367 " short, it was %d but should have been 17. Assuming"
2368 " GUID is not available.\n",
2369 msg->msg.data_len);
2370 goto out;
2371 }
2372
2373 memcpy(intf->bmc->guid, msg->msg.data, 16);
2374 intf->bmc->guid_set = 1;
2375 out:
2376 wake_up(&intf->waitq);
2377 }
2378
2379 static void
2380 get_guid(ipmi_smi_t intf)
2381 {
2382 int rv;
2383
2384 intf->bmc->guid_set = 0x2;
2385 intf->null_user_handler = guid_handler;
2386 rv = send_guid_cmd(intf, 0);
2387 if (rv)
2388 /* Send failed, no GUID available. */
2389 intf->bmc->guid_set = 0;
2390 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2391 intf->null_user_handler = NULL;
2392 }
2393
2394 static int
2395 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2396 {
2397 struct kernel_ipmi_msg msg;
2398 unsigned char data[1];
2399 struct ipmi_system_interface_addr si;
2400
2401 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2402 si.channel = IPMI_BMC_CHANNEL;
2403 si.lun = 0;
2404
2405 msg.netfn = IPMI_NETFN_APP_REQUEST;
2406 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2407 msg.data = data;
2408 msg.data_len = 1;
2409 data[0] = chan;
2410 return i_ipmi_request(NULL,
2411 intf,
2412 (struct ipmi_addr *) &si,
2413 0,
2414 &msg,
2415 intf,
2416 NULL,
2417 NULL,
2418 0,
2419 intf->channels[0].address,
2420 intf->channels[0].lun,
2421 -1, 0);
2422 }
2423
2424 static void
2425 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2426 {
2427 int rv = 0;
2428 int chan;
2429
2430 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2431 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2432 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2433 {
2434 /* It's the one we want */
2435 if (msg->msg.data[0] != 0) {
2436 /* Got an error from the channel, just go on. */
2437
2438 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2439 /* If the MC does not support this
2440 command, that is legal. We just
2441 assume it has one IPMB at channel
2442 zero. */
2443 intf->channels[0].medium
2444 = IPMI_CHANNEL_MEDIUM_IPMB;
2445 intf->channels[0].protocol
2446 = IPMI_CHANNEL_PROTOCOL_IPMB;
2447 rv = -ENOSYS;
2448
2449 intf->curr_channel = IPMI_MAX_CHANNELS;
2450 wake_up(&intf->waitq);
2451 goto out;
2452 }
2453 goto next_channel;
2454 }
2455 if (msg->msg.data_len < 4) {
2456 /* Message not big enough, just go on. */
2457 goto next_channel;
2458 }
2459 chan = intf->curr_channel;
2460 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2461 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2462
2463 next_channel:
2464 intf->curr_channel++;
2465 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2466 wake_up(&intf->waitq);
2467 else
2468 rv = send_channel_info_cmd(intf, intf->curr_channel);
2469
2470 if (rv) {
2471 /* Got an error somehow, just give up. */
2472 intf->curr_channel = IPMI_MAX_CHANNELS;
2473 wake_up(&intf->waitq);
2474
2475 printk(KERN_WARNING PFX
2476 "Error sending channel information: %d\n",
2477 rv);
2478 }
2479 }
2480 out:
2481 return;
2482 }
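/*
 * channel_handler() and send_channel_info_cmd() together implement a
 * simple scan: curr_channel starts at 0, each response fills in the
 * medium/protocol for that channel and triggers the request for the
 * next one, and the registering thread sleeping in wait_event() is
 * woken once curr_channel reaches IPMI_MAX_CHANNELS (or the BMC turns
 * out not to support the Get Channel Info command at all).
 */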
2483
2484 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2485 void *send_info,
2486 struct ipmi_device_id *device_id,
2487 struct device *si_dev,
2488 const char *sysfs_name,
2489 unsigned char slave_addr)
2490 {
2491 int i, j;
2492 int rv;
2493 ipmi_smi_t intf;
2494 ipmi_smi_t tintf;
2495 int version_major;
2496 int version_minor;
2497 struct list_head *link;
2498
2499 version_major = ipmi_version_major(device_id);
2500 version_minor = ipmi_version_minor(device_id);
2501
2502 	/* Make sure the driver is actually initialized; this handles
2503 problems with initialization order. */
2504 if (!initialized) {
2505 rv = ipmi_init_msghandler();
2506 if (rv)
2507 return rv;
2508 /* The init code doesn't return an error if it was turned
2509 off, but it won't initialize. Check that. */
2510 if (!initialized)
2511 return -ENODEV;
2512 }
2513
2514 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2515 if (!intf)
2516 return -ENOMEM;
2517 memset(intf, 0, sizeof(*intf));
2518 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2519 if (!intf->bmc) {
2520 kfree(intf);
2521 return -ENOMEM;
2522 }
2523 intf->intf_num = -1; /* Mark it invalid for now. */
2524 kref_init(&intf->refcount);
2525 intf->bmc->id = *device_id;
2526 intf->si_dev = si_dev;
2527 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2528 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2529 intf->channels[j].lun = 2;
2530 }
2531 if (slave_addr != 0)
2532 intf->channels[0].address = slave_addr;
2533 INIT_LIST_HEAD(&intf->users);
2534 intf->handlers = handlers;
2535 intf->send_info = send_info;
2536 spin_lock_init(&intf->seq_lock);
2537 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2538 intf->seq_table[j].inuse = 0;
2539 intf->seq_table[j].seqid = 0;
2540 }
2541 intf->curr_seq = 0;
2542 #ifdef CONFIG_PROC_FS
2543 spin_lock_init(&intf->proc_entry_lock);
2544 #endif
2545 spin_lock_init(&intf->waiting_msgs_lock);
2546 INIT_LIST_HEAD(&intf->waiting_msgs);
2547 spin_lock_init(&intf->events_lock);
2548 INIT_LIST_HEAD(&intf->waiting_events);
2549 intf->waiting_events_count = 0;
2550 mutex_init(&intf->cmd_rcvrs_mutex);
2551 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2552 init_waitqueue_head(&intf->waitq);
2553
2554 spin_lock_init(&intf->counter_lock);
2555 intf->proc_dir = NULL;
2556
2557 mutex_lock(&ipmi_interfaces_mutex);
2558 /* Look for a hole in the numbers. */
2559 i = 0;
2560 link = &ipmi_interfaces;
2561 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2562 if (tintf->intf_num != i) {
2563 link = &tintf->link;
2564 break;
2565 }
2566 i++;
2567 }
2568 /* Add the new interface in numeric order. */
2569 if (i == 0)
2570 list_add_rcu(&intf->link, &ipmi_interfaces);
2571 else
2572 list_add_tail_rcu(&intf->link, link);
2573
2574 rv = handlers->start_processing(send_info, intf);
2575 if (rv)
2576 goto out;
2577
2578 get_guid(intf);
2579
2580 if ((version_major > 1)
2581 || ((version_major == 1) && (version_minor >= 5)))
2582 {
2583 /* Start scanning the channels to see what is
2584 available. */
2585 intf->null_user_handler = channel_handler;
2586 intf->curr_channel = 0;
2587 rv = send_channel_info_cmd(intf, 0);
2588 if (rv)
2589 goto out;
2590
2591 /* Wait for the channel info to be read. */
2592 wait_event(intf->waitq,
2593 intf->curr_channel >= IPMI_MAX_CHANNELS);
2594 intf->null_user_handler = NULL;
2595 } else {
2596 /* Assume a single IPMB channel at zero. */
2597 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2598 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2599 }
2600
2601 if (rv == 0)
2602 rv = add_proc_entries(intf, i);
2603
2604 rv = ipmi_bmc_register(intf, i, sysfs_name);
2605
2606 out:
2607 if (rv) {
2608 if (intf->proc_dir)
2609 remove_proc_entries(intf);
2610 list_del_rcu(&intf->link);
2611 mutex_unlock(&ipmi_interfaces_mutex);
2612 synchronize_rcu();
2613 kref_put(&intf->refcount, intf_free);
2614 } else {
2615 /* After this point the interface is legal to use. */
2616 intf->intf_num = i;
2617 mutex_unlock(&ipmi_interfaces_mutex);
2618 call_smi_watchers(i, intf->si_dev);
2619 }
2620
2621 return rv;
2622 }
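/*
 * Minimal sketch, for illustration only, of how a lower-level
 * interface driver (ipmi_si, for instance) might call
 * ipmi_register_smi() and pass the sysfs name used for the BMC
 * symlink.  "my_handlers", "my_info", "my_id" and "my_parent_dev" are
 * hypothetical objects owned by that driver, not names defined here.
 */
#if 0
static int example_register_interface(void)
{
	int rv;

	rv = ipmi_register_smi(&my_handlers,	/* struct ipmi_smi_handlers */
			       my_info,		/* driver-private send_info */
			       &my_id,		/* struct ipmi_device_id from Get Device ID */
			       my_parent_dev,	/* struct device * of the hardware */
			       "bmc",		/* sysfs link name */
			       0x20);		/* BMC slave address, 0 for the default */
	if (rv)
		printk(KERN_ERR "example: ipmi_register_smi failed: %d\n", rv);
	return rv;
}
#endif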
2623
2624 int ipmi_unregister_smi(ipmi_smi_t intf)
2625 {
2626 struct ipmi_smi_watcher *w;
2627
2628 ipmi_bmc_unregister(intf);
2629
2630 mutex_lock(&ipmi_interfaces_mutex);
2631 list_del_rcu(&intf->link);
2632 mutex_unlock(&ipmi_interfaces_mutex);
2633 synchronize_rcu();
2634
2635 remove_proc_entries(intf);
2636
2637 /* Call all the watcher interfaces to tell them that
2638 an interface is gone. */
2639 down_read(&smi_watchers_sem);
2640 list_for_each_entry(w, &smi_watchers, link)
2641 w->smi_gone(intf->intf_num);
2642 up_read(&smi_watchers_sem);
2643
2644 kref_put(&intf->refcount, intf_free);
2645 return 0;
2646 }
2647
2648 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2649 struct ipmi_smi_msg *msg)
2650 {
2651 struct ipmi_ipmb_addr ipmb_addr;
2652 struct ipmi_recv_msg *recv_msg;
2653 unsigned long flags;
2654
2655
2656 /* This is 11, not 10, because the response must contain a
2657 * completion code. */
2658 if (msg->rsp_size < 11) {
2659 /* Message not big enough, just ignore it. */
2660 spin_lock_irqsave(&intf->counter_lock, flags);
2661 intf->invalid_ipmb_responses++;
2662 spin_unlock_irqrestore(&intf->counter_lock, flags);
2663 return 0;
2664 }
2665
2666 if (msg->rsp[2] != 0) {
2667 /* An error getting the response, just ignore it. */
2668 return 0;
2669 }
2670
2671 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2672 ipmb_addr.slave_addr = msg->rsp[6];
2673 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2674 ipmb_addr.lun = msg->rsp[7] & 3;
2675
2676 /* It's a response from a remote entity. Look up the sequence
2677 number and handle the response. */
2678 if (intf_find_seq(intf,
2679 msg->rsp[7] >> 2,
2680 msg->rsp[3] & 0x0f,
2681 msg->rsp[8],
2682 (msg->rsp[4] >> 2) & (~1),
2683 (struct ipmi_addr *) &(ipmb_addr),
2684 &recv_msg))
2685 {
2686 /* We were unable to find the sequence number,
2687 so just nuke the message. */
2688 spin_lock_irqsave(&intf->counter_lock, flags);
2689 intf->unhandled_ipmb_responses++;
2690 spin_unlock_irqrestore(&intf->counter_lock, flags);
2691 return 0;
2692 }
2693
2694 memcpy(recv_msg->msg_data,
2695 &(msg->rsp[9]),
2696 msg->rsp_size - 9);
2697 	/* The other fields matched, so no need to set them, except
2698 for netfn, which needs to be the response that was
2699 returned, not the request value. */
2700 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2701 recv_msg->msg.data = recv_msg->msg_data;
2702 recv_msg->msg.data_len = msg->rsp_size - 10;
2703 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2704 spin_lock_irqsave(&intf->counter_lock, flags);
2705 intf->handled_ipmb_responses++;
2706 spin_unlock_irqrestore(&intf->counter_lock, flags);
2707 deliver_response(recv_msg);
2708
2709 return 0;
2710 }
2711
2712 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2713 struct ipmi_smi_msg *msg)
2714 {
2715 struct cmd_rcvr *rcvr;
2716 int rv = 0;
2717 unsigned char netfn;
2718 unsigned char cmd;
2719 unsigned char chan;
2720 ipmi_user_t user = NULL;
2721 struct ipmi_ipmb_addr *ipmb_addr;
2722 struct ipmi_recv_msg *recv_msg;
2723 unsigned long flags;
2724
2725 if (msg->rsp_size < 10) {
2726 /* Message not big enough, just ignore it. */
2727 spin_lock_irqsave(&intf->counter_lock, flags);
2728 intf->invalid_commands++;
2729 spin_unlock_irqrestore(&intf->counter_lock, flags);
2730 return 0;
2731 }
2732
2733 if (msg->rsp[2] != 0) {
2734 /* An error getting the response, just ignore it. */
2735 return 0;
2736 }
2737
2738 netfn = msg->rsp[4] >> 2;
2739 cmd = msg->rsp[8];
2740 chan = msg->rsp[3] & 0xf;
2741
2742 rcu_read_lock();
2743 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2744 if (rcvr) {
2745 user = rcvr->user;
2746 kref_get(&user->refcount);
2747 } else
2748 user = NULL;
2749 rcu_read_unlock();
2750
2751 if (user == NULL) {
2752 /* We didn't find a user, deliver an error response. */
2753 spin_lock_irqsave(&intf->counter_lock, flags);
2754 intf->unhandled_commands++;
2755 spin_unlock_irqrestore(&intf->counter_lock, flags);
2756
2757 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2758 msg->data[1] = IPMI_SEND_MSG_CMD;
2759 msg->data[2] = msg->rsp[3];
2760 msg->data[3] = msg->rsp[6];
2761 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2762 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2763 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2764 /* rqseq/lun */
2765 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2766 msg->data[8] = msg->rsp[8]; /* cmd */
2767 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2768 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2769 msg->data_size = 11;
2770
2771 #ifdef DEBUG_MSGING
2772 {
2773 int m;
2774 printk("Invalid command:");
2775 for (m = 0; m < msg->data_size; m++)
2776 printk(" %2.2x", msg->data[m]);
2777 printk("\n");
2778 }
2779 #endif
2780 intf->handlers->sender(intf->send_info, msg, 0);
2781
2782 rv = -1; /* We used the message, so return the value that
2783 causes it to not be freed or queued. */
2784 } else {
2785 /* Deliver the message to the user. */
2786 spin_lock_irqsave(&intf->counter_lock, flags);
2787 intf->handled_commands++;
2788 spin_unlock_irqrestore(&intf->counter_lock, flags);
2789
2790 recv_msg = ipmi_alloc_recv_msg();
2791 if (!recv_msg) {
2792 /* We couldn't allocate memory for the
2793 message, so requeue it for handling
2794 later. */
2795 rv = 1;
2796 kref_put(&user->refcount, free_user);
2797 } else {
2798 /* Extract the source address from the data. */
2799 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2800 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2801 ipmb_addr->slave_addr = msg->rsp[6];
2802 ipmb_addr->lun = msg->rsp[7] & 3;
2803 ipmb_addr->channel = msg->rsp[3] & 0xf;
2804
2805 /* Extract the rest of the message information
2806 			   from the IPMB header. */
2807 recv_msg->user = user;
2808 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2809 recv_msg->msgid = msg->rsp[7] >> 2;
2810 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2811 recv_msg->msg.cmd = msg->rsp[8];
2812 recv_msg->msg.data = recv_msg->msg_data;
2813
2814 /* We chop off 10, not 9 bytes because the checksum
2815 at the end also needs to be removed. */
2816 recv_msg->msg.data_len = msg->rsp_size - 10;
2817 memcpy(recv_msg->msg_data,
2818 &(msg->rsp[9]),
2819 msg->rsp_size - 10);
2820 deliver_response(recv_msg);
2821 }
2822 }
2823
2824 return rv;
2825 }
2826
2827 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2828 struct ipmi_smi_msg *msg)
2829 {
2830 struct ipmi_lan_addr lan_addr;
2831 struct ipmi_recv_msg *recv_msg;
2832 unsigned long flags;
2833
2834
2835 /* This is 13, not 12, because the response must contain a
2836 * completion code. */
2837 if (msg->rsp_size < 13) {
2838 /* Message not big enough, just ignore it. */
2839 spin_lock_irqsave(&intf->counter_lock, flags);
2840 intf->invalid_lan_responses++;
2841 spin_unlock_irqrestore(&intf->counter_lock, flags);
2842 return 0;
2843 }
2844
2845 if (msg->rsp[2] != 0) {
2846 /* An error getting the response, just ignore it. */
2847 return 0;
2848 }
2849
2850 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2851 lan_addr.session_handle = msg->rsp[4];
2852 lan_addr.remote_SWID = msg->rsp[8];
2853 lan_addr.local_SWID = msg->rsp[5];
2854 lan_addr.channel = msg->rsp[3] & 0x0f;
2855 lan_addr.privilege = msg->rsp[3] >> 4;
2856 lan_addr.lun = msg->rsp[9] & 3;
2857
2858 /* It's a response from a remote entity. Look up the sequence
2859 number and handle the response. */
2860 if (intf_find_seq(intf,
2861 msg->rsp[9] >> 2,
2862 msg->rsp[3] & 0x0f,
2863 msg->rsp[10],
2864 (msg->rsp[6] >> 2) & (~1),
2865 (struct ipmi_addr *) &(lan_addr),
2866 &recv_msg))
2867 {
2868 /* We were unable to find the sequence number,
2869 so just nuke the message. */
2870 spin_lock_irqsave(&intf->counter_lock, flags);
2871 intf->unhandled_lan_responses++;
2872 spin_unlock_irqrestore(&intf->counter_lock, flags);
2873 return 0;
2874 }
2875
2876 memcpy(recv_msg->msg_data,
2877 &(msg->rsp[11]),
2878 msg->rsp_size - 11);
2879 /* The other fields matched, so no need to set them, except
2880 for netfn, which needs to be the response that was
2881 returned, not the request value. */
2882 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2883 recv_msg->msg.data = recv_msg->msg_data;
2884 recv_msg->msg.data_len = msg->rsp_size - 12;
2885 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2886 spin_lock_irqsave(&intf->counter_lock, flags);
2887 intf->handled_lan_responses++;
2888 spin_unlock_irqrestore(&intf->counter_lock, flags);
2889 deliver_response(recv_msg);
2890
2891 return 0;
2892 }
2893
2894 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2895 struct ipmi_smi_msg *msg)
2896 {
2897 struct cmd_rcvr *rcvr;
2898 int rv = 0;
2899 unsigned char netfn;
2900 unsigned char cmd;
2901 unsigned char chan;
2902 ipmi_user_t user = NULL;
2903 struct ipmi_lan_addr *lan_addr;
2904 struct ipmi_recv_msg *recv_msg;
2905 unsigned long flags;
2906
2907 if (msg->rsp_size < 12) {
2908 /* Message not big enough, just ignore it. */
2909 spin_lock_irqsave(&intf->counter_lock, flags);
2910 intf->invalid_commands++;
2911 spin_unlock_irqrestore(&intf->counter_lock, flags);
2912 return 0;
2913 }
2914
2915 if (msg->rsp[2] != 0) {
2916 /* An error getting the response, just ignore it. */
2917 return 0;
2918 }
2919
2920 netfn = msg->rsp[6] >> 2;
2921 cmd = msg->rsp[10];
2922 chan = msg->rsp[3] & 0xf;
2923
2924 rcu_read_lock();
2925 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2926 if (rcvr) {
2927 user = rcvr->user;
2928 kref_get(&user->refcount);
2929 } else
2930 user = NULL;
2931 rcu_read_unlock();
2932
2933 if (user == NULL) {
2934 /* We didn't find a user, just give up. */
2935 spin_lock_irqsave(&intf->counter_lock, flags);
2936 intf->unhandled_commands++;
2937 spin_unlock_irqrestore(&intf->counter_lock, flags);
2938
2939 rv = 0; /* Don't do anything with these messages, just
2940 allow them to be freed. */
2941 } else {
2942 /* Deliver the message to the user. */
2943 spin_lock_irqsave(&intf->counter_lock, flags);
2944 intf->handled_commands++;
2945 spin_unlock_irqrestore(&intf->counter_lock, flags);
2946
2947 recv_msg = ipmi_alloc_recv_msg();
2948 if (!recv_msg) {
2949 /* We couldn't allocate memory for the
2950 message, so requeue it for handling
2951 later. */
2952 rv = 1;
2953 kref_put(&user->refcount, free_user);
2954 } else {
2955 /* Extract the source address from the data. */
2956 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2957 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2958 lan_addr->session_handle = msg->rsp[4];
2959 lan_addr->remote_SWID = msg->rsp[8];
2960 lan_addr->local_SWID = msg->rsp[5];
2961 lan_addr->lun = msg->rsp[9] & 3;
2962 lan_addr->channel = msg->rsp[3] & 0xf;
2963 lan_addr->privilege = msg->rsp[3] >> 4;
2964
2965 /* Extract the rest of the message information
2966 			   from the IPMB header. */
2967 recv_msg->user = user;
2968 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2969 recv_msg->msgid = msg->rsp[9] >> 2;
2970 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2971 recv_msg->msg.cmd = msg->rsp[10];
2972 recv_msg->msg.data = recv_msg->msg_data;
2973
2974 /* We chop off 12, not 11 bytes because the checksum
2975 at the end also needs to be removed. */
2976 recv_msg->msg.data_len = msg->rsp_size - 12;
2977 memcpy(recv_msg->msg_data,
2978 &(msg->rsp[11]),
2979 msg->rsp_size - 12);
2980 deliver_response(recv_msg);
2981 }
2982 }
2983
2984 return rv;
2985 }
2986
2987 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2988 struct ipmi_smi_msg *msg)
2989 {
2990 struct ipmi_system_interface_addr *smi_addr;
2991
2992 recv_msg->msgid = 0;
2993 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2994 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2995 smi_addr->channel = IPMI_BMC_CHANNEL;
2996 smi_addr->lun = msg->rsp[0] & 3;
2997 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2998 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2999 recv_msg->msg.cmd = msg->rsp[1];
3000 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3001 recv_msg->msg.data = recv_msg->msg_data;
3002 recv_msg->msg.data_len = msg->rsp_size - 3;
3003 }
3004
3005 static int handle_read_event_rsp(ipmi_smi_t intf,
3006 struct ipmi_smi_msg *msg)
3007 {
3008 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3009 struct list_head msgs;
3010 ipmi_user_t user;
3011 int rv = 0;
3012 int deliver_count = 0;
3013 unsigned long flags;
3014
3015 if (msg->rsp_size < 19) {
3016 /* Message is too small to be an IPMB event. */
3017 spin_lock_irqsave(&intf->counter_lock, flags);
3018 intf->invalid_events++;
3019 spin_unlock_irqrestore(&intf->counter_lock, flags);
3020 return 0;
3021 }
3022
3023 if (msg->rsp[2] != 0) {
3024 /* An error getting the event, just ignore it. */
3025 return 0;
3026 }
3027
3028 INIT_LIST_HEAD(&msgs);
3029
3030 spin_lock_irqsave(&intf->events_lock, flags);
3031
3032 spin_lock(&intf->counter_lock);
3033 intf->events++;
3034 spin_unlock(&intf->counter_lock);
3035
3036 /* Allocate and fill in one message for every user that is getting
3037 events. */
3038 rcu_read_lock();
3039 list_for_each_entry_rcu(user, &intf->users, link) {
3040 if (!user->gets_events)
3041 continue;
3042
3043 recv_msg = ipmi_alloc_recv_msg();
3044 if (!recv_msg) {
3045 rcu_read_unlock();
3046 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3047 link) {
3048 list_del(&recv_msg->link);
3049 ipmi_free_recv_msg(recv_msg);
3050 }
3051 /* We couldn't allocate memory for the
3052 message, so requeue it for handling
3053 later. */
3054 rv = 1;
3055 goto out;
3056 }
3057
3058 deliver_count++;
3059
3060 copy_event_into_recv_msg(recv_msg, msg);
3061 recv_msg->user = user;
3062 kref_get(&user->refcount);
3063 list_add_tail(&(recv_msg->link), &msgs);
3064 }
3065 rcu_read_unlock();
3066
3067 if (deliver_count) {
3068 /* Now deliver all the messages. */
3069 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3070 list_del(&recv_msg->link);
3071 deliver_response(recv_msg);
3072 }
3073 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3074 		/* No one to receive the message; put it in the queue if
3075 		   there aren't already too many things in the queue. */
3076 recv_msg = ipmi_alloc_recv_msg();
3077 if (!recv_msg) {
3078 /* We couldn't allocate memory for the
3079 message, so requeue it for handling
3080 later. */
3081 rv = 1;
3082 goto out;
3083 }
3084
3085 copy_event_into_recv_msg(recv_msg, msg);
3086 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3087 intf->waiting_events_count++;
3088 } else {
3089 		/* There are too many things in the queue; discard this
3090 		   message. */
3091 printk(KERN_WARNING PFX "Event queue full, discarding an"
3092 " incoming event\n");
3093 }
3094
3095 out:
3096 spin_unlock_irqrestore(&(intf->events_lock), flags);
3097
3098 return rv;
3099 }
3100
3101 static int handle_bmc_rsp(ipmi_smi_t intf,
3102 struct ipmi_smi_msg *msg)
3103 {
3104 struct ipmi_recv_msg *recv_msg;
3105 unsigned long flags;
3106 struct ipmi_user *user;
3107
3108 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3109 if (recv_msg == NULL)
3110 {
3111 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3112 "could be because of a malformed message, or\n"
3113 "because of a hardware error. Contact your\n"
3114 "hardware vender for assistance\n");
3115 return 0;
3116 }
3117
3118 user = recv_msg->user;
3119 /* Make sure the user still exists. */
3120 if (user && !user->valid) {
3121 /* The user for the message went away, so give up. */
3122 spin_lock_irqsave(&intf->counter_lock, flags);
3123 intf->unhandled_local_responses++;
3124 spin_unlock_irqrestore(&intf->counter_lock, flags);
3125 ipmi_free_recv_msg(recv_msg);
3126 } else {
3127 struct ipmi_system_interface_addr *smi_addr;
3128
3129 spin_lock_irqsave(&intf->counter_lock, flags);
3130 intf->handled_local_responses++;
3131 spin_unlock_irqrestore(&intf->counter_lock, flags);
3132 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3133 recv_msg->msgid = msg->msgid;
3134 smi_addr = ((struct ipmi_system_interface_addr *)
3135 &(recv_msg->addr));
3136 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3137 smi_addr->channel = IPMI_BMC_CHANNEL;
3138 smi_addr->lun = msg->rsp[0] & 3;
3139 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3140 recv_msg->msg.cmd = msg->rsp[1];
3141 memcpy(recv_msg->msg_data,
3142 &(msg->rsp[2]),
3143 msg->rsp_size - 2);
3144 recv_msg->msg.data = recv_msg->msg_data;
3145 recv_msg->msg.data_len = msg->rsp_size - 2;
3146 deliver_response(recv_msg);
3147 }
3148
3149 return 0;
3150 }
3151
3152 /* Handle a new message. Return 1 if the message should be requeued,
3153 0 if the message should be freed, or -1 if the message should not
3154 be freed or requeued. */
3155 static int handle_new_recv_msg(ipmi_smi_t intf,
3156 struct ipmi_smi_msg *msg)
3157 {
3158 int requeue;
3159 int chan;
3160
3161 #ifdef DEBUG_MSGING
3162 int m;
3163 printk("Recv:");
3164 for (m = 0; m < msg->rsp_size; m++)
3165 printk(" %2.2x", msg->rsp[m]);
3166 printk("\n");
3167 #endif
3168 if (msg->rsp_size < 2) {
3169 /* Message is too small to be correct. */
3170 printk(KERN_WARNING PFX "BMC returned to small a message"
3171 " for netfn %x cmd %x, got %d bytes\n",
3172 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3173
3174 /* Generate an error response for the message. */
3175 msg->rsp[0] = msg->data[0] | (1 << 2);
3176 msg->rsp[1] = msg->data[1];
3177 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3178 msg->rsp_size = 3;
3179 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3180 || (msg->rsp[1] != msg->data[1])) /* Command */
3181 {
3182 /* The response is not even marginally correct. */
3183 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3184 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3185 (msg->data[0] >> 2) | 1, msg->data[1],
3186 msg->rsp[0] >> 2, msg->rsp[1]);
3187
3188 /* Generate an error response for the message. */
3189 msg->rsp[0] = msg->data[0] | (1 << 2);
3190 msg->rsp[1] = msg->data[1];
3191 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3192 msg->rsp_size = 3;
3193 }
3194
3195 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3196 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3197 && (msg->user_data != NULL))
3198 {
3199 /* It's a response to a response we sent. For this we
3200 deliver a send message response to the user. */
3201 struct ipmi_recv_msg *recv_msg = msg->user_data;
3202
3203 requeue = 0;
3204 if (msg->rsp_size < 2)
3205 /* Message is too small to be correct. */
3206 goto out;
3207
3208 chan = msg->data[2] & 0x0f;
3209 if (chan >= IPMI_MAX_CHANNELS)
3210 /* Invalid channel number */
3211 goto out;
3212
3213 if (!recv_msg)
3214 goto out;
3215
3216 /* Make sure the user still exists. */
3217 if (!recv_msg->user || !recv_msg->user->valid)
3218 goto out;
3219
3220 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3221 recv_msg->msg.data = recv_msg->msg_data;
3222 recv_msg->msg.data_len = 1;
3223 recv_msg->msg_data[0] = msg->rsp[2];
3224 deliver_response(recv_msg);
3225 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3226 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3227 {
3228 /* It's from the receive queue. */
3229 chan = msg->rsp[3] & 0xf;
3230 if (chan >= IPMI_MAX_CHANNELS) {
3231 /* Invalid channel number */
3232 requeue = 0;
3233 goto out;
3234 }
3235
3236 switch (intf->channels[chan].medium) {
3237 case IPMI_CHANNEL_MEDIUM_IPMB:
3238 if (msg->rsp[4] & 0x04) {
3239 /* It's a response, so find the
3240 requesting message and send it up. */
3241 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3242 } else {
3243 /* It's a command to the SMS from some other
3244 entity. Handle that. */
3245 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3246 }
3247 break;
3248
3249 case IPMI_CHANNEL_MEDIUM_8023LAN:
3250 case IPMI_CHANNEL_MEDIUM_ASYNC:
3251 if (msg->rsp[6] & 0x04) {
3252 /* It's a response, so find the
3253 requesting message and send it up. */
3254 requeue = handle_lan_get_msg_rsp(intf, msg);
3255 } else {
3256 /* It's a command to the SMS from some other
3257 entity. Handle that. */
3258 requeue = handle_lan_get_msg_cmd(intf, msg);
3259 }
3260 break;
3261
3262 default:
3263 /* We don't handle the channel type, so just
3264 * free the message. */
3265 requeue = 0;
3266 }
3267
3268 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3269 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3270 {
3271 		/* It's an asynchronous event. */
3272 requeue = handle_read_event_rsp(intf, msg);
3273 } else {
3274 /* It's a response from the local BMC. */
3275 requeue = handle_bmc_rsp(intf, msg);
3276 }
3277
3278 out:
3279 return requeue;
3280 }
3281
3282 /* Handle a new message from the lower layer. */
3283 void ipmi_smi_msg_received(ipmi_smi_t intf,
3284 struct ipmi_smi_msg *msg)
3285 {
3286 unsigned long flags;
3287 int rv;
3288
3289
3290 if ((msg->data_size >= 2)
3291 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3292 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3293 && (msg->user_data == NULL))
3294 {
3295 		/* This is the local response to a command send; start
3296 the timer for these. The user_data will not be
3297 NULL if this is a response send, and we will let
3298 response sends just go through. */
3299
3300 		/* Check for errors; if we get certain errors (ones
3301 that mean basically we can try again later), we
3302 ignore them and start the timer. Otherwise we
3303 report the error immediately. */
3304 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3305 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3306 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3307 && (msg->rsp[2] != IPMI_BUS_ERR)
3308 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3309 {
3310 int chan = msg->rsp[3] & 0xf;
3311
3312 /* Got an error sending the message, handle it. */
3313 spin_lock_irqsave(&intf->counter_lock, flags);
3314 if (chan >= IPMI_MAX_CHANNELS)
3315 ; /* This shouldn't happen */
3316 else if ((intf->channels[chan].medium
3317 == IPMI_CHANNEL_MEDIUM_8023LAN)
3318 || (intf->channels[chan].medium
3319 == IPMI_CHANNEL_MEDIUM_ASYNC))
3320 intf->sent_lan_command_errs++;
3321 else
3322 intf->sent_ipmb_command_errs++;
3323 spin_unlock_irqrestore(&intf->counter_lock, flags);
3324 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3325 } else {
3326 /* The message was sent, start the timer. */
3327 intf_start_seq_timer(intf, msg->msgid);
3328 }
3329
3330 ipmi_free_smi_msg(msg);
3331 goto out;
3332 }
3333
3334 /* To preserve message order, if the list is not empty, we
3335 tack this message onto the end of the list. */
3336 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3337 if (!list_empty(&intf->waiting_msgs)) {
3338 list_add_tail(&msg->link, &intf->waiting_msgs);
3339 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3340 goto out;
3341 }
3342 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3343
3344 rv = handle_new_recv_msg(intf, msg);
3345 if (rv > 0) {
3346 /* Could not handle the message now, just add it to a
3347 list to handle later. */
3348 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3349 list_add_tail(&msg->link, &intf->waiting_msgs);
3350 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3351 } else if (rv == 0) {
3352 ipmi_free_smi_msg(msg);
3353 }
3354
3355 out:
3356 return;
3357 }
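/*
 * Minimal sketch, for illustration only, of the delivery path from a
 * lower-level driver: the ipmi_smi_msg it was handed through its
 * sender() hook comes back up by filling in msg->rsp / msg->rsp_size
 * with the raw response and calling ipmi_smi_msg_received().  "intf"
 * is the ipmi_smi_t the driver received via start_processing(); the
 * function name and the "raw"/"len" parameters are hypothetical.
 */
#if 0
static void example_deliver_response(ipmi_smi_t intf,
				     struct ipmi_smi_msg *msg,
				     const unsigned char *raw, int len)
{
	if (len > IPMI_MAX_MSG_LENGTH)
		len = IPMI_MAX_MSG_LENGTH;
	memcpy(msg->rsp, raw, len);
	msg->rsp_size = len;
	ipmi_smi_msg_received(intf, msg);
}
#endif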
3358
3359 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3360 {
3361 ipmi_user_t user;
3362
3363 rcu_read_lock();
3364 list_for_each_entry_rcu(user, &intf->users, link) {
3365 if (!user->handler->ipmi_watchdog_pretimeout)
3366 continue;
3367
3368 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3369 }
3370 rcu_read_unlock();
3371 }
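/*
 * Minimal sketch, for illustration only, of how an upper-level user
 * (such as the IPMI watchdog driver) would receive these pretimeout
 * callbacks: it registers a struct ipmi_user_hndl that supplies an
 * ipmi_watchdog_pretimeout handler via ipmi_create_user().  The
 * "example_*" names below are hypothetical.
 */
#if 0
static void example_recv_hndl(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	/* Consume or inspect the message, then release it. */
	ipmi_free_recv_msg(msg);
}

static void example_pretimeout(void *handler_data)
{
	/* React to the watchdog pretimeout here. */
}

static struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl           = example_recv_hndl,
	.ipmi_watchdog_pretimeout = example_pretimeout,
};

static ipmi_user_t example_user;

static int example_attach(int ifnum)
{
	return ipmi_create_user(ifnum, &example_hndl, NULL, &example_user);
}
#endif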
3372
3373 static void
3374 handle_msg_timeout(struct ipmi_recv_msg *msg)
3375 {
3376 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3377 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3378 msg->msg.netfn |= 1; /* Convert to a response. */
3379 msg->msg.data_len = 1;
3380 msg->msg.data = msg->msg_data;
3381 deliver_response(msg);
3382 }
3383
3384 static struct ipmi_smi_msg *
3385 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3386 unsigned char seq, long seqid)
3387 {
3388 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3389 if (!smi_msg)
3390 		/* If we can't allocate the message, then just return; we
3391 		   get 4 retries, so this should be OK. */
3392 return NULL;
3393
3394 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3395 smi_msg->data_size = recv_msg->msg.data_len;
3396 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3397
3398 #ifdef DEBUG_MSGING
3399 {
3400 int m;
3401 printk("Resend: ");
3402 for (m = 0; m < smi_msg->data_size; m++)
3403 printk(" %2.2x", smi_msg->data[m]);
3404 printk("\n");
3405 }
3406 #endif
3407 return smi_msg;
3408 }
3409
3410 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3411 struct list_head *timeouts, long timeout_period,
3412 int slot, unsigned long *flags)
3413 {
3414 struct ipmi_recv_msg *msg;
3415
3416 if (!ent->inuse)
3417 return;
3418
3419 ent->timeout -= timeout_period;
3420 if (ent->timeout > 0)
3421 return;
3422
3423 if (ent->retries_left == 0) {
3424 /* The message has used all its retries. */
3425 ent->inuse = 0;
3426 msg = ent->recv_msg;
3427 list_add_tail(&msg->link, timeouts);
3428 spin_lock(&intf->counter_lock);
3429 if (ent->broadcast)
3430 intf->timed_out_ipmb_broadcasts++;
3431 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3432 intf->timed_out_lan_commands++;
3433 else
3434 intf->timed_out_ipmb_commands++;
3435 spin_unlock(&intf->counter_lock);
3436 } else {
3437 struct ipmi_smi_msg *smi_msg;
3438 /* More retries, send again. */
3439
3440 /* Start with the max timer, set to normal
3441 timer after the message is sent. */
3442 ent->timeout = MAX_MSG_TIMEOUT;
3443 ent->retries_left--;
3444 spin_lock(&intf->counter_lock);
3445 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3446 intf->retransmitted_lan_commands++;
3447 else
3448 intf->retransmitted_ipmb_commands++;
3449 spin_unlock(&intf->counter_lock);
3450
3451 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3452 ent->seqid);
3453 if (!smi_msg)
3454 return;
3455
3456 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3457 /* Send the new message. We send with a zero
3458 		 * priority.  It timed out; I doubt time is
3459 * that critical now, and high priority
3460 * messages are really only for messages to the
3461 * local MC, which don't get resent. */
3462 intf->handlers->sender(intf->send_info,
3463 smi_msg, 0);
3464 spin_lock_irqsave(&intf->seq_lock, *flags);
3465 }
3466 }
3467
3468 static void ipmi_timeout_handler(long timeout_period)
3469 {
3470 ipmi_smi_t intf;
3471 struct list_head timeouts;
3472 struct ipmi_recv_msg *msg, *msg2;
3473 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3474 unsigned long flags;
3475 int i;
3476
3477 INIT_LIST_HEAD(&timeouts);
3478
3479 rcu_read_lock();
3480 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3481 /* See if any waiting messages need to be processed. */
3482 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3483 list_for_each_entry_safe(smi_msg, smi_msg2,
3484 &intf->waiting_msgs, link) {
3485 if (!handle_new_recv_msg(intf, smi_msg)) {
3486 list_del(&smi_msg->link);
3487 ipmi_free_smi_msg(smi_msg);
3488 } else {
3489 /* To preserve message order, quit if we
3490 can't handle a message. */
3491 break;
3492 }
3493 }
3494 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3495
3496 /* Go through the seq table and find any messages that
3497 have timed out, putting them in the timeouts
3498 list. */
3499 spin_lock_irqsave(&intf->seq_lock, flags);
3500 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3501 check_msg_timeout(intf, &(intf->seq_table[i]),
3502 &timeouts, timeout_period, i,
3503 &flags);
3504 spin_unlock_irqrestore(&intf->seq_lock, flags);
3505
3506 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3507 handle_msg_timeout(msg);
3508 }
3509 rcu_read_unlock();
3510 }
3511
3512 static void ipmi_request_event(void)
3513 {
3514 ipmi_smi_t intf;
3515
3516 rcu_read_lock();
3517 list_for_each_entry_rcu(intf, &ipmi_interfaces, link)
3518 intf->handlers->request_events(intf->send_info);
3519 rcu_read_unlock();
3520 }
3521
3522 static struct timer_list ipmi_timer;
3523
3524 /* Call every ~100 ms. */
3525 #define IPMI_TIMEOUT_TIME 100
3526
3527 /* How many jiffies it takes to get to the timeout time. */
3528 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3529
3530 /* Request events from the queue every second (this is the number of
3531 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3532 future, IPMI will add a way to know immediately if an event is in
3533 the queue and this silliness can go away. */
3534 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
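/*
 * Worked example of the two derived constants above: assuming
 * IPMI_TIMEOUT_TIME = 100 and HZ = 250, IPMI_TIMEOUT_JIFFIES is
 * (100 * 250) / 1000 = 25 jiffies per timer tick, and
 * IPMI_REQUEST_EV_TIME is 1000 / 100 = 10 timer ticks (about one
 * second) between event-queue polls.
 */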
3535
3536 static atomic_t stop_operation;
3537 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3538
3539 static void ipmi_timeout(unsigned long data)
3540 {
3541 if (atomic_read(&stop_operation))
3542 return;
3543
3544 ticks_to_req_ev--;
3545 if (ticks_to_req_ev == 0) {
3546 ipmi_request_event();
3547 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3548 }
3549
3550 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3551
3552 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3553 }
3554
3555
3556 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3557 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3558
3559 /* FIXME - convert these to slabs. */
3560 static void free_smi_msg(struct ipmi_smi_msg *msg)
3561 {
3562 atomic_dec(&smi_msg_inuse_count);
3563 kfree(msg);
3564 }
3565
3566 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3567 {
3568 struct ipmi_smi_msg *rv;
3569 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3570 if (rv) {
3571 rv->done = free_smi_msg;
3572 rv->user_data = NULL;
3573 atomic_inc(&smi_msg_inuse_count);
3574 }
3575 return rv;
3576 }
3577
3578 static void free_recv_msg(struct ipmi_recv_msg *msg)
3579 {
3580 atomic_dec(&recv_msg_inuse_count);
3581 kfree(msg);
3582 }
3583
3584 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3585 {
3586 struct ipmi_recv_msg *rv;
3587
3588 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3589 if (rv) {
3590 rv->user = NULL;
3591 rv->done = free_recv_msg;
3592 atomic_inc(&recv_msg_inuse_count);
3593 }
3594 return rv;
3595 }
3596
3597 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3598 {
3599 if (msg->user)
3600 kref_put(&msg->user->refcount, free_user);
3601 msg->done(msg);
3602 }
3603
3604 #ifdef CONFIG_IPMI_PANIC_EVENT
3605
3606 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3607 {
3608 }
3609
3610 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3611 {
3612 }
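/*
 * The two dummy handlers above exist because the panic path below
 * uses stack-allocated ipmi_smi_msg and ipmi_recv_msg structures;
 * their ->done hooks must therefore do nothing rather than kfree()
 * the messages the way free_smi_msg()/free_recv_msg() do for
 * dynamically allocated ones.
 */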
3613
3614 #ifdef CONFIG_IPMI_PANIC_STRING
3615 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3616 {
3617 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3618 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3619 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3620 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3621 {
3622 /* A get event receiver command, save it. */
3623 intf->event_receiver = msg->msg.data[1];
3624 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3625 }
3626 }
3627
3628 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3629 {
3630 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3631 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3632 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3633 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3634 {
3635 /* A get device id command, save if we are an event
3636 receiver or generator. */
3637 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3638 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3639 }
3640 }
3641 #endif
3642
3643 static void send_panic_events(char *str)
3644 {
3645 struct kernel_ipmi_msg msg;
3646 ipmi_smi_t intf;
3647 unsigned char data[16];
3648 struct ipmi_system_interface_addr *si;
3649 struct ipmi_addr addr;
3650 struct ipmi_smi_msg smi_msg;
3651 struct ipmi_recv_msg recv_msg;
3652
3653 si = (struct ipmi_system_interface_addr *) &addr;
3654 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3655 si->channel = IPMI_BMC_CHANNEL;
3656 si->lun = 0;
3657
3658 /* Fill in an event telling that we have failed. */
3659 msg.netfn = 0x04; /* Sensor or Event. */
3660 msg.cmd = 2; /* Platform event command. */
3661 msg.data = data;
3662 msg.data_len = 8;
3663 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3664 data[1] = 0x03; /* This is for IPMI 1.0. */
3665 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3666 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3667 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3668
3669 /* Put a few breadcrumbs in. Hopefully later we can add more things
3670 to make the panic events more useful. */
3671 if (str) {
3672 data[3] = str[0];
3673 data[6] = str[1];
3674 data[7] = str[2];
3675 }
3676
3677 smi_msg.done = dummy_smi_done_handler;
3678 recv_msg.done = dummy_recv_done_handler;
3679
3680 /* For every registered interface, send the event. */
3681 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3682 if (intf->intf_num == -1)
3683 /* Interface was not ready yet. */
3684 continue;
3685
3686 /* Send the event announcing the panic. */
3687 intf->handlers->set_run_to_completion(intf->send_info, 1);
3688 i_ipmi_request(NULL,
3689 intf,
3690 &addr,
3691 0,
3692 &msg,
3693 intf,
3694 &smi_msg,
3695 &recv_msg,
3696 0,
3697 intf->channels[0].address,
3698 intf->channels[0].lun,
3699 0, 1); /* Don't retry, and don't wait. */
3700 }
3701
3702 #ifdef CONFIG_IPMI_PANIC_STRING
3703 	/* On every interface, dump a bunch of OEM events holding the
3704 string. */
3705 if (!str)
3706 return;
3707
3708 /* For every registered interface, send the event. */
3709 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3710 char *p = str;
3711 struct ipmi_ipmb_addr *ipmb;
3712 int j;
3713
3714 if (intf->intf_num == -1)
3715 /* Interface was not ready yet. */
3716 continue;
3717
3718 /* First job here is to figure out where to send the
3719 OEM events. There's no way in IPMI to send OEM
3720 events using an event send command, so we have to
3721 find the SEL to put them in and stick them in
3722 there. */
3723
3724 /* Get capabilities from the get device id. */
3725 intf->local_sel_device = 0;
3726 intf->local_event_generator = 0;
3727 intf->event_receiver = 0;
3728
3729 /* Request the device info from the local MC. */
3730 msg.netfn = IPMI_NETFN_APP_REQUEST;
3731 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3732 msg.data = NULL;
3733 msg.data_len = 0;
3734 intf->null_user_handler = device_id_fetcher;
3735 i_ipmi_request(NULL,
3736 intf,
3737 &addr,
3738 0,
3739 &msg,
3740 intf,
3741 &smi_msg,
3742 &recv_msg,
3743 0,
3744 intf->channels[0].address,
3745 intf->channels[0].lun,
3746 0, 1); /* Don't retry, and don't wait. */
3747
3748 if (intf->local_event_generator) {
3749 /* Request the event receiver from the local MC. */
3750 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3751 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3752 msg.data = NULL;
3753 msg.data_len = 0;
3754 intf->null_user_handler = event_receiver_fetcher;
3755 i_ipmi_request(NULL,
3756 intf,
3757 &addr,
3758 0,
3759 &msg,
3760 intf,
3761 &smi_msg,
3762 &recv_msg,
3763 0,
3764 intf->channels[0].address,
3765 intf->channels[0].lun,
3766 0, 1); /* no retry, and no wait. */
3767 }
3768 intf->null_user_handler = NULL;
3769
3770 /* Validate the event receiver. The low bit must not
3771 be 1 (it must be a valid IPMB address), it cannot
3772 be zero, and it must not be my address. */
3773 if (((intf->event_receiver & 1) == 0)
3774 && (intf->event_receiver != 0)
3775 && (intf->event_receiver != intf->channels[0].address))
3776 {
3777 /* The event receiver is valid, send an IPMB
3778 message. */
3779 ipmb = (struct ipmi_ipmb_addr *) &addr;
3780 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3781 ipmb->channel = 0; /* FIXME - is this right? */
3782 ipmb->lun = intf->event_receiver_lun;
3783 ipmb->slave_addr = intf->event_receiver;
3784 } else if (intf->local_sel_device) {
3785 /* The event receiver was not valid (or was
3786 me), but I am an SEL device, just dump it
3787 in my SEL. */
3788 si = (struct ipmi_system_interface_addr *) &addr;
3789 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3790 si->channel = IPMI_BMC_CHANNEL;
3791 si->lun = 0;
3792 } else
3793 			continue; /* Nowhere to send the event. */
3794
3795
3796 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3797 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3798 msg.data = data;
3799 msg.data_len = 16;
3800
3801 j = 0;
3802 while (*p) {
3803 int size = strlen(p);
3804
3805 if (size > 11)
3806 size = 11;
3807 data[0] = 0;
3808 data[1] = 0;
3809 data[2] = 0xf0; /* OEM event without timestamp. */
3810 data[3] = intf->channels[0].address;
3811 data[4] = j++; /* sequence # */
3812 /* Always give 11 bytes, so strncpy will fill
3813 it with zeroes for me. */
3814 strncpy(data+5, p, 11);
3815 p += size;
3816
3817 i_ipmi_request(NULL,
3818 intf,
3819 &addr,
3820 0,
3821 &msg,
3822 intf,
3823 &smi_msg,
3824 &recv_msg,
3825 0,
3826 intf->channels[0].address,
3827 intf->channels[0].lun,
3828 0, 1); /* no retry, and no wait. */
3829 }
3830 }
3831 #endif /* CONFIG_IPMI_PANIC_STRING */
3832 }
3833 #endif /* CONFIG_IPMI_PANIC_EVENT */
3834
3835 static int has_panicked = 0;
3836
3837 static int panic_event(struct notifier_block *this,
3838 unsigned long event,
3839 void *ptr)
3840 {
3841 ipmi_smi_t intf;
3842
3843 if (has_panicked)
3844 return NOTIFY_DONE;
3845 has_panicked = 1;
3846
3847 /* For every registered interface, set it to run to completion. */
3848 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3849 if (intf->intf_num == -1)
3850 /* Interface was not ready yet. */
3851 continue;
3852
3853 intf->handlers->set_run_to_completion(intf->send_info, 1);
3854 }
3855
3856 #ifdef CONFIG_IPMI_PANIC_EVENT
3857 send_panic_events(ptr);
3858 #endif
3859
3860 return NOTIFY_DONE;
3861 }
3862
3863 static struct notifier_block panic_block = {
3864 .notifier_call = panic_event,
3865 .next = NULL,
3866 .priority = 200 /* priority: INT_MAX >= x >= 0 */
3867 };
3868
3869 static int ipmi_init_msghandler(void)
3870 {
3871 int rv;
3872
3873 if (initialized)
3874 return 0;
3875
3876 rv = driver_register(&ipmidriver);
3877 if (rv) {
3878 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3879 return rv;
3880 }
3881
3882 printk(KERN_INFO "ipmi message handler version "
3883 IPMI_DRIVER_VERSION "\n");
3884
3885 #ifdef CONFIG_PROC_FS
3886 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3887 if (!proc_ipmi_root) {
3888 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3889 return -ENOMEM;
3890 }
3891
3892 proc_ipmi_root->owner = THIS_MODULE;
3893 #endif /* CONFIG_PROC_FS */
3894
3895 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3896 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3897
3898 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
3899
3900 initialized = 1;
3901
3902 return 0;
3903 }
3904
3905 static __init int ipmi_init_msghandler_mod(void)
3906 {
3907 ipmi_init_msghandler();
3908 return 0;
3909 }
3910
3911 static __exit void cleanup_ipmi(void)
3912 {
3913 int count;
3914
3915 if (!initialized)
3916 return;
3917
3918 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3919
3920 /* This can't be called if any interfaces exist, so no worry about
3921 shutting down the interfaces. */
3922
3923 /* Tell the timer to stop, then wait for it to stop. This avoids
3924 problems with race conditions removing the timer here. */
3925 atomic_inc(&stop_operation);
3926 del_timer_sync(&ipmi_timer);
3927
3928 #ifdef CONFIG_PROC_FS
3929 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3930 #endif /* CONFIG_PROC_FS */
3931
3932 driver_unregister(&ipmidriver);
3933
3934 initialized = 0;
3935
3936 /* Check for buffer leaks. */
3937 count = atomic_read(&smi_msg_inuse_count);
3938 if (count != 0)
3939 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3940 count);
3941 count = atomic_read(&recv_msg_inuse_count);
3942 if (count != 0)
3943 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3944 count);
3945 }
3946 module_exit(cleanup_ipmi);
3947
3948 module_init(ipmi_init_msghandler_mod);
3949 MODULE_LICENSE("GPL");
3950 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3951 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3952 MODULE_VERSION(IPMI_DRIVER_VERSION);
3953
3954 EXPORT_SYMBOL(ipmi_create_user);
3955 EXPORT_SYMBOL(ipmi_destroy_user);
3956 EXPORT_SYMBOL(ipmi_get_version);
3957 EXPORT_SYMBOL(ipmi_request_settime);
3958 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3959 EXPORT_SYMBOL(ipmi_register_smi);
3960 EXPORT_SYMBOL(ipmi_unregister_smi);
3961 EXPORT_SYMBOL(ipmi_register_for_cmd);
3962 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3963 EXPORT_SYMBOL(ipmi_smi_msg_received);
3964 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3965 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3966 EXPORT_SYMBOL(ipmi_addr_length);
3967 EXPORT_SYMBOL(ipmi_validate_addr);
3968 EXPORT_SYMBOL(ipmi_set_gets_events);
3969 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3970 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3971 EXPORT_SYMBOL(ipmi_set_my_address);
3972 EXPORT_SYMBOL(ipmi_get_my_address);
3973 EXPORT_SYMBOL(ipmi_set_my_LUN);
3974 EXPORT_SYMBOL(ipmi_get_my_LUN);
3975 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3976 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3977 EXPORT_SYMBOL(ipmi_free_recv_msg);