ipmi: convert locked counters to atomics
[deliverable/linux.git] / drivers / char / ipmi / ipmi_msghandler.c
1 /*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
47
#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.2"

/* Forward declarations for routines defined later in this file. */
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

/* Non-zero once ipmi_init_msghandler() has completed successfully. */
static int initialized;

#ifdef CONFIG_PROC_FS
/* Root of the /proc/ipmi directory tree. */
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

/* Maximum number of events queued while no user is ready to take them. */
#define MAX_EVENTS_IN_QUEUE 25

/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT	60000
69
70 /*
71 * The main "user" data structure.
72 */
/*
 * The main "user" data structure.  One is allocated per caller of
 * ipmi_create_user() and freed when its last kref is dropped after
 * ipmi_destroy_user().
 */
struct ipmi_user
{
	/* Entry in the interface's users list (intf->users). */
	struct list_head link;

	/* Set to "0" when the user is destroyed. */
	int valid;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	int gets_events;
};
92
/*
 * Registration of one (netfn, cmd, channel-mask) command handler for
 * a user.  Lives on the interface's RCU-protected cmd_rcvrs list.
 */
struct cmd_rcvr
{
	struct list_head link;

	ipmi_user_t   user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;	/* Bitmask of channels this registration covers. */

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command. */
	struct cmd_rcvr *next;
};
111
/*
 * One outstanding request in the per-interface sequence table; the
 * table index is the IPMB sequence number used to match a response
 * back to the request that caused it.
 */
struct seq_table
{
	unsigned int inuse : 1;		/* Entry currently holds a request. */
	unsigned int broadcast : 1;	/* Request was a broadcast. */

	unsigned long timeout;		/* Remaining time before retransmit/expire. */
	unsigned long orig_timeout;	/* Timeout value to restore on restart. */
	unsigned int  retries_left;

	/* To verify on an incoming send message response that this is
	   the message that the response is for, we keep a sequence id
	   and increment it every time we send a message. */
	long seqid;

	/* This is held so we can properly respond to the message on a
	   timeout, and it is used to hold the temporary data for
	   retransmission, too. */
	struct ipmi_recv_msg *recv_msg;
};
131
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 *
 * Layout: bits 26+ hold the sequence table index (0-63), bits 0-25
 * hold the per-entry sequence id.  STORE_SEQ_IN_MSGID,
 * GET_SEQ_FROM_MSGID and NEXT_SEQID must all agree on these widths:
 * the original code stored a 26-bit seqid (mask 0x3ffffff) but
 * fetched and wrapped only 22 bits (mask 0x3fffff), and masked seq
 * with 0xff though only 6 bits fit above bit 26.  The (long) cast
 * keeps the shift from overflowing a plain int.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((long) (seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do { \
		seq = (((msgid) >> 26) & 0x3f); \
		seqid = ((msgid) & 0x3ffffff); \
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
143
/* Per-channel configuration discovered from (or defaulted for) the BMC. */
struct ipmi_channel
{
	unsigned char medium;	/* IPMI channel medium type. */
	unsigned char protocol;	/* IPMI channel protocol type. */

	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	   but may be changed by the user. */
	unsigned char address;

	/* My LUN.  This should generally stay the SMS LUN, but just in
	   case... */
	unsigned char lun;
};
157
#ifdef CONFIG_PROC_FS
/* Singly-linked record of one /proc entry created for an interface,
   kept so the entries can be removed when the interface goes away. */
struct ipmi_proc_entry
{
	char                   *name;	/* Entry name; owned by this record. */
	struct ipmi_proc_entry *next;
};
#endif
165
/*
 * Driver-model representation of the BMC behind an interface.  One
 * BMC may be shared by several interfaces; refcount tracks that.
 */
struct bmc_device
{
	struct platform_device *dev;	/* The registered platform device. */
	struct ipmi_device_id  id;	/* From the Get Device ID response. */
	unsigned char          guid[16];
	int                    guid_set; /* Non-zero if guid[] is valid. */

	struct kref            refcount;

	/* bmc device attributes */
	struct device_attribute device_id_attr;
	struct device_attribute provides_dev_sdrs_attr;
	struct device_attribute revision_attr;
	struct device_attribute firmware_rev_attr;
	struct device_attribute version_attr;
	struct device_attribute add_dev_support_attr;
	struct device_attribute manufacturer_id_attr;
	struct device_attribute product_id_attr;
	struct device_attribute guid_attr;
	struct device_attribute aux_firmware_rev_attr;
};
187
/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
/* Commands we got from the user that were invalid. */
#define IPMI_STAT_sent_invalid_commands		0

/* Commands we sent to the MC. */
#define IPMI_STAT_sent_local_commands		1

/* Responses from the MC that were delivered to a user. */
#define IPMI_STAT_handled_local_responses	2

/* Responses from the MC that were not delivered to a user. */
#define IPMI_STAT_unhandled_local_responses	3

/* Commands we sent out to the IPMB bus. */
#define IPMI_STAT_sent_ipmb_commands		4

/* Commands sent on the IPMB that had errors on the SEND CMD */
#define IPMI_STAT_sent_ipmb_command_errs	5

/* Each retransmit increments this count. */
#define IPMI_STAT_retransmitted_ipmb_commands	6

/* When a message times out (runs out of retransmits) this is incremented. */
#define IPMI_STAT_timed_out_ipmb_commands	7

/*
 * This is like above, but for broadcasts.  Broadcasts are
 * *not* included in the above count (they are expected to
 * time out).
 */
#define IPMI_STAT_timed_out_ipmb_broadcasts	8

/* Responses I have sent to the IPMB bus. */
#define IPMI_STAT_sent_ipmb_responses		9

/* The response was delivered to the user. */
#define IPMI_STAT_handled_ipmb_responses	10

/* The response had invalid data in it. */
#define IPMI_STAT_invalid_ipmb_responses	11

/* The response didn't have anyone waiting for it. */
#define IPMI_STAT_unhandled_ipmb_responses	12

/* Commands we sent out on the LAN. */
#define IPMI_STAT_sent_lan_commands		13

/* Commands sent on the LAN that had errors on the SEND CMD */
#define IPMI_STAT_sent_lan_command_errs		14

/* Each retransmit increments this count. */
#define IPMI_STAT_retransmitted_lan_commands	15

/* When a message times out (runs out of retransmits) this is incremented. */
#define IPMI_STAT_timed_out_lan_commands	16

/* Responses I have sent on the LAN. */
#define IPMI_STAT_sent_lan_responses		17

/* The response was delivered to the user. */
#define IPMI_STAT_handled_lan_responses		18

/* The response had invalid data in it. */
#define IPMI_STAT_invalid_lan_responses		19

/* The response didn't have anyone waiting for it. */
#define IPMI_STAT_unhandled_lan_responses	20

/* The command was delivered to the user. */
#define IPMI_STAT_handled_commands		21

/* The command had invalid data in it. */
#define IPMI_STAT_invalid_commands		22

/* The command didn't have anyone waiting for it. */
#define IPMI_STAT_unhandled_commands		23

/* Invalid data in an event. */
#define IPMI_STAT_invalid_events		24

/* Events that were received with the proper format. */
#define IPMI_STAT_events			25

/* When you add a statistic, you must update this value. */
#define IPMI_NUM_STATS				26
276
277
#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS       16
/*
 * Per-interface state.  One of these exists for each lower-layer SMI
 * registered with the message handler; it routes messages between the
 * SMI and the ipmi_user structures bound to it.
 */
struct ipmi_smi
{
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Used for a list of interfaces (the global ipmi_interfaces list). */
	struct list_head link;

	/* The list of upper layers that are using me.  seq_lock
	 * protects this. */
	struct list_head users;

	/* Information to supply to users. */
	unsigned char ipmi_version_major;
	unsigned char ipmi_version_minor;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	struct bmc_device *bmc;
	char *my_dev_name;
	char *sysfs_name;

	/* This is the lower-layer's sender routine.  Note that you
	 * must either be holding the ipmi_interfaces_mutex or be in
	 * an umpreemptible region to use this.  You must fetch the
	 * value into a local variable and make sure it is not NULL. */
	struct ipmi_smi_handlers *handlers;
	void                     *send_info;

#ifdef CONFIG_PROC_FS
	/* A list of proc entries for this interface. */
	struct mutex           proc_entry_lock;
	struct ipmi_proc_entry *proc_entries;
#endif

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/* A table of sequence numbers for this interface.  We use the
	   sequence numbers for IPMB messages that go out of the
	   interface to match them up with their responses.  A routine
	   is called periodically to time the items in this list. */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;	/* Next table slot to try; see intf_next_seq(). */

	/* Messages that were delayed for some reason (out of memory,
	   for instance), will go in here to be processed later in a
	   periodic timer interrupt. */
	spinlock_t       waiting_msgs_lock;
	struct list_head waiting_msgs;

	/* The list of command receivers that are registered for commands
	   on this interface.  RCU list; cmd_rcvrs_mutex serializes writers. */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/* Events that were queued because no one was there to receive
	   them. */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;    /* A thread is draining the queue. */
	char             event_msg_printed;    /* "Queue full" warning shown once. */

	/* The event receiver for my BMC, only really used at panic
	   shutdown as a place to store this. */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	int maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/* A cheap hack, if this is non-null and a message to an
	   interface comes in with a NULL user, call this routine with
	   it.  Note that the message will still be freed by the
	   caller.  This only works on the system interface. */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/* When we are scanning the channels for an SMI, this will
	   tell which channel we are scanning. */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char                  proc_dir_name[10];

	/* Counters indexed by the IPMI_STAT_* defines above. */
	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
388
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct device_driver ipmidriver = {
	.name = "ipmi",
	.bus = &platform_bus_type
};
static DEFINE_MUTEX(ipmidriver_mutex);

/* All registered interfaces; ipmi_interfaces_mutex serializes updates. */
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/* List of watchers that want to know when smi's are added and
   deleted. */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);


/* Lockless helpers for the per-interface atomic statistics counters. */
#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
411
412
413 static void free_recv_msg_list(struct list_head *q)
414 {
415 struct ipmi_recv_msg *msg, *msg2;
416
417 list_for_each_entry_safe(msg, msg2, q, link) {
418 list_del(&msg->link);
419 ipmi_free_recv_msg(msg);
420 }
421 }
422
423 static void free_smi_msg_list(struct list_head *q)
424 {
425 struct ipmi_smi_msg *msg, *msg2;
426
427 list_for_each_entry_safe(msg, msg2, q, link) {
428 list_del(&msg->link);
429 ipmi_free_smi_msg(msg);
430 }
431 }
432
/*
 * Free everything hanging off an interface.  Called from intf_free()
 * when the last reference is dropped, so no users remain active.
 */
static void clean_up_interface_data(ipmi_smi_t intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	free_smi_msg_list(&intf->waiting_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	/* Safe to free now: the grace period above has completed. */
	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	/* Release receive messages still parked in the sequence table
	   waiting for responses that will never arrive. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
		{
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
}
462
463 static void intf_free(struct kref *ref)
464 {
465 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
466
467 clean_up_interface_data(intf);
468 kfree(intf);
469 }
470
/* Temporary record of an interface, built so "new smi" callbacks can
   be delivered after the interface-list mutex has been dropped. */
struct watcher_entry {
	int              intf_num;
	ipmi_smi_t       intf;	/* Holds a reference via kref_get(). */
	struct list_head link;
};
476
/*
 * Register a watcher and immediately report every existing interface
 * to it.  The callbacks run after ipmi_interfaces_mutex is released
 * (but still under smi_watchers_mutex), so each interface is pinned
 * with a reference while it sits on the temporary list.
 *
 * Returns 0 on success or -ENOMEM if the delivery list cannot be built.
 */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t intf;
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	/* Lock order: smi_watchers_mutex, then ipmi_interfaces_mutex. */
	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == -1)
			/* Interface not fully initialized yet; skip. */
			continue;
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	/* Deliver outside the interfaces mutex; drop each temporary
	   reference as soon as its callback has run. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	/* Allocation failed part-way; undo the references taken so far. */
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
526
527 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
528 {
529 mutex_lock(&smi_watchers_mutex);
530 list_del(&(watcher->link));
531 mutex_unlock(&smi_watchers_mutex);
532 return 0;
533 }
534
535 /*
536 * Must be called with smi_watchers_mutex held.
537 */
538 static void
539 call_smi_watchers(int i, struct device *dev)
540 {
541 struct ipmi_smi_watcher *w;
542
543 list_for_each_entry(w, &smi_watchers, link) {
544 if (try_module_get(w->owner)) {
545 w->new_smi(i, dev);
546 module_put(w->owner);
547 }
548 }
549 }
550
551 static int
552 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
553 {
554 if (addr1->addr_type != addr2->addr_type)
555 return 0;
556
557 if (addr1->channel != addr2->channel)
558 return 0;
559
560 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
561 struct ipmi_system_interface_addr *smi_addr1
562 = (struct ipmi_system_interface_addr *) addr1;
563 struct ipmi_system_interface_addr *smi_addr2
564 = (struct ipmi_system_interface_addr *) addr2;
565 return (smi_addr1->lun == smi_addr2->lun);
566 }
567
568 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
569 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
570 {
571 struct ipmi_ipmb_addr *ipmb_addr1
572 = (struct ipmi_ipmb_addr *) addr1;
573 struct ipmi_ipmb_addr *ipmb_addr2
574 = (struct ipmi_ipmb_addr *) addr2;
575
576 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
577 && (ipmb_addr1->lun == ipmb_addr2->lun));
578 }
579
580 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
581 struct ipmi_lan_addr *lan_addr1
582 = (struct ipmi_lan_addr *) addr1;
583 struct ipmi_lan_addr *lan_addr2
584 = (struct ipmi_lan_addr *) addr2;
585
586 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
587 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
588 && (lan_addr1->session_handle
589 == lan_addr2->session_handle)
590 && (lan_addr1->lun == lan_addr2->lun));
591 }
592
593 return 1;
594 }
595
596 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
597 {
598 if (len < sizeof(struct ipmi_system_interface_addr)) {
599 return -EINVAL;
600 }
601
602 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
603 if (addr->channel != IPMI_BMC_CHANNEL)
604 return -EINVAL;
605 return 0;
606 }
607
608 if ((addr->channel == IPMI_BMC_CHANNEL)
609 || (addr->channel >= IPMI_MAX_CHANNELS)
610 || (addr->channel < 0))
611 return -EINVAL;
612
613 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
614 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
615 {
616 if (len < sizeof(struct ipmi_ipmb_addr)) {
617 return -EINVAL;
618 }
619 return 0;
620 }
621
622 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
623 if (len < sizeof(struct ipmi_lan_addr)) {
624 return -EINVAL;
625 }
626 return 0;
627 }
628
629 return -EINVAL;
630 }
631
632 unsigned int ipmi_addr_length(int addr_type)
633 {
634 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
635 return sizeof(struct ipmi_system_interface_addr);
636
637 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
638 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
639 {
640 return sizeof(struct ipmi_ipmb_addr);
641 }
642
643 if (addr_type == IPMI_LAN_ADDR_TYPE)
644 return sizeof(struct ipmi_lan_addr);
645
646 return 0;
647 }
648
649 static void deliver_response(struct ipmi_recv_msg *msg)
650 {
651 if (!msg->user) {
652 ipmi_smi_t intf = msg->user_msg_data;
653
654 /* Special handling for NULL users. */
655 if (intf->null_user_handler) {
656 intf->null_user_handler(intf, msg);
657 ipmi_inc_stat(intf, handled_local_responses);
658 } else {
659 /* No handler, so give up. */
660 ipmi_inc_stat(intf, unhandled_local_responses);
661 }
662 ipmi_free_recv_msg(msg);
663 } else {
664 ipmi_user_t user = msg->user;
665 user->handler->ipmi_recv_hndl(msg, user->handler_data);
666 }
667 }
668
669 static void
670 deliver_err_response(struct ipmi_recv_msg *msg, int err)
671 {
672 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
673 msg->msg_data[0] = err;
674 msg->msg.netfn |= 1; /* Convert to a response. */
675 msg->msg.data_len = 1;
676 msg->msg.data = msg->msg_data;
677 deliver_response(msg);
678 }
679
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held.  Returns 0 and fills in
   *seq/*seqid on success, or -EAGAIN if all 64 slots are busy. */
static int intf_next_seq(ipmi_smi_t           intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	/* Scan from curr_seq around the table; the loop stops one short
	   of revisiting the start slot, so the final inuse test below
	   covers both "found free" and "wrapped without finding". */
	for (i = intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
	{
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		/* Bump the per-slot id so stale responses can't match. */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
722
723 /* Return the receive message for the given sequence number and
724 release the sequence number so it can be reused. Some other data
725 is passed in to be sure the message matches up correctly (to help
726 guard against message coming in after their timeout and the
727 sequence number being reused). */
728 static int intf_find_seq(ipmi_smi_t intf,
729 unsigned char seq,
730 short channel,
731 unsigned char cmd,
732 unsigned char netfn,
733 struct ipmi_addr *addr,
734 struct ipmi_recv_msg **recv_msg)
735 {
736 int rv = -ENODEV;
737 unsigned long flags;
738
739 if (seq >= IPMI_IPMB_NUM_SEQ)
740 return -EINVAL;
741
742 spin_lock_irqsave(&(intf->seq_lock), flags);
743 if (intf->seq_table[seq].inuse) {
744 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
745
746 if ((msg->addr.channel == channel)
747 && (msg->msg.cmd == cmd)
748 && (msg->msg.netfn == netfn)
749 && (ipmi_addr_equal(addr, &(msg->addr))))
750 {
751 *recv_msg = msg;
752 intf->seq_table[seq].inuse = 0;
753 rv = 0;
754 }
755 }
756 spin_unlock_irqrestore(&(intf->seq_lock), flags);
757
758 return rv;
759 }
760
761
762 /* Start the timer for a specific sequence table entry. */
763 static int intf_start_seq_timer(ipmi_smi_t intf,
764 long msgid)
765 {
766 int rv = -ENODEV;
767 unsigned long flags;
768 unsigned char seq;
769 unsigned long seqid;
770
771
772 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
773
774 spin_lock_irqsave(&(intf->seq_lock), flags);
775 /* We do this verification because the user can be deleted
776 while a message is outstanding. */
777 if ((intf->seq_table[seq].inuse)
778 && (intf->seq_table[seq].seqid == seqid))
779 {
780 struct seq_table *ent = &(intf->seq_table[seq]);
781 ent->timeout = ent->orig_timeout;
782 rv = 0;
783 }
784 spin_unlock_irqrestore(&(intf->seq_lock), flags);
785
786 return rv;
787 }
788
789 /* Got an error for the send message for a specific sequence number. */
790 static int intf_err_seq(ipmi_smi_t intf,
791 long msgid,
792 unsigned int err)
793 {
794 int rv = -ENODEV;
795 unsigned long flags;
796 unsigned char seq;
797 unsigned long seqid;
798 struct ipmi_recv_msg *msg = NULL;
799
800
801 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
802
803 spin_lock_irqsave(&(intf->seq_lock), flags);
804 /* We do this verification because the user can be deleted
805 while a message is outstanding. */
806 if ((intf->seq_table[seq].inuse)
807 && (intf->seq_table[seq].seqid == seqid))
808 {
809 struct seq_table *ent = &(intf->seq_table[seq]);
810
811 ent->inuse = 0;
812 msg = ent->recv_msg;
813 rv = 0;
814 }
815 spin_unlock_irqrestore(&(intf->seq_lock), flags);
816
817 if (msg)
818 deliver_err_response(msg, err);
819
820 return rv;
821 }
822
823
/*
 * Create a new user bound to interface if_num.
 *
 * handler      - callbacks for delivering received messages (required).
 * handler_data - opaque cookie passed back to the handler.
 * user         - on success, receives the new user handle.
 *
 * Returns 0 on success, -EINVAL for a bad handler or unknown
 * interface, -ENODEV if the lower layer is going away, or -ENOMEM.
 * Each user holds a reference on its interface and on the lower
 * layer's module for its lifetime.
 */
int ipmi_create_user(unsigned int          if_num,
		     struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     ipmi_user_t           *user)
{
	unsigned long flags;
	ipmi_user_t   new_user;
	int           rv = 0;
	ipmi_smi_t    intf;

	/* There is no module usecount here, because it's not
	   required.  Since this can only be used by and called from
	   other modules, they will implicitly use this module, and
	   thus this can't be removed unless the other modules are
	   removed. */

	if (handler == NULL)
		return -EINVAL;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;

		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;
	}

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = 0;

	/* Pin the lower layer's module so it can't unload under us. */
	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	/* Hold the lock so intf->handlers is guaranteed to be good
	 * until now */
	mutex_unlock(&ipmi_interfaces_mutex);

	/* Only now publish the user; seq_lock protects intf->users. */
	new_user->valid = 1;
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	*user = new_user;
	return 0;

 out_kref:
	kref_put(&intf->refcount, intf_free);
 out_kfree:
	mutex_unlock(&ipmi_interfaces_mutex);
	kfree(new_user);
	return rv;
}
910
911 static void free_user(struct kref *ref)
912 {
913 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
914 kfree(user);
915 }
916
/*
 * Tear down a user: cancel its outstanding sequence-table entries,
 * remove its command registrations (with an RCU grace period before
 * freeing them), release the lower layer's module/usecount, and drop
 * the references on the interface and the user itself.  Always
 * returns 0.
 */
int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t       intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	/* Mark invalid first so in-flight deliveries can notice. */
	user->valid = 0;

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user))
		{
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	/* Hold the mutex so intf->handlers can't change underneath us. */
	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
977
978 void ipmi_get_version(ipmi_user_t user,
979 unsigned char *major,
980 unsigned char *minor)
981 {
982 *major = user->intf->ipmi_version_major;
983 *minor = user->intf->ipmi_version_minor;
984 }
985
986 int ipmi_set_my_address(ipmi_user_t user,
987 unsigned int channel,
988 unsigned char address)
989 {
990 if (channel >= IPMI_MAX_CHANNELS)
991 return -EINVAL;
992 user->intf->channels[channel].address = address;
993 return 0;
994 }
995
996 int ipmi_get_my_address(ipmi_user_t user,
997 unsigned int channel,
998 unsigned char *address)
999 {
1000 if (channel >= IPMI_MAX_CHANNELS)
1001 return -EINVAL;
1002 *address = user->intf->channels[channel].address;
1003 return 0;
1004 }
1005
1006 int ipmi_set_my_LUN(ipmi_user_t user,
1007 unsigned int channel,
1008 unsigned char LUN)
1009 {
1010 if (channel >= IPMI_MAX_CHANNELS)
1011 return -EINVAL;
1012 user->intf->channels[channel].lun = LUN & 0x3;
1013 return 0;
1014 }
1015
1016 int ipmi_get_my_LUN(ipmi_user_t user,
1017 unsigned int channel,
1018 unsigned char *address)
1019 {
1020 if (channel >= IPMI_MAX_CHANNELS)
1021 return -EINVAL;
1022 *address = user->intf->channels[channel].lun;
1023 return 0;
1024 }
1025
1026 int ipmi_get_maintenance_mode(ipmi_user_t user)
1027 {
1028 int mode;
1029 unsigned long flags;
1030
1031 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1032 mode = user->intf->maintenance_mode;
1033 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1034
1035 return mode;
1036 }
1037 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1038
/* Push the current maintenance-mode enable state down to the lower
   layer, if it provides a set_maintenance_mode hook. */
static void maintenance_mode_update(ipmi_smi_t intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}
1045
1046 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1047 {
1048 int rv = 0;
1049 unsigned long flags;
1050 ipmi_smi_t intf = user->intf;
1051
1052 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1053 if (intf->maintenance_mode != mode) {
1054 switch (mode) {
1055 case IPMI_MAINTENANCE_MODE_AUTO:
1056 intf->maintenance_mode = mode;
1057 intf->maintenance_mode_enable
1058 = (intf->auto_maintenance_timeout > 0);
1059 break;
1060
1061 case IPMI_MAINTENANCE_MODE_OFF:
1062 intf->maintenance_mode = mode;
1063 intf->maintenance_mode_enable = 0;
1064 break;
1065
1066 case IPMI_MAINTENANCE_MODE_ON:
1067 intf->maintenance_mode = mode;
1068 intf->maintenance_mode_enable = 1;
1069 break;
1070
1071 default:
1072 rv = -EINVAL;
1073 goto out_unlock;
1074 }
1075
1076 maintenance_mode_update(intf);
1077 }
1078 out_unlock:
1079 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1080
1081 return rv;
1082 }
1083 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1084
/*
 * Enable or disable event delivery to a user.  When enabling, any
 * events queued on the interface are drained to this user.  Delivery
 * runs with events_lock dropped (handlers may sleep), guarded by the
 * delivering_events flag so only one thread drains at a time.
 * Always returns 0.
 */
int ipmi_set_gets_events(ipmi_user_t user, int val)
{
	unsigned long        flags;
	ipmi_smi_t           intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	user->gets_events = val;

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events.  Loop because new events may be
	   queued while the lock is dropped for delivery below. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			/* Queue was full; announce that it has drained. */
			printk(KERN_WARNING PFX "Event queue no longer"
			       " full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		/* Each delivered message carries a user reference. */
		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
1133
1134 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1135 unsigned char netfn,
1136 unsigned char cmd,
1137 unsigned char chan)
1138 {
1139 struct cmd_rcvr *rcvr;
1140
1141 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1142 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1143 && (rcvr->chans & (1 << chan)))
1144 return rcvr;
1145 }
1146 return NULL;
1147 }
1148
1149 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1150 unsigned char netfn,
1151 unsigned char cmd,
1152 unsigned int chans)
1153 {
1154 struct cmd_rcvr *rcvr;
1155
1156 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1157 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1158 && (rcvr->chans & chans))
1159 return 0;
1160 }
1161 return 1;
1162 }
1163
1164 int ipmi_register_for_cmd(ipmi_user_t user,
1165 unsigned char netfn,
1166 unsigned char cmd,
1167 unsigned int chans)
1168 {
1169 ipmi_smi_t intf = user->intf;
1170 struct cmd_rcvr *rcvr;
1171 int rv = 0;
1172
1173
1174 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1175 if (!rcvr)
1176 return -ENOMEM;
1177 rcvr->cmd = cmd;
1178 rcvr->netfn = netfn;
1179 rcvr->chans = chans;
1180 rcvr->user = user;
1181
1182 mutex_lock(&intf->cmd_rcvrs_mutex);
1183 /* Make sure the command/netfn is not already registered. */
1184 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1185 rv = -EBUSY;
1186 goto out_unlock;
1187 }
1188
1189 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1190
1191 out_unlock:
1192 mutex_unlock(&intf->cmd_rcvrs_mutex);
1193 if (rv)
1194 kfree(rcvr);
1195
1196 return rv;
1197 }
1198
/*
 * Remove @user's command receiver registration for the channels in
 * @chans.  Receivers whose channel mask becomes empty are unlinked
 * under RCU and freed only after all readers are done.  Returns 0 if
 * at least one registration was removed, -ENOENT otherwise.
 */
int ipmi_unregister_for_cmd(ipmi_user_t   user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;	/* chain of receivers to free later */
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				/* Defer the free: RCU readers may
				   still be traversing this entry. */
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	/* Wait for all RCU readers to finish before freeing. */
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
1235
/*
 * Compute the IPMB 2's-complement checksum of a buffer: the value
 * that makes the byte-sum of (data + checksum) equal zero mod 256.
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;
	int           i;

	for (i = 0; i < size; i++)
		csum += data[i];

	return -csum;
}
1246
/*
 * Build a Send Message request carrying an IPMB message in
 * smi_msg->data.  For broadcasts an extra zero byte is inserted after
 * the channel, shifting the rest of the message by one.
 */
static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	/* Offset of the IPMB body within data[]: 1 for broadcast, 0
	   otherwise. */
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Checksum covers the two bytes just stored (slave addr and
	   netfn/lun). */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	/* data_size does not include the broadcast offset yet; below
	   it doubles as the checksum position relative to data[i]. */
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on.  It covers
	   everything from the source address onward. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/* Add on the checksum size and the offset from the
	   broadcast. */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
1288
/*
 * Build a Send Message request carrying a LAN-bridged message in
 * smi_msg->data.  Same layout idea as format_ipmb_msg(), but the LAN
 * header adds a session handle and uses SWIDs instead of slave
 * addresses (and has no broadcast form).
 */
static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Checksum over the remote SWID and netfn/lun bytes. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on.  It covers
	   everything from the local SWID onward. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/* Add on the checksum size and the offset from the
	   broadcast. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
1325
1326 /* Separate from ipmi_request so that the user does not have to be
1327 supplied in certain circumstances (mainly at panic time). If
1328 messages are supplied, they will be freed, even if an error
1329 occurs. */
static int i_ipmi_request(ipmi_user_t          user,
			  ipmi_smi_t           intf,
			  struct ipmi_addr     *addr,
			  long                 msgid,
			  struct kernel_ipmi_msg *msg,
			  void                 *user_msg_data,
			  void                 *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int                  priority,
			  unsigned char        source_address,
			  unsigned char        source_lun,
			  int                  retries,
			  unsigned int         retry_time_ms)
{
	int rv = 0;
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;
	struct ipmi_smi_handlers *handlers;


	/* Use caller-supplied messages when given, otherwise allocate
	   them.  Either way they are freed on error (see out_err). */
	if (supplied_recv) {
		recv_msg = supplied_recv;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			return -ENOMEM;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi) {
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	} else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			ipmi_free_recv_msg(recv_msg);
			return -ENOMEM;
		}
	}

	/* intf->handlers is RCU-protected; hold the read lock until
	   the message has been handed to the lower layer. */
	rcu_read_lock();
	handlers = intf->handlers;
	if (!handlers) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/* Store the message to send in the receive message so timeout
	   responses can get the proper response data. */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		/* Message goes directly to the local management
		   controller. */
		struct ipmi_system_interface_addr *smi_addr;

		if (msg->netfn & 1) {
			/* Responses are not allowed to the SMI. */
			rv = -EINVAL;
			goto out_err;
		}

		smi_addr = (struct ipmi_system_interface_addr *) addr;
		if (smi_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
			|| (msg->cmd == IPMI_GET_MSG_CMD)
			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
		{
			/* We don't let the user do these, since we manage
			   the sequence numbers. */
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
		{
			/* Reset and firmware commands restart the
			   auto-maintenance timeout and, if maintenance
			   mode is AUTO and not yet active, enable it. */
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			intf->auto_maintenance_timeout
				= IPMI_MAINTENANCE_MODE_TIMEOUT;
			if (!intf->maintenance_mode
			    && !intf->maintenance_mode_enable)
			{
				intf->maintenance_mode_enable = 1;
				maintenance_mode_update(intf);
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}

		/* 2 bytes of header (netfn/lun, cmd) plus the data. */
		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
		smi_msg->data[1] = msg->cmd;
		smi_msg->msgid = msgid;
		smi_msg->user_data = recv_msg;
		if (msg->data_len > 0)
			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
		smi_msg->data_size = msg->data_len + 2;
		ipmi_inc_stat(intf, sent_local_commands);
	} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
		   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
	{
		/* Message goes out on the IPMB bus. */
		struct ipmi_ipmb_addr *ipmb_addr;
		unsigned char ipmb_seq;
		long seqid;
		int broadcast = 0;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (intf->channels[addr->channel].medium
		    != IPMI_CHANNEL_MEDIUM_IPMB)
		{
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (retries < 0) {
			if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
				retries = 0; /* Don't retry broadcasts. */
			else
				retries = 4;
		}
		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
			/* Broadcasts add a zero at the beginning of the
			   message, but otherwise is the same as an IPMB
			   address. */
			addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			broadcast = 1;
		}


		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/* 9 for the header and 1 for the checksum, plus
		   possibly one for the broadcast. */
		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
		if (ipmb_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/* It's a response, so use the user's sequence
			   from msgid. */
			ipmi_inc_stat(intf, sent_ipmb_responses);
			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
					msgid, broadcast,
					source_address, source_lun);

			/* Save the receive message so we can use it
			   to deliver the response. */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			ipmi_inc_stat(intf, sent_ipmb_commands);

			/* Create a sequence number with a 1 second
			   timeout and 4 retries. */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   broadcast,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/* We have used up all the sequence numbers,
				   probably, so abort. */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			/* Store the sequence number in the message,
			   so that when the send message response
			   comes back we can start the timer. */
			format_ipmb_msg(smi_msg, msg, ipmb_addr,
					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
					ipmb_seq, broadcast,
					source_address, source_lun);

			/* Copy the message into the recv message data, so we
			   can retransmit it later if necessary. */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/* We don't unlock until here, because we need
			   to copy the completed message into the
			   recv_msg before we release the lock.
			   Otherwise, race conditions may bite us.  I
			   know that's pretty paranoid, but I prefer
			   to be correct. */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
		/* Message is bridged out over a LAN/serial channel. */
		struct ipmi_lan_addr *lan_addr;
		unsigned char ipmb_seq;
		long seqid;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if ((intf->channels[addr->channel].medium
		     != IPMI_CHANNEL_MEDIUM_8023LAN)
		    && (intf->channels[addr->channel].medium
			!= IPMI_CHANNEL_MEDIUM_ASYNC))
		{
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		retries = 4;

		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/* 11 for the header and 1 for the checksum. */
		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		lan_addr = (struct ipmi_lan_addr *) addr;
		if (lan_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/* It's a response, so use the user's sequence
			   from msgid. */
			ipmi_inc_stat(intf, sent_lan_responses);
			format_lan_msg(smi_msg, msg, lan_addr, msgid,
				       msgid, source_lun);

			/* Save the receive message so we can use it
			   to deliver the response. */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			ipmi_inc_stat(intf, sent_lan_commands);

			/* Create a sequence number with a 1 second
			   timeout and 4 retries. */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   0,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/* We have used up all the sequence numbers,
				   probably, so abort. */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			/* Store the sequence number in the message,
			   so that when the send message response
			   comes back we can start the timer. */
			format_lan_msg(smi_msg, msg, lan_addr,
				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				       ipmb_seq, source_lun);

			/* Copy the message into the recv message data, so we
			   can retransmit it later if necessary. */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/* We don't unlock until here, because we need
			   to copy the completed message into the
			   recv_msg before we release the lock.
			   Otherwise, race conditions may bite us.  I
			   know that's pretty paranoid, but I prefer
			   to be correct. */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
		goto out_err;
	}

#ifdef DEBUG_MSGING
	{
		int m;
		for (m = 0; m < smi_msg->data_size; m++)
			printk(" %2.2x", smi_msg->data[m]);
		printk("\n");
	}
#endif

	handlers->sender(intf->send_info, smi_msg, priority);
	rcu_read_unlock();

	return 0;

 out_err:
	rcu_read_unlock();
	ipmi_free_smi_msg(smi_msg);
	ipmi_free_recv_msg(recv_msg);
	return rv;
}
1690
1691 static int check_addr(ipmi_smi_t intf,
1692 struct ipmi_addr *addr,
1693 unsigned char *saddr,
1694 unsigned char *lun)
1695 {
1696 if (addr->channel >= IPMI_MAX_CHANNELS)
1697 return -EINVAL;
1698 *lun = intf->channels[addr->channel].lun;
1699 *saddr = intf->channels[addr->channel].address;
1700 return 0;
1701 }
1702
1703 int ipmi_request_settime(ipmi_user_t user,
1704 struct ipmi_addr *addr,
1705 long msgid,
1706 struct kernel_ipmi_msg *msg,
1707 void *user_msg_data,
1708 int priority,
1709 int retries,
1710 unsigned int retry_time_ms)
1711 {
1712 unsigned char saddr, lun;
1713 int rv;
1714
1715 if (!user)
1716 return -EINVAL;
1717 rv = check_addr(user->intf, addr, &saddr, &lun);
1718 if (rv)
1719 return rv;
1720 return i_ipmi_request(user,
1721 user->intf,
1722 addr,
1723 msgid,
1724 msg,
1725 user_msg_data,
1726 NULL, NULL,
1727 priority,
1728 saddr,
1729 lun,
1730 retries,
1731 retry_time_ms);
1732 }
1733
1734 int ipmi_request_supply_msgs(ipmi_user_t user,
1735 struct ipmi_addr *addr,
1736 long msgid,
1737 struct kernel_ipmi_msg *msg,
1738 void *user_msg_data,
1739 void *supplied_smi,
1740 struct ipmi_recv_msg *supplied_recv,
1741 int priority)
1742 {
1743 unsigned char saddr, lun;
1744 int rv;
1745
1746 if (!user)
1747 return -EINVAL;
1748 rv = check_addr(user->intf, addr, &saddr, &lun);
1749 if (rv)
1750 return rv;
1751 return i_ipmi_request(user,
1752 user->intf,
1753 addr,
1754 msgid,
1755 msg,
1756 user_msg_data,
1757 supplied_smi,
1758 supplied_recv,
1759 priority,
1760 saddr,
1761 lun,
1762 -1, 0);
1763 }
1764
1765 #ifdef CONFIG_PROC_FS
1766 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1767 int count, int *eof, void *data)
1768 {
1769 char *out = (char *) page;
1770 ipmi_smi_t intf = data;
1771 int i;
1772 int rv = 0;
1773
1774 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1775 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1776 out[rv-1] = '\n'; /* Replace the final space with a newline */
1777 out[rv] = '\0';
1778 rv++;
1779 return rv;
1780 }
1781
1782 static int version_file_read_proc(char *page, char **start, off_t off,
1783 int count, int *eof, void *data)
1784 {
1785 char *out = (char *) page;
1786 ipmi_smi_t intf = data;
1787
1788 return sprintf(out, "%u.%u\n",
1789 ipmi_version_major(&intf->bmc->id),
1790 ipmi_version_minor(&intf->bmc->id));
1791 }
1792
1793 static int stat_file_read_proc(char *page, char **start, off_t off,
1794 int count, int *eof, void *data)
1795 {
1796 char *out = (char *) page;
1797 ipmi_smi_t intf = data;
1798
1799 out += sprintf(out, "sent_invalid_commands: %u\n",
1800 ipmi_get_stat(intf, sent_invalid_commands));
1801 out += sprintf(out, "sent_local_commands: %u\n",
1802 ipmi_get_stat(intf, sent_local_commands));
1803 out += sprintf(out, "handled_local_responses: %u\n",
1804 ipmi_get_stat(intf, handled_local_responses));
1805 out += sprintf(out, "unhandled_local_responses: %u\n",
1806 ipmi_get_stat(intf, unhandled_local_responses));
1807 out += sprintf(out, "sent_ipmb_commands: %u\n",
1808 ipmi_get_stat(intf, sent_ipmb_commands));
1809 out += sprintf(out, "sent_ipmb_command_errs: %u\n",
1810 ipmi_get_stat(intf, sent_ipmb_command_errs));
1811 out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
1812 ipmi_get_stat(intf, retransmitted_ipmb_commands));
1813 out += sprintf(out, "timed_out_ipmb_commands: %u\n",
1814 ipmi_get_stat(intf, timed_out_ipmb_commands));
1815 out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n",
1816 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1817 out += sprintf(out, "sent_ipmb_responses: %u\n",
1818 ipmi_get_stat(intf, sent_ipmb_responses));
1819 out += sprintf(out, "handled_ipmb_responses: %u\n",
1820 ipmi_get_stat(intf, handled_ipmb_responses));
1821 out += sprintf(out, "invalid_ipmb_responses: %u\n",
1822 ipmi_get_stat(intf, invalid_ipmb_responses));
1823 out += sprintf(out, "unhandled_ipmb_responses: %u\n",
1824 ipmi_get_stat(intf, unhandled_ipmb_responses));
1825 out += sprintf(out, "sent_lan_commands: %u\n",
1826 ipmi_get_stat(intf, sent_lan_commands));
1827 out += sprintf(out, "sent_lan_command_errs: %u\n",
1828 ipmi_get_stat(intf, sent_lan_command_errs));
1829 out += sprintf(out, "retransmitted_lan_commands: %u\n",
1830 ipmi_get_stat(intf, retransmitted_lan_commands));
1831 out += sprintf(out, "timed_out_lan_commands: %u\n",
1832 ipmi_get_stat(intf, timed_out_lan_commands));
1833 out += sprintf(out, "sent_lan_responses: %u\n",
1834 ipmi_get_stat(intf, sent_lan_responses));
1835 out += sprintf(out, "handled_lan_responses: %u\n",
1836 ipmi_get_stat(intf, handled_lan_responses));
1837 out += sprintf(out, "invalid_lan_responses: %u\n",
1838 ipmi_get_stat(intf, invalid_lan_responses));
1839 out += sprintf(out, "unhandled_lan_responses: %u\n",
1840 ipmi_get_stat(intf, unhandled_lan_responses));
1841 out += sprintf(out, "handled_commands: %u\n",
1842 ipmi_get_stat(intf, handled_commands));
1843 out += sprintf(out, "invalid_commands: %u\n",
1844 ipmi_get_stat(intf, invalid_commands));
1845 out += sprintf(out, "unhandled_commands: %u\n",
1846 ipmi_get_stat(intf, unhandled_commands));
1847 out += sprintf(out, "invalid_events: %u\n",
1848 ipmi_get_stat(intf, invalid_events));
1849 out += sprintf(out, "events: %u\n",
1850 ipmi_get_stat(intf, events));
1851
1852 return (out - ((char *) page));
1853 }
1854 #endif /* CONFIG_PROC_FS */
1855
1856 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1857 read_proc_t *read_proc, write_proc_t *write_proc,
1858 void *data, struct module *owner)
1859 {
1860 int rv = 0;
1861 #ifdef CONFIG_PROC_FS
1862 struct proc_dir_entry *file;
1863 struct ipmi_proc_entry *entry;
1864
1865 /* Create a list element. */
1866 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1867 if (!entry)
1868 return -ENOMEM;
1869 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1870 if (!entry->name) {
1871 kfree(entry);
1872 return -ENOMEM;
1873 }
1874 strcpy(entry->name, name);
1875
1876 file = create_proc_entry(name, 0, smi->proc_dir);
1877 if (!file) {
1878 kfree(entry->name);
1879 kfree(entry);
1880 rv = -ENOMEM;
1881 } else {
1882 file->data = data;
1883 file->read_proc = read_proc;
1884 file->write_proc = write_proc;
1885 file->owner = owner;
1886
1887 mutex_lock(&smi->proc_entry_lock);
1888 /* Stick it on the list. */
1889 entry->next = smi->proc_entries;
1890 smi->proc_entries = entry;
1891 mutex_unlock(&smi->proc_entry_lock);
1892 }
1893 #endif /* CONFIG_PROC_FS */
1894
1895 return rv;
1896 }
1897
1898 static int add_proc_entries(ipmi_smi_t smi, int num)
1899 {
1900 int rv = 0;
1901
1902 #ifdef CONFIG_PROC_FS
1903 sprintf(smi->proc_dir_name, "%d", num);
1904 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1905 if (!smi->proc_dir)
1906 rv = -ENOMEM;
1907 else {
1908 smi->proc_dir->owner = THIS_MODULE;
1909 }
1910
1911 if (rv == 0)
1912 rv = ipmi_smi_add_proc_entry(smi, "stats",
1913 stat_file_read_proc, NULL,
1914 smi, THIS_MODULE);
1915
1916 if (rv == 0)
1917 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1918 ipmb_file_read_proc, NULL,
1919 smi, THIS_MODULE);
1920
1921 if (rv == 0)
1922 rv = ipmi_smi_add_proc_entry(smi, "version",
1923 version_file_read_proc, NULL,
1924 smi, THIS_MODULE);
1925 #endif /* CONFIG_PROC_FS */
1926
1927 return rv;
1928 }
1929
1930 static void remove_proc_entries(ipmi_smi_t smi)
1931 {
1932 #ifdef CONFIG_PROC_FS
1933 struct ipmi_proc_entry *entry;
1934
1935 mutex_lock(&smi->proc_entry_lock);
1936 while (smi->proc_entries) {
1937 entry = smi->proc_entries;
1938 smi->proc_entries = entry->next;
1939
1940 remove_proc_entry(entry->name, smi->proc_dir);
1941 kfree(entry->name);
1942 kfree(entry);
1943 }
1944 mutex_unlock(&smi->proc_entry_lock);
1945 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1946 #endif /* CONFIG_PROC_FS */
1947 }
1948
1949 static int __find_bmc_guid(struct device *dev, void *data)
1950 {
1951 unsigned char *id = data;
1952 struct bmc_device *bmc = dev_get_drvdata(dev);
1953 return memcmp(bmc->guid, id, 16) == 0;
1954 }
1955
1956 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1957 unsigned char *guid)
1958 {
1959 struct device *dev;
1960
1961 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1962 if (dev)
1963 return dev_get_drvdata(dev);
1964 else
1965 return NULL;
1966 }
1967
/*
 * Lookup key for finding an existing BMC by its product and device
 * IDs (used when the BMC has no GUID).
 */
struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};
1972
1973 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1974 {
1975 struct prod_dev_id *id = data;
1976 struct bmc_device *bmc = dev_get_drvdata(dev);
1977
1978 return (bmc->id.product_id == id->product_id
1979 && bmc->id.device_id == id->device_id);
1980 }
1981
1982 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1983 struct device_driver *drv,
1984 unsigned int product_id, unsigned char device_id)
1985 {
1986 struct prod_dev_id id = {
1987 .product_id = product_id,
1988 .device_id = device_id,
1989 };
1990 struct device *dev;
1991
1992 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1993 if (dev)
1994 return dev_get_drvdata(dev);
1995 else
1996 return NULL;
1997 }
1998
1999 static ssize_t device_id_show(struct device *dev,
2000 struct device_attribute *attr,
2001 char *buf)
2002 {
2003 struct bmc_device *bmc = dev_get_drvdata(dev);
2004
2005 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2006 }
2007
2008 static ssize_t provides_dev_sdrs_show(struct device *dev,
2009 struct device_attribute *attr,
2010 char *buf)
2011 {
2012 struct bmc_device *bmc = dev_get_drvdata(dev);
2013
2014 return snprintf(buf, 10, "%u\n",
2015 (bmc->id.device_revision & 0x80) >> 7);
2016 }
2017
2018 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2019 char *buf)
2020 {
2021 struct bmc_device *bmc = dev_get_drvdata(dev);
2022
2023 return snprintf(buf, 20, "%u\n",
2024 bmc->id.device_revision & 0x0F);
2025 }
2026
2027 static ssize_t firmware_rev_show(struct device *dev,
2028 struct device_attribute *attr,
2029 char *buf)
2030 {
2031 struct bmc_device *bmc = dev_get_drvdata(dev);
2032
2033 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2034 bmc->id.firmware_revision_2);
2035 }
2036
2037 static ssize_t ipmi_version_show(struct device *dev,
2038 struct device_attribute *attr,
2039 char *buf)
2040 {
2041 struct bmc_device *bmc = dev_get_drvdata(dev);
2042
2043 return snprintf(buf, 20, "%u.%u\n",
2044 ipmi_version_major(&bmc->id),
2045 ipmi_version_minor(&bmc->id));
2046 }
2047
2048 static ssize_t add_dev_support_show(struct device *dev,
2049 struct device_attribute *attr,
2050 char *buf)
2051 {
2052 struct bmc_device *bmc = dev_get_drvdata(dev);
2053
2054 return snprintf(buf, 10, "0x%02x\n",
2055 bmc->id.additional_device_support);
2056 }
2057
2058 static ssize_t manufacturer_id_show(struct device *dev,
2059 struct device_attribute *attr,
2060 char *buf)
2061 {
2062 struct bmc_device *bmc = dev_get_drvdata(dev);
2063
2064 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2065 }
2066
2067 static ssize_t product_id_show(struct device *dev,
2068 struct device_attribute *attr,
2069 char *buf)
2070 {
2071 struct bmc_device *bmc = dev_get_drvdata(dev);
2072
2073 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2074 }
2075
2076 static ssize_t aux_firmware_rev_show(struct device *dev,
2077 struct device_attribute *attr,
2078 char *buf)
2079 {
2080 struct bmc_device *bmc = dev_get_drvdata(dev);
2081
2082 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2083 bmc->id.aux_firmware_revision[3],
2084 bmc->id.aux_firmware_revision[2],
2085 bmc->id.aux_firmware_revision[1],
2086 bmc->id.aux_firmware_revision[0]);
2087 }
2088
2089 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2090 char *buf)
2091 {
2092 struct bmc_device *bmc = dev_get_drvdata(dev);
2093
2094 return snprintf(buf, 100, "%Lx%Lx\n",
2095 (long long) bmc->guid[0],
2096 (long long) bmc->guid[8]);
2097 }
2098
/*
 * Remove the sysfs attribute files created by create_files().  The
 * aux firmware revision and GUID files are removed only when the
 * corresponding data was set (and so the files were created).  A
 * no-op if the platform device was never created.
 */
static void remove_files(struct bmc_device *bmc)
{
	if (!bmc->dev)
		return;

	device_remove_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->revision_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->version_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->product_id_attr);

	/* These two are conditional in create_files(), too. */
	if (bmc->id.aux_firmware_revision_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
	if (bmc->guid_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->guid_attr);
}
2128
2129 static void
2130 cleanup_bmc_device(struct kref *ref)
2131 {
2132 struct bmc_device *bmc;
2133
2134 bmc = container_of(ref, struct bmc_device, refcount);
2135
2136 remove_files(bmc);
2137 platform_device_unregister(bmc->dev);
2138 kfree(bmc);
2139 }
2140
/*
 * Detach the BMC from an interface: remove the sysfs cross-links
 * between the interface and BMC devices, then drop the interface's
 * reference to the bmc_device (which is destroyed by
 * cleanup_bmc_device() on the final put).
 */
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (intf->sysfs_name) {
		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
	}
	if (intf->my_dev_name) {
		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
	}

	/* ipmidriver_mutex serializes this put against BMC
	   lookup/registration (see ipmi_bmc_register). */
	mutex_lock(&ipmidriver_mutex);
	kref_put(&bmc->refcount, cleanup_bmc_device);
	intf->bmc = NULL;
	mutex_unlock(&ipmidriver_mutex);
}
2161
/*
 * Create the sysfs attribute files for a BMC device.  The aux
 * firmware revision and GUID files are created only when the BMC
 * reported those values.  On any failure, everything created so far
 * is removed via the reverse-order unwind labels below and the error
 * is returned.
 */
static int create_files(struct bmc_device *bmc)
{
	int err;

	/* Fill in the attribute descriptors before registering any of
	   them. */
	bmc->device_id_attr.attr.name = "device_id";
	bmc->device_id_attr.attr.mode = S_IRUGO;
	bmc->device_id_attr.show = device_id_show;

	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;

	bmc->revision_attr.attr.name = "revision";
	bmc->revision_attr.attr.mode = S_IRUGO;
	bmc->revision_attr.show = revision_show;

	bmc->firmware_rev_attr.attr.name = "firmware_revision";
	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->firmware_rev_attr.show = firmware_rev_show;

	bmc->version_attr.attr.name = "ipmi_version";
	bmc->version_attr.attr.mode = S_IRUGO;
	bmc->version_attr.show = ipmi_version_show;

	bmc->add_dev_support_attr.attr.name = "additional_device_support";
	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
	bmc->add_dev_support_attr.show = add_dev_support_show;

	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
	bmc->manufacturer_id_attr.show = manufacturer_id_show;

	bmc->product_id_attr.attr.name = "product_id";
	bmc->product_id_attr.attr.mode = S_IRUGO;
	bmc->product_id_attr.show = product_id_show;

	bmc->guid_attr.attr.name = "guid";
	bmc->guid_attr.attr.mode = S_IRUGO;
	bmc->guid_attr.show = guid_show;

	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;

	/* Register each file; on failure jump to the label that
	   removes everything created before it. */
	err = device_create_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
	if (err) goto out;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
	if (err) goto out_devid;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->revision_attr);
	if (err) goto out_sdrs;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
	if (err) goto out_rev;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->version_attr);
	if (err) goto out_firm;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
	if (err) goto out_version;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
	if (err) goto out_add_dev;
	err = device_create_file(&bmc->dev->dev,
			   &bmc->product_id_attr);
	if (err) goto out_manu;
	if (bmc->id.aux_firmware_revision_set) {
		err = device_create_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
		if (err) goto out_prod_id;
	}
	if (bmc->guid_set) {
		err = device_create_file(&bmc->dev->dev,
				   &bmc->guid_attr);
		if (err) goto out_aux_firm;
	}

	return 0;

/* Error unwind: remove files in the reverse of creation order. */
out_aux_firm:
	if (bmc->id.aux_firmware_revision_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
out_prod_id:
	device_remove_file(&bmc->dev->dev,
			   &bmc->product_id_attr);
out_manu:
	device_remove_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
out_add_dev:
	device_remove_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
out_version:
	device_remove_file(&bmc->dev->dev,
			   &bmc->version_attr);
out_firm:
	device_remove_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
out_rev:
	device_remove_file(&bmc->dev->dev,
			   &bmc->revision_attr);
out_sdrs:
	device_remove_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
out_devid:
	device_remove_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
out:
	return err;
}
2274
/*
 * Register the BMC behind an interface with the driver model.
 *
 * If a bmc_device matching this BMC (by GUID when available, else by
 * product id + device id) is already registered, the freshly allocated
 * intf->bmc is freed and the existing one is reused with an extra
 * reference.  Otherwise a new platform device is allocated and
 * registered and its sysfs attribute files are created.  In either
 * case, symlinks are created from the system-interface device to the
 * BMC device ("sysfs_name") and back ("ipmi<ifnum>").
 *
 * Returns 0 on success or a negative errno.  After the device is
 * registered, failures unwind through ipmi_bmc_unregister().
 */
static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
			     const char *sysfs_name)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;
	int size;
	char dummy[1];

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is an bmc_device struct
	 * representing the interfaced BMC already
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already an bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->refcount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	} else {
		char name[14];
		unsigned char orig_dev_id = bmc->id.device_id;
		int warn_printed = 0;

		snprintf(name, sizeof(name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);

		/* Some firmware exposes multiple BMCs with identical
		   ids; bump the device id until a free one is found
		   (or we wrap all the way around). */
		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id. This is an error in the"
				       " firmware, but incrementing the"
				       " device id to work around the problem."
				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;
			}
			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX
				       "Out of device ids!\n");
				break;
			}
		}

		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
		if (!bmc->dev) {
			mutex_unlock(&ipmidriver_mutex);
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to allocate platform device\n");
			return -ENOMEM;
		}
		bmc->dev->dev.driver = &ipmidriver;
		dev_set_drvdata(&bmc->dev->dev, bmc);
		kref_init(&bmc->refcount);

		rv = platform_device_add(bmc->dev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			platform_device_put(bmc->dev);
			bmc->dev = NULL;
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n",
			       rv);
			/* Don't go to out_err, you can only do that if
			   the device is registered already. */
			return rv;
		}

		rv = create_files(bmc);
		if (rv) {
			mutex_lock(&ipmidriver_mutex);
			platform_device_unregister(bmc->dev);
			mutex_unlock(&ipmidriver_mutex);

			return rv;
		}

		printk(KERN_INFO
		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	}

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
	if (!intf->sysfs_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link to BMC: %d\n",
		       rv);
		goto out_err;
	}

	rv = sysfs_create_link(&intf->si_dev->kobj,
			       &bmc->dev->dev.kobj, intf->sysfs_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;
	}

	/* snprintf with a zero-size buffer just computes the length
	   needed for "ipmi<ifnum>". */
	size = snprintf(dummy, 0, "ipmi%d", ifnum);
	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
	if (!intf->my_dev_name) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link from BMC: %d\n",
		       rv);
		goto out_err;
	}
	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);

	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n",
		       rv);
		goto out_err;
	}

	return 0;

out_err:
	ipmi_bmc_unregister(intf);
	return rv;
}
2445
2446 static int
2447 send_guid_cmd(ipmi_smi_t intf, int chan)
2448 {
2449 struct kernel_ipmi_msg msg;
2450 struct ipmi_system_interface_addr si;
2451
2452 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2453 si.channel = IPMI_BMC_CHANNEL;
2454 si.lun = 0;
2455
2456 msg.netfn = IPMI_NETFN_APP_REQUEST;
2457 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2458 msg.data = NULL;
2459 msg.data_len = 0;
2460 return i_ipmi_request(NULL,
2461 intf,
2462 (struct ipmi_addr *) &si,
2463 0,
2464 &msg,
2465 intf,
2466 NULL,
2467 NULL,
2468 0,
2469 intf->channels[0].address,
2470 intf->channels[0].lun,
2471 -1, 0);
2472 }
2473
2474 static void
2475 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2476 {
2477 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2478 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2479 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2480 /* Not for me */
2481 return;
2482
2483 if (msg->msg.data[0] != 0) {
2484 /* Error from getting the GUID, the BMC doesn't have one. */
2485 intf->bmc->guid_set = 0;
2486 goto out;
2487 }
2488
2489 if (msg->msg.data_len < 17) {
2490 intf->bmc->guid_set = 0;
2491 printk(KERN_WARNING PFX
2492 "guid_handler: The GUID response from the BMC was too"
2493 " short, it was %d but should have been 17. Assuming"
2494 " GUID is not available.\n",
2495 msg->msg.data_len);
2496 goto out;
2497 }
2498
2499 memcpy(intf->bmc->guid, msg->msg.data, 16);
2500 intf->bmc->guid_set = 1;
2501 out:
2502 wake_up(&intf->waitq);
2503 }
2504
2505 static void
2506 get_guid(ipmi_smi_t intf)
2507 {
2508 int rv;
2509
2510 intf->bmc->guid_set = 0x2;
2511 intf->null_user_handler = guid_handler;
2512 rv = send_guid_cmd(intf, 0);
2513 if (rv)
2514 /* Send failed, no GUID available. */
2515 intf->bmc->guid_set = 0;
2516 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2517 intf->null_user_handler = NULL;
2518 }
2519
2520 static int
2521 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2522 {
2523 struct kernel_ipmi_msg msg;
2524 unsigned char data[1];
2525 struct ipmi_system_interface_addr si;
2526
2527 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2528 si.channel = IPMI_BMC_CHANNEL;
2529 si.lun = 0;
2530
2531 msg.netfn = IPMI_NETFN_APP_REQUEST;
2532 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2533 msg.data = data;
2534 msg.data_len = 1;
2535 data[0] = chan;
2536 return i_ipmi_request(NULL,
2537 intf,
2538 (struct ipmi_addr *) &si,
2539 0,
2540 &msg,
2541 intf,
2542 NULL,
2543 NULL,
2544 0,
2545 intf->channels[0].address,
2546 intf->channels[0].lun,
2547 -1, 0);
2548 }
2549
/*
 * null_user_handler used during the interface bring-up channel scan.
 * Each Get Channel Info response records the channel's medium and
 * protocol, then the next channel is queried; the waiter in
 * ipmi_register_smi() is woken when curr_channel reaches
 * IPMI_MAX_CHANNELS (scan finished or aborted).
 */
static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int chan;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
	{
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/* If the MC does not support this
				   command, that is legal.  We just
				   assume it has one IPMB at channel
				   zero. */
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;
				rv = -ENOSYS;

				/* Mark the scan complete so the waiter
				   stops waiting. */
				intf->curr_channel = IPMI_MAX_CHANNELS;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		/* data[2]/data[3] hold the channel medium and protocol. */
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;

	next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		else
			rv = send_channel_info_cmd(intf, intf->curr_channel);

		if (rv) {
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);

			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
			       rv);
		}
	}
 out:
	return;
}
2609
2610 void ipmi_poll_interface(ipmi_user_t user)
2611 {
2612 ipmi_smi_t intf = user->intf;
2613
2614 if (intf->handlers->poll)
2615 intf->handlers->poll(intf->send_info);
2616 }
2617
/*
 * Register a new system interface (SMI) with the message handler.
 *
 * Allocates the per-interface state, links it into the global
 * interface list at the lowest free interface number, starts the
 * lower layer, fetches the BMC GUID, scans the channels (IPMI 1.5+),
 * creates the proc entries, and registers the BMC device in sysfs.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * constructed state is torn down and the interface reference dropped.
 */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void *send_info,
		      struct ipmi_device_id *device_id,
		      struct device *si_dev,
		      const char *sysfs_name,
		      unsigned char slave_addr)
{
	int i, j;
	int rv;
	ipmi_smi_t intf;
	ipmi_smi_t tintf;
	struct list_head *link;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;
	}

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	intf->ipmi_version_major = ipmi_version_major(device_id);
	intf->ipmi_version_minor = ipmi_version_minor(device_id);

	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
	if (!intf->bmc) {
		kfree(intf);
		return -ENOMEM;
	}
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	intf->bmc->id = *device_id;
	intf->si_dev = si_dev;
	/* Default all channels to the BMC slave address, LUN 2. */
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
#ifdef CONFIG_PROC_FS
	mutex_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	/* The statistics counters are atomics, no lock required. */
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	intf->proc_dir = NULL;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	get_guid(intf);

	if ((intf->ipmi_version_major > 1)
	    || ((intf->ipmi_version_major == 1)
		&& (intf->ipmi_version_minor >= 5)))
	{
		/* Start scanning the channels to see what is
		   available. */
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv)
			goto out;

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		intf->null_user_handler = NULL;
	} else {
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
	}

	if (rv == 0)
		rv = add_proc_entries(intf, i);

	rv = ipmi_bmc_register(intf, i, sysfs_name);

 out:
	if (rv) {
		/* Undo everything done above and drop our reference. */
		if (intf->proc_dir)
			remove_proc_entries(intf);
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/*
		 * Keep memory order straight for RCU readers.  Make
		 * sure everything else is committed to memory before
		 * setting intf_num to mark the interface valid.
		 */
		smp_wmb();
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		/* After this point the interface is legal to use. */
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);
	}

	return rv;
}
2768
2769 static void cleanup_smi_msgs(ipmi_smi_t intf)
2770 {
2771 int i;
2772 struct seq_table *ent;
2773
2774 /* No need for locks, the interface is down. */
2775 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2776 ent = &(intf->seq_table[i]);
2777 if (!ent->inuse)
2778 continue;
2779 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2780 }
2781 }
2782
/*
 * Tear down a registered interface: unregister the BMC device, unlink
 * the interface from the global list (under both mutexes), wait for
 * RCU readers to drain, error out in-flight messages, remove the proc
 * entries, notify the watchers, and drop the list's reference.
 * Always returns 0.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num;

	ipmi_bmc_unregister(intf);

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->handlers = NULL;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Wait until no RCU reader can still see the interface. */
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	remove_proc_entries(intf);

	/* Call all the watcher interfaces to tell them that
	   an interface is gone. */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
}
2811
/*
 * Handle an IPMB response fetched with Get Message: match it to the
 * pending request via the sequence table and deliver it to the
 * requesting user.  Returns 0 (the SMI message may always be freed);
 * unmatched or malformed responses are dropped and counted.
 */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;


	/* This is 11, not 10, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Rebuild the remote responder's address from the IPMB header. */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg))
	{
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_ipmb_responses);
	deliver_response(recv_msg);

	return 0;
}
2868
/*
 * Handle an incoming IPMB command addressed to us.  If a user has
 * registered for this netfn/cmd/channel, build an ipmi_recv_msg and
 * deliver it; otherwise reply on the wire with an "invalid command"
 * completion code.
 *
 * Returns 0 if the SMI message may be freed, 1 if it must be requeued
 * (allocation failure), or -1 if it was reused to send the error
 * response and must be neither freed nor requeued.
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_smi_handlers *handlers;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	/* Find a registered receiver for this command; take a user
	   reference while still under RCU protection. */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		/* Build an IPMB "invalid command" response back to the
		   requester, reusing the SMI message buffer. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		{
			int m;
			printk("Invalid command:");
			for (m = 0; m < msg->data_size; m++)
				printk(" %2.2x", msg->data[m]);
			printk("\n");
		}
#endif
		rcu_read_lock();
		handlers = intf->handlers;
		if (handlers) {
			handlers->sender(intf->send_info, msg, 0);
			/* We used the message, so return the value
			   that causes it to not be freed or
			   queued. */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 10, not 9 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
2983
/*
 * Handle a LAN-channel response fetched with Get Message: match it to
 * the pending request via the sequence table and deliver it to the
 * requesting user.  Returns 0 (the SMI message may always be freed);
 * unmatched or malformed responses are dropped and counted.
 */
static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;


	/* This is 13, not 12, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Rebuild the remote responder's LAN address from the header. */
	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg))
	{
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_lan_responses);
	deliver_response(recv_msg);

	return 0;
}
3043
/*
 * Handle an incoming command from a LAN channel.  If a user has
 * registered for this netfn/cmd/channel, build an ipmi_recv_msg and
 * deliver it; unlike the IPMB path, no error response is sent when no
 * user is found -- the message is simply dropped.
 *
 * Returns 0 if the SMI message may be freed, or 1 if it must be
 * requeued (allocation failure).
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	/* Find a registered receiver for this command; take a user
	   reference while still under RCU protection. */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		rv = 0; /* Don't do anything with these messages, just
			   allow them to be freed. */
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 12, not 11 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3129
3130 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3131 struct ipmi_smi_msg *msg)
3132 {
3133 struct ipmi_system_interface_addr *smi_addr;
3134
3135 recv_msg->msgid = 0;
3136 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3137 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3138 smi_addr->channel = IPMI_BMC_CHANNEL;
3139 smi_addr->lun = msg->rsp[0] & 3;
3140 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3141 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3142 recv_msg->msg.cmd = msg->rsp[1];
3143 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3144 recv_msg->msg.data = recv_msg->msg_data;
3145 recv_msg->msg.data_len = msg->rsp_size - 3;
3146 }
3147
/*
 * Handle a Read Event Message Buffer response (an asynchronous event
 * from the BMC).  The event is copied to every user that has enabled
 * event reception; when no one is listening it is queued on
 * intf->waiting_events (up to MAX_EVENTS_IN_QUEUE) for later delivery.
 *
 * Returns 1 if the SMI message must be requeued (allocation failure),
 * 0 otherwise.
 */
static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	ipmi_user_t user;
	int rv = 0;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			/* Discard the copies made so far; the event is
			   retried when the SMI message is requeued. */
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/* There's too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding"
		       " incoming events\n");
		/* Only warn once per overflow episode. */
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}
3240
3241 static int handle_bmc_rsp(ipmi_smi_t intf,
3242 struct ipmi_smi_msg *msg)
3243 {
3244 struct ipmi_recv_msg *recv_msg;
3245 struct ipmi_user *user;
3246
3247 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3248 if (recv_msg == NULL)
3249 {
3250 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3251 "could be because of a malformed message, or\n"
3252 "because of a hardware error. Contact your\n"
3253 "hardware vender for assistance\n");
3254 return 0;
3255 }
3256
3257 user = recv_msg->user;
3258 /* Make sure the user still exists. */
3259 if (user && !user->valid) {
3260 /* The user for the message went away, so give up. */
3261 ipmi_inc_stat(intf, unhandled_local_responses);
3262 ipmi_free_recv_msg(recv_msg);
3263 } else {
3264 struct ipmi_system_interface_addr *smi_addr;
3265
3266 ipmi_inc_stat(intf, handled_local_responses);
3267 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3268 recv_msg->msgid = msg->msgid;
3269 smi_addr = ((struct ipmi_system_interface_addr *)
3270 &(recv_msg->addr));
3271 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3272 smi_addr->channel = IPMI_BMC_CHANNEL;
3273 smi_addr->lun = msg->rsp[0] & 3;
3274 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3275 recv_msg->msg.cmd = msg->rsp[1];
3276 memcpy(recv_msg->msg_data,
3277 &(msg->rsp[2]),
3278 msg->rsp_size - 2);
3279 recv_msg->msg.data = recv_msg->msg_data;
3280 recv_msg->msg.data_len = msg->rsp_size - 2;
3281 deliver_response(recv_msg);
3282 }
3283
3284 return 0;
3285 }
3286
3287 /* Handle a new message. Return 1 if the message should be requeued,
3288 0 if the message should be freed, or -1 if the message should not
3289 be freed or requeued. */
3290 static int handle_new_recv_msg(ipmi_smi_t intf,
3291 struct ipmi_smi_msg *msg)
3292 {
3293 int requeue;
3294 int chan;
3295
3296 #ifdef DEBUG_MSGING
3297 int m;
3298 printk("Recv:");
3299 for (m = 0; m < msg->rsp_size; m++)
3300 printk(" %2.2x", msg->rsp[m]);
3301 printk("\n");
3302 #endif
3303 if (msg->rsp_size < 2) {
3304 /* Message is too small to be correct. */
3305 printk(KERN_WARNING PFX "BMC returned to small a message"
3306 " for netfn %x cmd %x, got %d bytes\n",
3307 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3308
3309 /* Generate an error response for the message. */
3310 msg->rsp[0] = msg->data[0] | (1 << 2);
3311 msg->rsp[1] = msg->data[1];
3312 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3313 msg->rsp_size = 3;
3314 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3315 || (msg->rsp[1] != msg->data[1])) /* Command */
3316 {
3317 /* The response is not even marginally correct. */
3318 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3319 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3320 (msg->data[0] >> 2) | 1, msg->data[1],
3321 msg->rsp[0] >> 2, msg->rsp[1]);
3322
3323 /* Generate an error response for the message. */
3324 msg->rsp[0] = msg->data[0] | (1 << 2);
3325 msg->rsp[1] = msg->data[1];
3326 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3327 msg->rsp_size = 3;
3328 }
3329
3330 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3331 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3332 && (msg->user_data != NULL))
3333 {
3334 /* It's a response to a response we sent. For this we
3335 deliver a send message response to the user. */
3336 struct ipmi_recv_msg *recv_msg = msg->user_data;
3337
3338 requeue = 0;
3339 if (msg->rsp_size < 2)
3340 /* Message is too small to be correct. */
3341 goto out;
3342
3343 chan = msg->data[2] & 0x0f;
3344 if (chan >= IPMI_MAX_CHANNELS)
3345 /* Invalid channel number */
3346 goto out;
3347
3348 if (!recv_msg)
3349 goto out;
3350
3351 /* Make sure the user still exists. */
3352 if (!recv_msg->user || !recv_msg->user->valid)
3353 goto out;
3354
3355 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3356 recv_msg->msg.data = recv_msg->msg_data;
3357 recv_msg->msg.data_len = 1;
3358 recv_msg->msg_data[0] = msg->rsp[2];
3359 deliver_response(recv_msg);
3360 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3361 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3362 {
3363 /* It's from the receive queue. */
3364 chan = msg->rsp[3] & 0xf;
3365 if (chan >= IPMI_MAX_CHANNELS) {
3366 /* Invalid channel number */
3367 requeue = 0;
3368 goto out;
3369 }
3370
3371 switch (intf->channels[chan].medium) {
3372 case IPMI_CHANNEL_MEDIUM_IPMB:
3373 if (msg->rsp[4] & 0x04) {
3374 /* It's a response, so find the
3375 requesting message and send it up. */
3376 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3377 } else {
3378 /* It's a command to the SMS from some other
3379 entity. Handle that. */
3380 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3381 }
3382 break;
3383
3384 case IPMI_CHANNEL_MEDIUM_8023LAN:
3385 case IPMI_CHANNEL_MEDIUM_ASYNC:
3386 if (msg->rsp[6] & 0x04) {
3387 /* It's a response, so find the
3388 requesting message and send it up. */
3389 requeue = handle_lan_get_msg_rsp(intf, msg);
3390 } else {
3391 /* It's a command to the SMS from some other
3392 entity. Handle that. */
3393 requeue = handle_lan_get_msg_cmd(intf, msg);
3394 }
3395 break;
3396
3397 default:
3398 /* We don't handle the channel type, so just
3399 * free the message. */
3400 requeue = 0;
3401 }
3402
3403 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3404 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3405 {
3406 /* It's an asyncronous event. */
3407 requeue = handle_read_event_rsp(intf, msg);
3408 } else {
3409 /* It's a response from the local BMC. */
3410 requeue = handle_bmc_rsp(intf, msg);
3411 }
3412
3413 out:
3414 return requeue;
3415 }
3416
/* Handle a new message from the lower layer.  Takes ownership of
   @msg: it is either handled and freed here, or queued on
   intf->waiting_msgs for later processing by the timeout handler.
   When intf->run_to_completion is set (panic path), all locking is
   skipped — presumably only one CPU is running then; verify against
   the panic notifier. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int rv;
	int run_to_completion;


	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL))
	{
		/* This is the local response to a command send, start
		   the timer for these.  The user_data will not be
		   NULL if this is a response send, and we will let
		   response sends just go through. */

		/* Check for errors, if we get certain errors (ones
		   that mean basically we can try again later), we
		   ignore them and start the timer.  Otherwise we
		   report the error immediately. */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
		{
			int chan = msg->rsp[3] & 0xf;

			/* Got an error sending the message, handle it. */
			if (chan >= IPMI_MAX_CHANNELS)
				; /* This shouldn't happen */
			else if ((intf->channels[chan].medium
				  == IPMI_CHANNEL_MEDIUM_8023LAN)
				 || (intf->channels[chan].medium
				     == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			/* Fail the pending sequence-table entry with
			   the BMC's completion code. */
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else {
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		}

		ipmi_free_smi_msg(msg);
		goto out;
	}

	/* To preserve message order, if the list is not empty, we
	   tack this message onto the end of the list. */
	run_to_completion = intf->run_to_completion;
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	if (!list_empty(&intf->waiting_msgs)) {
		list_add_tail(&msg->link, &intf->waiting_msgs);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
		goto out;
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

	rv = handle_new_recv_msg(intf, msg);
	if (rv > 0) {
		/* Could not handle the message now, just add it to a
		   list to handle later. */
		run_to_completion = intf->run_to_completion;
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_msgs);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	} else if (rv == 0) {
		ipmi_free_smi_msg(msg);
	}
	/* rv < 0: message was consumed, neither freed nor requeued. */

 out:
	return;
}
3499
3500 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3501 {
3502 ipmi_user_t user;
3503
3504 rcu_read_lock();
3505 list_for_each_entry_rcu(user, &intf->users, link) {
3506 if (!user->handler->ipmi_watchdog_pretimeout)
3507 continue;
3508
3509 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3510 }
3511 rcu_read_unlock();
3512 }
3513
3514
3515 static struct ipmi_smi_msg *
3516 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3517 unsigned char seq, long seqid)
3518 {
3519 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3520 if (!smi_msg)
3521 /* If we can't allocate the message, then just return, we
3522 get 4 retries, so this should be ok. */
3523 return NULL;
3524
3525 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3526 smi_msg->data_size = recv_msg->msg.data_len;
3527 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3528
3529 #ifdef DEBUG_MSGING
3530 {
3531 int m;
3532 printk("Resend: ");
3533 for (m = 0; m < smi_msg->data_size; m++)
3534 printk(" %2.2x", smi_msg->data[m]);
3535 printk("\n");
3536 }
3537 #endif
3538 return smi_msg;
3539 }
3540
/* Age one sequence-table entry by timeout_period (ms).  If it has
   expired and has no retries left, move its recv_msg onto the
   caller's timeouts list for error delivery; otherwise retransmit
   it.  Called with intf->seq_lock held (irq state saved in *flags);
   the lock is dropped and re-acquired around the actual send, so the
   caller must not rely on table state across this call. */
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
			      struct list_head *timeouts, long timeout_period,
			      int slot, unsigned long *flags)
{
	struct ipmi_recv_msg *msg;
	struct ipmi_smi_handlers *handlers;

	/* Interface not fully set up yet. */
	if (intf->intf_num == -1)
		return;

	if (!ent->inuse)
		return;

	ent->timeout -= timeout_period;
	if (ent->timeout > 0)
		return;

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		/* Start with the max timer, set to normal
		   timer after the message is sent. */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
			ipmi_inc_stat(intf, retransmitted_lan_commands);
		else
			ipmi_inc_stat(intf, retransmitted_ipmb_commands);

		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg)
			/* Allocation failed; the entry keeps its
			   remaining retries for the next tick. */
			return;

		/* Drop the seq lock while calling into the driver;
		   the sender may sleep or take its own locks. */
		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/* Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is
		 * that critical now, and high priority
		 * messages are really only for messages to the
		 * local MC, which don't get resent. */
		handlers = intf->handlers;
		if (handlers)
			intf->handlers->sender(intf->send_info,
					       smi_msg, 0);
		else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
3604
/* Per-tick timeout processing for all interfaces: drain what we can
   of each interface's waiting-message queue, expire or retransmit
   sequence-table entries, and age the maintenance-mode auto-timeout.
   timeout_period is the elapsed interval in milliseconds. */
static void ipmi_timeout_handler(long timeout_period)
{
	ipmi_smi_t intf;
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	struct ipmi_smi_msg *smi_msg, *smi_msg2;
	unsigned long flags;
	int i;

	rcu_read_lock();
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		/* See if any waiting messages need to be processed. */
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_for_each_entry_safe(smi_msg, smi_msg2,
					 &intf->waiting_msgs, link) {
			if (!handle_new_recv_msg(intf, smi_msg)) {
				list_del(&smi_msg->link);
				ipmi_free_smi_msg(smi_msg);
			} else {
				/* To preserve message order, quit if we
				   can't handle a message. */
				break;
			}
		}
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

		/* Go through the seq table and find any messages that
		   have timed out, putting them in the timeouts
		   list. */
		INIT_LIST_HEAD(&timeouts);
		spin_lock_irqsave(&intf->seq_lock, flags);
		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
			check_msg_timeout(intf, &(intf->seq_table[i]),
					  &timeouts, timeout_period, i,
					  &flags);
		spin_unlock_irqrestore(&intf->seq_lock, flags);

		/* Deliver the timeout errors outside the seq lock. */
		list_for_each_entry_safe(msg, msg2, &timeouts, link)
			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);

		/*
		 * Maintenance mode handling.  Check the timeout
		 * optimistically before we claim the lock.  It may
		 * mean a timeout gets missed occasionally, but that
		 * only means the timeout gets extended by one period
		 * in that case.  No big deal, and it avoids the lock
		 * most of the time.
		 */
		if (intf->auto_maintenance_timeout > 0) {
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			if (intf->auto_maintenance_timeout > 0) {
				intf->auto_maintenance_timeout
					-= timeout_period;
				if (!intf->maintenance_mode
				    && (intf->auto_maintenance_timeout <= 0))
				{
					intf->maintenance_mode_enable = 0;
					maintenance_mode_update(intf);
				}
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}
	}
	rcu_read_unlock();
}
3671
3672 static void ipmi_request_event(void)
3673 {
3674 ipmi_smi_t intf;
3675 struct ipmi_smi_handlers *handlers;
3676
3677 rcu_read_lock();
3678 /* Called from the timer, no need to check if handlers is
3679 * valid. */
3680 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3681 /* No event requests when in maintenance mode. */
3682 if (intf->maintenance_mode_enable)
3683 continue;
3684
3685 handlers = intf->handlers;
3686 if (handlers)
3687 handlers->request_events(intf->send_info);
3688 }
3689 rcu_read_unlock();
3690 }
3691
/* The single periodic timer driving all timeout/event processing. */
static struct timer_list ipmi_timer;

/* Call every ~100 ms. */
#define IPMI_TIMEOUT_TIME 100

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/* Request events from the queue every second (this is the number of
   IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
   future, IPMI will add a way to know immediately if an event is in
   the queue and this silliness can go away. */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* Set (and never cleared) at module exit so the timer handler stops
   rearming itself before del_timer_sync() is called. */
static atomic_t stop_operation;
/* Ticks left until the next event-queue poll; only touched from the
   timer callback, so no locking is needed. */
static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3708
3709 static void ipmi_timeout(unsigned long data)
3710 {
3711 if (atomic_read(&stop_operation))
3712 return;
3713
3714 ticks_to_req_ev--;
3715 if (ticks_to_req_ev == 0) {
3716 ipmi_request_event();
3717 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3718 }
3719
3720 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3721
3722 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3723 }
3724
3725
/* Counts of allocated-but-not-freed messages, checked at module exit
   to report message leaks. */
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3728
/* FIXME - convert these to slabs. */
/* done() handler for dynamically-allocated SMI messages: drop the
   in-use count and free the storage. */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}
3735
3736 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3737 {
3738 struct ipmi_smi_msg *rv;
3739 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3740 if (rv) {
3741 rv->done = free_smi_msg;
3742 rv->user_data = NULL;
3743 atomic_inc(&smi_msg_inuse_count);
3744 }
3745 return rv;
3746 }
3747
/* done() handler for dynamically-allocated receive messages: drop
   the in-use count and free the storage. */
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}
3753
3754 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3755 {
3756 struct ipmi_recv_msg *rv;
3757
3758 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3759 if (rv) {
3760 rv->user = NULL;
3761 rv->done = free_recv_msg;
3762 atomic_inc(&recv_msg_inuse_count);
3763 }
3764 return rv;
3765 }
3766
/* Free a receive message, dropping the reference it holds on its
   owning user (if any).  The release goes through msg->done so that
   caller-supplied (non-kmalloc) messages are handled correctly. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
3773
3774 #ifdef CONFIG_IPMI_PANIC_EVENT
3775
/* No-op done() handlers for the on-stack messages used when sending
   panic events — those must not be kfree()d. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}
3783
3784 #ifdef CONFIG_IPMI_PANIC_STRING
3785 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3786 {
3787 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3788 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3789 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3790 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3791 {
3792 /* A get event receiver command, save it. */
3793 intf->event_receiver = msg->msg.data[1];
3794 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3795 }
3796 }
3797
3798 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3799 {
3800 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3801 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3802 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3803 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3804 {
3805 /* A get device id command, save if we are an event
3806 receiver or generator. */
3807 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3808 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3809 }
3810 }
3811 #endif
3812
/* Log an "OS Critical Stop" event (and, when configured, the panic
   string as OEM SEL records) to every registered BMC.  Runs from the
   panic notifier with run_to_completion set, so the on-stack
   smi_msg/recv_msg with no-op done handlers are safe — the requests
   complete synchronously.  @str is the panic string, may be NULL. */
static void send_panic_events(char *str)
{
	struct kernel_ipmi_msg msg;
	ipmi_smi_t intf;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/* Put a few breadcrumbs in.  Hopefully later we can add more things
	   to make the panic events more useful. */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* On-stack messages must not be freed by the message code. */
	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		intf->run_to_completion = 1;
		/* Send the event announcing the panic. */
		intf->handlers->set_run_to_completion(intf->send_info, 1);
		i_ipmi_request(NULL,
			       intf,
			       &addr,
			       0,
			       &msg,
			       intf,
			       &smi_msg,
			       &recv_msg,
			       0,
			       intf->channels[0].address,
			       intf->channels[0].lun,
			       0, 1); /* Don't retry, and don't wait. */
	}

#ifdef CONFIG_IPMI_PANIC_STRING
	/* On every interface, dump a bunch of OEM event holding the
	   string. */
	if (!str)
		return;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		char *p = str;
		struct ipmi_ipmb_addr *ipmb;
		int j;

		if (intf->intf_num == -1)
			/* Interface was not ready yet. */
			continue;

		/*
		 * intf_num is used as an marker to tell if the
		 * interface is valid.  Thus we need a read barrier to
		 * make sure data fetched before checking intf_num
		 * won't be used.
		 */
		smp_rmb();

		/* First job here is to figure out where to send the
		   OEM events.  There's no way in IPMI to send OEM
		   events using an event send command, so we have to
		   find the SEL to put them in and stick them in
		   there. */

		/* Get capabilities from the get device id. */
		intf->local_sel_device = 0;
		intf->local_event_generator = 0;
		intf->event_receiver = 0;

		/* Request the device info from the local MC; the
		   null_user_handler captures the response fields. */
		msg.netfn = IPMI_NETFN_APP_REQUEST;
		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = device_id_fetcher;
		i_ipmi_request(NULL,
			       intf,
			       &addr,
			       0,
			       &msg,
			       intf,
			       &smi_msg,
			       &recv_msg,
			       0,
			       intf->channels[0].address,
			       intf->channels[0].lun,
			       0, 1); /* Don't retry, and don't wait. */

		if (intf->local_event_generator) {
			/* Request the event receiver from the local MC. */
			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
			msg.data = NULL;
			msg.data_len = 0;
			intf->null_user_handler = event_receiver_fetcher;
			i_ipmi_request(NULL,
				       intf,
				       &addr,
				       0,
				       &msg,
				       intf,
				       &smi_msg,
				       &recv_msg,
				       0,
				       intf->channels[0].address,
				       intf->channels[0].lun,
				       0, 1); /* no retry, and no wait. */
		}
		intf->null_user_handler = NULL;

		/* Validate the event receiver.  The low bit must not
		   be 1 (it must be a valid IPMB address), it cannot
		   be zero, and it must not be my address. */
		if (((intf->event_receiver & 1) == 0)
		    && (intf->event_receiver != 0)
		    && (intf->event_receiver != intf->channels[0].address))
		{
			/* The event receiver is valid, send an IPMB
			   message. */
			ipmb = (struct ipmi_ipmb_addr *) &addr;
			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb->channel = 0; /* FIXME - is this right? */
			ipmb->lun = intf->event_receiver_lun;
			ipmb->slave_addr = intf->event_receiver;
		} else if (intf->local_sel_device) {
			/* The event receiver was not valid (or was
			   me), but I am an SEL device, just dump it
			   in my SEL. */
			si = (struct ipmi_system_interface_addr *) &addr;
			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			si->channel = IPMI_BMC_CHANNEL;
			si->lun = 0;
		} else
			continue; /* No where to send the event. */


		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
		msg.data = data;
		msg.data_len = 16;

		/* Chop the panic string into 11-byte chunks, one OEM
		   SEL record each. */
		j = 0;
		while (*p) {
			int size = strlen(p);

			if (size > 11)
				size = 11;
			data[0] = 0;
			data[1] = 0;
			data[2] = 0xf0; /* OEM event without timestamp. */
			data[3] = intf->channels[0].address;
			data[4] = j++; /* sequence # */
			/* Always give 11 bytes, so strncpy will fill
			   it with zeroes for me. */
			strncpy(data+5, p, 11);
			p += size;

			i_ipmi_request(NULL,
				       intf,
				       &addr,
				       0,
				       &msg,
				       intf,
				       &smi_msg,
				       &recv_msg,
				       0,
				       intf->channels[0].address,
				       intf->channels[0].lun,
				       0, 1); /* no retry, and no wait. */
		}
	}
#endif /* CONFIG_IPMI_PANIC_STRING */
}
4012 #endif /* CONFIG_IPMI_PANIC_EVENT */
4013
/* Nonzero once the panic notifier has run; guards against re-entry
   if the notifier chain fires more than once.  A plain int is
   presumably sufficient at panic time — TODO confirm other CPUs are
   stopped by then. */
static int has_panicked;

/* Panic notifier: switch every interface to polled run-to-completion
   mode and, when configured, log panic events to the BMCs.  The list
   walk is done without rcu_read_lock(); nothing else should be
   running at panic time. */
static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	ipmi_smi_t intf;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		intf->run_to_completion = 1;
		intf->handlers->set_run_to_completion(intf->send_info, 1);
	}

#ifdef CONFIG_IPMI_PANIC_EVENT
	/* ptr is the panic string from the notifier call. */
	send_panic_events(ptr);
#endif

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
	.next = NULL,
	.priority = 200 /* priority: INT_MAX >= x >= 0 */
};
4048
4049 static int ipmi_init_msghandler(void)
4050 {
4051 int rv;
4052
4053 if (initialized)
4054 return 0;
4055
4056 rv = driver_register(&ipmidriver);
4057 if (rv) {
4058 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4059 return rv;
4060 }
4061
4062 printk(KERN_INFO "ipmi message handler version "
4063 IPMI_DRIVER_VERSION "\n");
4064
4065 #ifdef CONFIG_PROC_FS
4066 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4067 if (!proc_ipmi_root) {
4068 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4069 return -ENOMEM;
4070 }
4071
4072 proc_ipmi_root->owner = THIS_MODULE;
4073 #endif /* CONFIG_PROC_FS */
4074
4075 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4076 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4077
4078 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4079
4080 initialized = 1;
4081
4082 return 0;
4083 }
4084
4085 static __init int ipmi_init_msghandler_mod(void)
4086 {
4087 ipmi_init_msghandler();
4088 return 0;
4089 }
4090
/* Module exit: unhook the panic notifier, stop the periodic timer,
   remove the proc directory and driver registration, and warn about
   any leaked messages.  Cannot be called while interfaces exist. */
static __exit void cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/* This can't be called if any interfaces exist, so no worry about
	   shutting down the interfaces. */

	/* Tell the timer to stop, then wait for it to stop.  This avoids
	   problems with race conditions removing the timer here. */
	atomic_inc(&stop_operation);
	del_timer_sync(&ipmi_timer);

#ifdef CONFIG_PROC_FS
	remove_proc_entry(proc_ipmi_root->name, NULL);
#endif /* CONFIG_PROC_FS */

	driver_unregister(&ipmidriver);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
		       count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "recv message count %d at exit\n",
		       count);
}
4126 module_exit(cleanup_ipmi);
4127
4128 module_init(ipmi_init_msghandler_mod);
4129 MODULE_LICENSE("GPL");
4130 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4131 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4132 MODULE_VERSION(IPMI_DRIVER_VERSION);
4133
4134 EXPORT_SYMBOL(ipmi_create_user);
4135 EXPORT_SYMBOL(ipmi_destroy_user);
4136 EXPORT_SYMBOL(ipmi_get_version);
4137 EXPORT_SYMBOL(ipmi_request_settime);
4138 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4139 EXPORT_SYMBOL(ipmi_poll_interface);
4140 EXPORT_SYMBOL(ipmi_register_smi);
4141 EXPORT_SYMBOL(ipmi_unregister_smi);
4142 EXPORT_SYMBOL(ipmi_register_for_cmd);
4143 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4144 EXPORT_SYMBOL(ipmi_smi_msg_received);
4145 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4146 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4147 EXPORT_SYMBOL(ipmi_addr_length);
4148 EXPORT_SYMBOL(ipmi_validate_addr);
4149 EXPORT_SYMBOL(ipmi_set_gets_events);
4150 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4151 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4152 EXPORT_SYMBOL(ipmi_set_my_address);
4153 EXPORT_SYMBOL(ipmi_get_my_address);
4154 EXPORT_SYMBOL(ipmi_set_my_LUN);
4155 EXPORT_SYMBOL(ipmi_get_my_LUN);
4156 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4157 EXPORT_SYMBOL(ipmi_free_recv_msg);
/* (gitweb extraction footer, not part of the source:
   "This page took 0.175572 seconds and 6 git commands to generate.") */