4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
48 #define PFX "IPMI message handler: "
50 #define IPMI_DRIVER_VERSION "39.2"
52 static struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
55 static int initialized
;
58 static struct proc_dir_entry
*proc_ipmi_root
;
59 #endif /* CONFIG_PROC_FS */
61 /* Remain in auto-maintenance mode for this amount of time (in ms). */
62 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
64 #define MAX_EVENTS_IN_QUEUE 25
66 /* Don't let a message sit in a queue forever, always time it with at least
67 the max message timer. This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT 60000
71 * The main "user" data structure.
75 struct list_head link
;
77 /* Set to "0" when the user is destroyed. */
82 /* The upper layer that handles receive messages. */
83 struct ipmi_user_hndl
*handler
;
86 /* The interface this user is bound to. */
89 /* Does this interface receive IPMI events? */
95 struct list_head link
;
103 * This is used to form a linked list during mass deletion.
104 * Since this is in an RCU list, we cannot use the link above
105 * or change any data until the RCU period completes. So we
106 * use this next variable during mass deletion so we can have
107 * a list and don't have to wait and restart the search on
108 * every individual deletion of a command. */
109 struct cmd_rcvr
*next
;
114 unsigned int inuse
: 1;
115 unsigned int broadcast
: 1;
117 unsigned long timeout
;
118 unsigned long orig_timeout
;
119 unsigned int retries_left
;
121 /* To verify on an incoming send message response that this is
122 the message that the response is for, we keep a sequence id
123 and increment it every time we send a message. */
126 /* This is held so we can properly respond to the message on a
127 timeout, and it is used to hold the temporary data for
128 retransmission, too. */
129 struct ipmi_recv_msg
*recv_msg
;
132 /* Store the information in a msgid (long) to allow us to find a
133 sequence table entry from the msgid. */
134 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
136 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
138 seq = ((msgid >> 26) & 0x3f); \
139 seqid = (msgid & 0x3fffff); \
142 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
146 unsigned char medium
;
147 unsigned char protocol
;
149 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
150 but may be changed by the user. */
151 unsigned char address
;
153 /* My LUN. This should generally stay the SMS LUN, but just in
158 #ifdef CONFIG_PROC_FS
159 struct ipmi_proc_entry
162 struct ipmi_proc_entry
*next
;
168 struct platform_device
*dev
;
169 struct ipmi_device_id id
;
170 unsigned char guid
[16];
173 struct kref refcount
;
175 /* bmc device attributes */
176 struct device_attribute device_id_attr
;
177 struct device_attribute provides_dev_sdrs_attr
;
178 struct device_attribute revision_attr
;
179 struct device_attribute firmware_rev_attr
;
180 struct device_attribute version_attr
;
181 struct device_attribute add_dev_support_attr
;
182 struct device_attribute manufacturer_id_attr
;
183 struct device_attribute product_id_attr
;
184 struct device_attribute guid_attr
;
185 struct device_attribute aux_firmware_rev_attr
;
189 * Various statistics for IPMI, these index stats[] in the ipmi_smi
192 enum ipmi_stat_indexes
{
193 /* Commands we got from the user that were invalid. */
194 IPMI_STAT_sent_invalid_commands
= 0,
196 /* Commands we sent to the MC. */
197 IPMI_STAT_sent_local_commands
,
199 /* Responses from the MC that were delivered to a user. */
200 IPMI_STAT_handled_local_responses
,
202 /* Responses from the MC that were not delivered to a user. */
203 IPMI_STAT_unhandled_local_responses
,
205 /* Commands we sent out to the IPMB bus. */
206 IPMI_STAT_sent_ipmb_commands
,
208 /* Commands sent on the IPMB that had errors on the SEND CMD */
209 IPMI_STAT_sent_ipmb_command_errs
,
211 /* Each retransmit increments this count. */
212 IPMI_STAT_retransmitted_ipmb_commands
,
215 * When a message times out (runs out of retransmits) this is
218 IPMI_STAT_timed_out_ipmb_commands
,
221 * This is like above, but for broadcasts. Broadcasts are
222 * *not* included in the above count (they are expected to
225 IPMI_STAT_timed_out_ipmb_broadcasts
,
227 /* Responses I have sent to the IPMB bus. */
228 IPMI_STAT_sent_ipmb_responses
,
230 /* The response was delivered to the user. */
231 IPMI_STAT_handled_ipmb_responses
,
233 /* The response had invalid data in it. */
234 IPMI_STAT_invalid_ipmb_responses
,
236 /* The response didn't have anyone waiting for it. */
237 IPMI_STAT_unhandled_ipmb_responses
,
239 /* Commands we sent out to the IPMB bus. */
240 IPMI_STAT_sent_lan_commands
,
242 /* Commands sent on the IPMB that had errors on the SEND CMD */
243 IPMI_STAT_sent_lan_command_errs
,
245 /* Each retransmit increments this count. */
246 IPMI_STAT_retransmitted_lan_commands
,
249 * When a message times out (runs out of retransmits) this is
252 IPMI_STAT_timed_out_lan_commands
,
254 /* Responses I have sent to the IPMB bus. */
255 IPMI_STAT_sent_lan_responses
,
257 /* The response was delivered to the user. */
258 IPMI_STAT_handled_lan_responses
,
260 /* The response had invalid data in it. */
261 IPMI_STAT_invalid_lan_responses
,
263 /* The response didn't have anyone waiting for it. */
264 IPMI_STAT_unhandled_lan_responses
,
266 /* The command was delivered to the user. */
267 IPMI_STAT_handled_commands
,
269 /* The command had invalid data in it. */
270 IPMI_STAT_invalid_commands
,
272 /* The command didn't have anyone waiting for it. */
273 IPMI_STAT_unhandled_commands
,
275 /* Invalid data in an event. */
276 IPMI_STAT_invalid_events
,
278 /* Events that were received with the proper format. */
282 /* This *must* remain last, add new values above this. */
287 #define IPMI_IPMB_NUM_SEQ 64
288 #define IPMI_MAX_CHANNELS 16
291 /* What interface number are we? */
294 struct kref refcount
;
296 /* Used for a list of interfaces. */
297 struct list_head link
;
299 /* The list of upper layers that are using me. seq_lock
301 struct list_head users
;
303 /* Information to supply to users. */
304 unsigned char ipmi_version_major
;
305 unsigned char ipmi_version_minor
;
307 /* Used for wake ups at startup. */
308 wait_queue_head_t waitq
;
310 struct bmc_device
*bmc
;
314 /* This is the lower-layer's sender routine. Note that you
315 * must either be holding the ipmi_interfaces_mutex or be in
316 * an unpreemptible region to use this. You must fetch the
317 * value into a local variable and make sure it is not NULL. */
318 struct ipmi_smi_handlers
*handlers
;
321 #ifdef CONFIG_PROC_FS
322 /* A list of proc entries for this interface. */
323 struct mutex proc_entry_lock
;
324 struct ipmi_proc_entry
*proc_entries
;
327 /* Driver-model device for the system interface. */
328 struct device
*si_dev
;
330 /* A table of sequence numbers for this interface. We use the
331 sequence numbers for IPMB messages that go out of the
332 interface to match them up with their responses. A routine
333 is called periodically to time the items in this list. */
335 struct seq_table seq_table
[IPMI_IPMB_NUM_SEQ
];
338 /* Messages that were delayed for some reason (out of memory,
339 for instance), will go in here to be processed later in a
340 periodic timer interrupt. */
341 spinlock_t waiting_msgs_lock
;
342 struct list_head waiting_msgs
;
344 /* The list of command receivers that are registered for commands
345 on this interface. */
346 struct mutex cmd_rcvrs_mutex
;
347 struct list_head cmd_rcvrs
;
349 /* Events that were queued because no one was there to receive
351 spinlock_t events_lock
; /* For dealing with event stuff. */
352 struct list_head waiting_events
;
353 unsigned int waiting_events_count
; /* How many events in queue? */
354 char delivering_events
;
355 char event_msg_printed
;
357 /* The event receiver for my BMC, only really used at panic
358 shutdown as a place to store this. */
359 unsigned char event_receiver
;
360 unsigned char event_receiver_lun
;
361 unsigned char local_sel_device
;
362 unsigned char local_event_generator
;
364 /* For handling of maintenance mode. */
365 int maintenance_mode
;
366 int maintenance_mode_enable
;
367 int auto_maintenance_timeout
;
368 spinlock_t maintenance_mode_lock
; /* Used in a timer... */
370 /* A cheap hack, if this is non-null and a message to an
371 interface comes in with a NULL user, call this routine with
372 it. Note that the message will still be freed by the
373 caller. This only works on the system interface. */
374 void (*null_user_handler
)(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
);
376 /* When we are scanning the channels for an SMI, this will
377 tell which channel we are scanning. */
380 /* Channel information */
381 struct ipmi_channel channels
[IPMI_MAX_CHANNELS
];
384 struct proc_dir_entry
*proc_dir
;
385 char proc_dir_name
[10];
387 atomic_t stats
[IPMI_NUM_STATS
];
390 * run_to_completion duplicate of smb_info, smi_info
391 * and ipmi_serial_info structures. Used to decrease numbers of
392 * parameters passed by "low" level IPMI code.
394 int run_to_completion
;
396 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
399 * The driver model view of the IPMI messaging driver.
401 static struct device_driver ipmidriver
= {
403 .bus
= &platform_bus_type
405 static DEFINE_MUTEX(ipmidriver_mutex
);
407 static LIST_HEAD(ipmi_interfaces
);
408 static DEFINE_MUTEX(ipmi_interfaces_mutex
);
410 /* List of watchers that want to know when smi's are added and
412 static LIST_HEAD(smi_watchers
);
413 static DEFINE_MUTEX(smi_watchers_mutex
);
416 #define ipmi_inc_stat(intf, stat) \
417 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
418 #define ipmi_get_stat(intf, stat) \
419 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
422 static void free_recv_msg_list(struct list_head
*q
)
424 struct ipmi_recv_msg
*msg
, *msg2
;
426 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
427 list_del(&msg
->link
);
428 ipmi_free_recv_msg(msg
);
432 static void free_smi_msg_list(struct list_head
*q
)
434 struct ipmi_smi_msg
*msg
, *msg2
;
436 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
437 list_del(&msg
->link
);
438 ipmi_free_smi_msg(msg
);
442 static void clean_up_interface_data(ipmi_smi_t intf
)
445 struct cmd_rcvr
*rcvr
, *rcvr2
;
446 struct list_head list
;
448 free_smi_msg_list(&intf
->waiting_msgs
);
449 free_recv_msg_list(&intf
->waiting_events
);
452 * Wholesale remove all the entries from the list in the
453 * interface and wait for RCU to know that none are in use.
455 mutex_lock(&intf
->cmd_rcvrs_mutex
);
456 INIT_LIST_HEAD(&list
);
457 list_splice_init_rcu(&intf
->cmd_rcvrs
, &list
, synchronize_rcu
);
458 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
460 list_for_each_entry_safe(rcvr
, rcvr2
, &list
, link
)
463 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
464 if ((intf
->seq_table
[i
].inuse
)
465 && (intf
->seq_table
[i
].recv_msg
))
467 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
472 static void intf_free(struct kref
*ref
)
474 ipmi_smi_t intf
= container_of(ref
, struct ipmi_smi
, refcount
);
476 clean_up_interface_data(intf
);
480 struct watcher_entry
{
483 struct list_head link
;
486 int ipmi_smi_watcher_register(struct ipmi_smi_watcher
*watcher
)
489 LIST_HEAD(to_deliver
);
490 struct watcher_entry
*e
, *e2
;
492 mutex_lock(&smi_watchers_mutex
);
494 mutex_lock(&ipmi_interfaces_mutex
);
496 /* Build a list of things to deliver. */
497 list_for_each_entry(intf
, &ipmi_interfaces
, link
) {
498 if (intf
->intf_num
== -1)
500 e
= kmalloc(sizeof(*e
), GFP_KERNEL
);
503 kref_get(&intf
->refcount
);
505 e
->intf_num
= intf
->intf_num
;
506 list_add_tail(&e
->link
, &to_deliver
);
509 /* We will succeed, so add it to the list. */
510 list_add(&watcher
->link
, &smi_watchers
);
512 mutex_unlock(&ipmi_interfaces_mutex
);
514 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
516 watcher
->new_smi(e
->intf_num
, e
->intf
->si_dev
);
517 kref_put(&e
->intf
->refcount
, intf_free
);
521 mutex_unlock(&smi_watchers_mutex
);
526 mutex_unlock(&ipmi_interfaces_mutex
);
527 mutex_unlock(&smi_watchers_mutex
);
528 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
530 kref_put(&e
->intf
->refcount
, intf_free
);
536 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher
*watcher
)
538 mutex_lock(&smi_watchers_mutex
);
539 list_del(&(watcher
->link
));
540 mutex_unlock(&smi_watchers_mutex
);
545 * Must be called with smi_watchers_mutex held.
548 call_smi_watchers(int i
, struct device
*dev
)
550 struct ipmi_smi_watcher
*w
;
552 list_for_each_entry(w
, &smi_watchers
, link
) {
553 if (try_module_get(w
->owner
)) {
555 module_put(w
->owner
);
561 ipmi_addr_equal(struct ipmi_addr
*addr1
, struct ipmi_addr
*addr2
)
563 if (addr1
->addr_type
!= addr2
->addr_type
)
566 if (addr1
->channel
!= addr2
->channel
)
569 if (addr1
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
570 struct ipmi_system_interface_addr
*smi_addr1
571 = (struct ipmi_system_interface_addr
*) addr1
;
572 struct ipmi_system_interface_addr
*smi_addr2
573 = (struct ipmi_system_interface_addr
*) addr2
;
574 return (smi_addr1
->lun
== smi_addr2
->lun
);
577 if ((addr1
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
578 || (addr1
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
580 struct ipmi_ipmb_addr
*ipmb_addr1
581 = (struct ipmi_ipmb_addr
*) addr1
;
582 struct ipmi_ipmb_addr
*ipmb_addr2
583 = (struct ipmi_ipmb_addr
*) addr2
;
585 return ((ipmb_addr1
->slave_addr
== ipmb_addr2
->slave_addr
)
586 && (ipmb_addr1
->lun
== ipmb_addr2
->lun
));
589 if (addr1
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
590 struct ipmi_lan_addr
*lan_addr1
591 = (struct ipmi_lan_addr
*) addr1
;
592 struct ipmi_lan_addr
*lan_addr2
593 = (struct ipmi_lan_addr
*) addr2
;
595 return ((lan_addr1
->remote_SWID
== lan_addr2
->remote_SWID
)
596 && (lan_addr1
->local_SWID
== lan_addr2
->local_SWID
)
597 && (lan_addr1
->session_handle
598 == lan_addr2
->session_handle
)
599 && (lan_addr1
->lun
== lan_addr2
->lun
));
605 int ipmi_validate_addr(struct ipmi_addr
*addr
, int len
)
607 if (len
< sizeof(struct ipmi_system_interface_addr
)) {
611 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
612 if (addr
->channel
!= IPMI_BMC_CHANNEL
)
617 if ((addr
->channel
== IPMI_BMC_CHANNEL
)
618 || (addr
->channel
>= IPMI_MAX_CHANNELS
)
619 || (addr
->channel
< 0))
622 if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
623 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
625 if (len
< sizeof(struct ipmi_ipmb_addr
)) {
631 if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
632 if (len
< sizeof(struct ipmi_lan_addr
)) {
641 unsigned int ipmi_addr_length(int addr_type
)
643 if (addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
644 return sizeof(struct ipmi_system_interface_addr
);
646 if ((addr_type
== IPMI_IPMB_ADDR_TYPE
)
647 || (addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
649 return sizeof(struct ipmi_ipmb_addr
);
652 if (addr_type
== IPMI_LAN_ADDR_TYPE
)
653 return sizeof(struct ipmi_lan_addr
);
658 static void deliver_response(struct ipmi_recv_msg
*msg
)
661 ipmi_smi_t intf
= msg
->user_msg_data
;
663 /* Special handling for NULL users. */
664 if (intf
->null_user_handler
) {
665 intf
->null_user_handler(intf
, msg
);
666 ipmi_inc_stat(intf
, handled_local_responses
);
668 /* No handler, so give up. */
669 ipmi_inc_stat(intf
, unhandled_local_responses
);
671 ipmi_free_recv_msg(msg
);
673 ipmi_user_t user
= msg
->user
;
674 user
->handler
->ipmi_recv_hndl(msg
, user
->handler_data
);
679 deliver_err_response(struct ipmi_recv_msg
*msg
, int err
)
681 msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
682 msg
->msg_data
[0] = err
;
683 msg
->msg
.netfn
|= 1; /* Convert to a response. */
684 msg
->msg
.data_len
= 1;
685 msg
->msg
.data
= msg
->msg_data
;
686 deliver_response(msg
);
689 /* Find the next sequence number not being used and add the given
690 message with the given timeout to the sequence table. This must be
691 called with the interface's seq_lock held. */
692 static int intf_next_seq(ipmi_smi_t intf
,
693 struct ipmi_recv_msg
*recv_msg
,
694 unsigned long timeout
,
703 for (i
= intf
->curr_seq
;
704 (i
+1)%IPMI_IPMB_NUM_SEQ
!= intf
->curr_seq
;
705 i
= (i
+1)%IPMI_IPMB_NUM_SEQ
)
707 if (!intf
->seq_table
[i
].inuse
)
711 if (!intf
->seq_table
[i
].inuse
) {
712 intf
->seq_table
[i
].recv_msg
= recv_msg
;
714 /* Start with the maximum timeout, when the send response
715 comes in we will start the real timer. */
716 intf
->seq_table
[i
].timeout
= MAX_MSG_TIMEOUT
;
717 intf
->seq_table
[i
].orig_timeout
= timeout
;
718 intf
->seq_table
[i
].retries_left
= retries
;
719 intf
->seq_table
[i
].broadcast
= broadcast
;
720 intf
->seq_table
[i
].inuse
= 1;
721 intf
->seq_table
[i
].seqid
= NEXT_SEQID(intf
->seq_table
[i
].seqid
);
723 *seqid
= intf
->seq_table
[i
].seqid
;
724 intf
->curr_seq
= (i
+1)%IPMI_IPMB_NUM_SEQ
;
732 /* Return the receive message for the given sequence number and
733 release the sequence number so it can be reused. Some other data
734 is passed in to be sure the message matches up correctly (to help
735 guard against message coming in after their timeout and the
736 sequence number being reused). */
737 static int intf_find_seq(ipmi_smi_t intf
,
742 struct ipmi_addr
*addr
,
743 struct ipmi_recv_msg
**recv_msg
)
748 if (seq
>= IPMI_IPMB_NUM_SEQ
)
751 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
752 if (intf
->seq_table
[seq
].inuse
) {
753 struct ipmi_recv_msg
*msg
= intf
->seq_table
[seq
].recv_msg
;
755 if ((msg
->addr
.channel
== channel
)
756 && (msg
->msg
.cmd
== cmd
)
757 && (msg
->msg
.netfn
== netfn
)
758 && (ipmi_addr_equal(addr
, &(msg
->addr
))))
761 intf
->seq_table
[seq
].inuse
= 0;
765 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
771 /* Start the timer for a specific sequence table entry. */
772 static int intf_start_seq_timer(ipmi_smi_t intf
,
781 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
783 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
784 /* We do this verification because the user can be deleted
785 while a message is outstanding. */
786 if ((intf
->seq_table
[seq
].inuse
)
787 && (intf
->seq_table
[seq
].seqid
== seqid
))
789 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
790 ent
->timeout
= ent
->orig_timeout
;
793 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
798 /* Got an error for the send message for a specific sequence number. */
799 static int intf_err_seq(ipmi_smi_t intf
,
807 struct ipmi_recv_msg
*msg
= NULL
;
810 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
812 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
813 /* We do this verification because the user can be deleted
814 while a message is outstanding. */
815 if ((intf
->seq_table
[seq
].inuse
)
816 && (intf
->seq_table
[seq
].seqid
== seqid
))
818 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
824 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
827 deliver_err_response(msg
, err
);
833 int ipmi_create_user(unsigned int if_num
,
834 struct ipmi_user_hndl
*handler
,
839 ipmi_user_t new_user
;
843 /* There is no module usecount here, because it's not
844 required. Since this can only be used by and called from
845 other modules, they will implicitly use this module, and
846 thus this can't be removed unless the other modules are
852 /* Make sure the driver is actually initialized, this handles
853 problems with initialization order. */
855 rv
= ipmi_init_msghandler();
859 /* The init code doesn't return an error if it was turned
860 off, but it won't initialize. Check that. */
865 new_user
= kmalloc(sizeof(*new_user
), GFP_KERNEL
);
869 mutex_lock(&ipmi_interfaces_mutex
);
870 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
871 if (intf
->intf_num
== if_num
)
874 /* Not found, return an error */
879 /* Note that each existing user holds a refcount to the interface. */
880 kref_get(&intf
->refcount
);
882 kref_init(&new_user
->refcount
);
883 new_user
->handler
= handler
;
884 new_user
->handler_data
= handler_data
;
885 new_user
->intf
= intf
;
886 new_user
->gets_events
= 0;
888 if (!try_module_get(intf
->handlers
->owner
)) {
893 if (intf
->handlers
->inc_usecount
) {
894 rv
= intf
->handlers
->inc_usecount(intf
->send_info
);
896 module_put(intf
->handlers
->owner
);
901 /* Hold the lock so intf->handlers is guaranteed to be good
903 mutex_unlock(&ipmi_interfaces_mutex
);
906 spin_lock_irqsave(&intf
->seq_lock
, flags
);
907 list_add_rcu(&new_user
->link
, &intf
->users
);
908 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
913 kref_put(&intf
->refcount
, intf_free
);
915 mutex_unlock(&ipmi_interfaces_mutex
);
920 static void free_user(struct kref
*ref
)
922 ipmi_user_t user
= container_of(ref
, struct ipmi_user
, refcount
);
926 int ipmi_destroy_user(ipmi_user_t user
)
928 ipmi_smi_t intf
= user
->intf
;
931 struct cmd_rcvr
*rcvr
;
932 struct cmd_rcvr
*rcvrs
= NULL
;
936 /* Remove the user from the interface's sequence table. */
937 spin_lock_irqsave(&intf
->seq_lock
, flags
);
938 list_del_rcu(&user
->link
);
940 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
941 if (intf
->seq_table
[i
].inuse
942 && (intf
->seq_table
[i
].recv_msg
->user
== user
))
944 intf
->seq_table
[i
].inuse
= 0;
945 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
948 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
951 * Remove the user from the command receiver's table. First
952 * we build a list of everything (not using the standard link,
953 * since other things may be using it till we do
954 * synchronize_rcu()) then free everything in that list.
956 mutex_lock(&intf
->cmd_rcvrs_mutex
);
957 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
958 if (rcvr
->user
== user
) {
959 list_del_rcu(&rcvr
->link
);
964 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
972 mutex_lock(&ipmi_interfaces_mutex
);
973 if (intf
->handlers
) {
974 module_put(intf
->handlers
->owner
);
975 if (intf
->handlers
->dec_usecount
)
976 intf
->handlers
->dec_usecount(intf
->send_info
);
978 mutex_unlock(&ipmi_interfaces_mutex
);
980 kref_put(&intf
->refcount
, intf_free
);
982 kref_put(&user
->refcount
, free_user
);
987 void ipmi_get_version(ipmi_user_t user
,
988 unsigned char *major
,
989 unsigned char *minor
)
991 *major
= user
->intf
->ipmi_version_major
;
992 *minor
= user
->intf
->ipmi_version_minor
;
995 int ipmi_set_my_address(ipmi_user_t user
,
996 unsigned int channel
,
997 unsigned char address
)
999 if (channel
>= IPMI_MAX_CHANNELS
)
1001 user
->intf
->channels
[channel
].address
= address
;
1005 int ipmi_get_my_address(ipmi_user_t user
,
1006 unsigned int channel
,
1007 unsigned char *address
)
1009 if (channel
>= IPMI_MAX_CHANNELS
)
1011 *address
= user
->intf
->channels
[channel
].address
;
1015 int ipmi_set_my_LUN(ipmi_user_t user
,
1016 unsigned int channel
,
1019 if (channel
>= IPMI_MAX_CHANNELS
)
1021 user
->intf
->channels
[channel
].lun
= LUN
& 0x3;
1025 int ipmi_get_my_LUN(ipmi_user_t user
,
1026 unsigned int channel
,
1027 unsigned char *address
)
1029 if (channel
>= IPMI_MAX_CHANNELS
)
1031 *address
= user
->intf
->channels
[channel
].lun
;
1035 int ipmi_get_maintenance_mode(ipmi_user_t user
)
1038 unsigned long flags
;
1040 spin_lock_irqsave(&user
->intf
->maintenance_mode_lock
, flags
);
1041 mode
= user
->intf
->maintenance_mode
;
1042 spin_unlock_irqrestore(&user
->intf
->maintenance_mode_lock
, flags
);
1046 EXPORT_SYMBOL(ipmi_get_maintenance_mode
);
1048 static void maintenance_mode_update(ipmi_smi_t intf
)
1050 if (intf
->handlers
->set_maintenance_mode
)
1051 intf
->handlers
->set_maintenance_mode(
1052 intf
->send_info
, intf
->maintenance_mode_enable
);
1055 int ipmi_set_maintenance_mode(ipmi_user_t user
, int mode
)
1058 unsigned long flags
;
1059 ipmi_smi_t intf
= user
->intf
;
1061 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1062 if (intf
->maintenance_mode
!= mode
) {
1064 case IPMI_MAINTENANCE_MODE_AUTO
:
1065 intf
->maintenance_mode
= mode
;
1066 intf
->maintenance_mode_enable
1067 = (intf
->auto_maintenance_timeout
> 0);
1070 case IPMI_MAINTENANCE_MODE_OFF
:
1071 intf
->maintenance_mode
= mode
;
1072 intf
->maintenance_mode_enable
= 0;
1075 case IPMI_MAINTENANCE_MODE_ON
:
1076 intf
->maintenance_mode
= mode
;
1077 intf
->maintenance_mode_enable
= 1;
1085 maintenance_mode_update(intf
);
1088 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
, flags
);
1092 EXPORT_SYMBOL(ipmi_set_maintenance_mode
);
1094 int ipmi_set_gets_events(ipmi_user_t user
, int val
)
1096 unsigned long flags
;
1097 ipmi_smi_t intf
= user
->intf
;
1098 struct ipmi_recv_msg
*msg
, *msg2
;
1099 struct list_head msgs
;
1101 INIT_LIST_HEAD(&msgs
);
1103 spin_lock_irqsave(&intf
->events_lock
, flags
);
1104 user
->gets_events
= val
;
1106 if (intf
->delivering_events
)
1108 * Another thread is delivering events for this, so
1109 * let it handle any new events.
1113 /* Deliver any queued events. */
1114 while (user
->gets_events
&& !list_empty(&intf
->waiting_events
)) {
1115 list_for_each_entry_safe(msg
, msg2
, &intf
->waiting_events
, link
)
1116 list_move_tail(&msg
->link
, &msgs
);
1117 intf
->waiting_events_count
= 0;
1118 if (intf
->event_msg_printed
) {
1119 printk(KERN_WARNING PFX
"Event queue no longer"
1121 intf
->event_msg_printed
= 0;
1124 intf
->delivering_events
= 1;
1125 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1127 list_for_each_entry_safe(msg
, msg2
, &msgs
, link
) {
1129 kref_get(&user
->refcount
);
1130 deliver_response(msg
);
1133 spin_lock_irqsave(&intf
->events_lock
, flags
);
1134 intf
->delivering_events
= 0;
1138 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1143 static struct cmd_rcvr
*find_cmd_rcvr(ipmi_smi_t intf
,
1144 unsigned char netfn
,
1148 struct cmd_rcvr
*rcvr
;
1150 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1151 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1152 && (rcvr
->chans
& (1 << chan
)))
1158 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf
,
1159 unsigned char netfn
,
1163 struct cmd_rcvr
*rcvr
;
1165 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1166 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1167 && (rcvr
->chans
& chans
))
1173 int ipmi_register_for_cmd(ipmi_user_t user
,
1174 unsigned char netfn
,
1178 ipmi_smi_t intf
= user
->intf
;
1179 struct cmd_rcvr
*rcvr
;
1183 rcvr
= kmalloc(sizeof(*rcvr
), GFP_KERNEL
);
1187 rcvr
->netfn
= netfn
;
1188 rcvr
->chans
= chans
;
1191 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1192 /* Make sure the command/netfn is not already registered. */
1193 if (!is_cmd_rcvr_exclusive(intf
, netfn
, cmd
, chans
)) {
1198 list_add_rcu(&rcvr
->link
, &intf
->cmd_rcvrs
);
1201 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1208 int ipmi_unregister_for_cmd(ipmi_user_t user
,
1209 unsigned char netfn
,
1213 ipmi_smi_t intf
= user
->intf
;
1214 struct cmd_rcvr
*rcvr
;
1215 struct cmd_rcvr
*rcvrs
= NULL
;
1216 int i
, rv
= -ENOENT
;
1218 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1219 for (i
= 0; i
< IPMI_NUM_CHANNELS
; i
++) {
1220 if (((1 << i
) & chans
) == 0)
1222 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, i
);
1225 if (rcvr
->user
== user
) {
1227 rcvr
->chans
&= ~chans
;
1228 if (rcvr
->chans
== 0) {
1229 list_del_rcu(&rcvr
->link
);
1235 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1245 static unsigned char
1246 ipmb_checksum(unsigned char *data
, int size
)
1248 unsigned char csum
= 0;
1250 for (; size
> 0; size
--, data
++)
1256 static inline void format_ipmb_msg(struct ipmi_smi_msg
*smi_msg
,
1257 struct kernel_ipmi_msg
*msg
,
1258 struct ipmi_ipmb_addr
*ipmb_addr
,
1260 unsigned char ipmb_seq
,
1262 unsigned char source_address
,
1263 unsigned char source_lun
)
1267 /* Format the IPMB header data. */
1268 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1269 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1270 smi_msg
->data
[2] = ipmb_addr
->channel
;
1272 smi_msg
->data
[3] = 0;
1273 smi_msg
->data
[i
+3] = ipmb_addr
->slave_addr
;
1274 smi_msg
->data
[i
+4] = (msg
->netfn
<< 2) | (ipmb_addr
->lun
& 0x3);
1275 smi_msg
->data
[i
+5] = ipmb_checksum(&(smi_msg
->data
[i
+3]), 2);
1276 smi_msg
->data
[i
+6] = source_address
;
1277 smi_msg
->data
[i
+7] = (ipmb_seq
<< 2) | source_lun
;
1278 smi_msg
->data
[i
+8] = msg
->cmd
;
1280 /* Now tack on the data to the message. */
1281 if (msg
->data_len
> 0)
1282 memcpy(&(smi_msg
->data
[i
+9]), msg
->data
,
1284 smi_msg
->data_size
= msg
->data_len
+ 9;
1286 /* Now calculate the checksum and tack it on. */
1287 smi_msg
->data
[i
+smi_msg
->data_size
]
1288 = ipmb_checksum(&(smi_msg
->data
[i
+6]),
1289 smi_msg
->data_size
-6);
1291 /* Add on the checksum size and the offset from the
1293 smi_msg
->data_size
+= 1 + i
;
1295 smi_msg
->msgid
= msgid
;
1298 static inline void format_lan_msg(struct ipmi_smi_msg
*smi_msg
,
1299 struct kernel_ipmi_msg
*msg
,
1300 struct ipmi_lan_addr
*lan_addr
,
1302 unsigned char ipmb_seq
,
1303 unsigned char source_lun
)
1305 /* Format the IPMB header data. */
1306 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1307 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1308 smi_msg
->data
[2] = lan_addr
->channel
;
1309 smi_msg
->data
[3] = lan_addr
->session_handle
;
1310 smi_msg
->data
[4] = lan_addr
->remote_SWID
;
1311 smi_msg
->data
[5] = (msg
->netfn
<< 2) | (lan_addr
->lun
& 0x3);
1312 smi_msg
->data
[6] = ipmb_checksum(&(smi_msg
->data
[4]), 2);
1313 smi_msg
->data
[7] = lan_addr
->local_SWID
;
1314 smi_msg
->data
[8] = (ipmb_seq
<< 2) | source_lun
;
1315 smi_msg
->data
[9] = msg
->cmd
;
1317 /* Now tack on the data to the message. */
1318 if (msg
->data_len
> 0)
1319 memcpy(&(smi_msg
->data
[10]), msg
->data
,
1321 smi_msg
->data_size
= msg
->data_len
+ 10;
1323 /* Now calculate the checksum and tack it on. */
1324 smi_msg
->data
[smi_msg
->data_size
]
1325 = ipmb_checksum(&(smi_msg
->data
[7]),
1326 smi_msg
->data_size
-7);
1328 /* Add on the checksum size and the offset from the
1330 smi_msg
->data_size
+= 1;
1332 smi_msg
->msgid
= msgid
;
1335 /* Separate from ipmi_request so that the user does not have to be
1336 supplied in certain circumstances (mainly at panic time). If
1337 messages are supplied, they will be freed, even if an error
1339 static int i_ipmi_request(ipmi_user_t user
,
1341 struct ipmi_addr
*addr
,
1343 struct kernel_ipmi_msg
*msg
,
1344 void *user_msg_data
,
1346 struct ipmi_recv_msg
*supplied_recv
,
1348 unsigned char source_address
,
1349 unsigned char source_lun
,
1351 unsigned int retry_time_ms
)
1354 struct ipmi_smi_msg
*smi_msg
;
1355 struct ipmi_recv_msg
*recv_msg
;
1356 unsigned long flags
;
1357 struct ipmi_smi_handlers
*handlers
;
1360 if (supplied_recv
) {
1361 recv_msg
= supplied_recv
;
1363 recv_msg
= ipmi_alloc_recv_msg();
1364 if (recv_msg
== NULL
) {
1368 recv_msg
->user_msg_data
= user_msg_data
;
1371 smi_msg
= (struct ipmi_smi_msg
*) supplied_smi
;
1373 smi_msg
= ipmi_alloc_smi_msg();
1374 if (smi_msg
== NULL
) {
1375 ipmi_free_recv_msg(recv_msg
);
1381 handlers
= intf
->handlers
;
1387 recv_msg
->user
= user
;
1389 kref_get(&user
->refcount
);
1390 recv_msg
->msgid
= msgid
;
1391 /* Store the message to send in the receive message so timeout
1392 responses can get the proper response data. */
1393 recv_msg
->msg
= *msg
;
1395 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
1396 struct ipmi_system_interface_addr
*smi_addr
;
1398 if (msg
->netfn
& 1) {
1399 /* Responses are not allowed to the SMI. */
1404 smi_addr
= (struct ipmi_system_interface_addr
*) addr
;
1405 if (smi_addr
->lun
> 3) {
1406 ipmi_inc_stat(intf
, sent_invalid_commands
);
1411 memcpy(&recv_msg
->addr
, smi_addr
, sizeof(*smi_addr
));
1413 if ((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1414 && ((msg
->cmd
== IPMI_SEND_MSG_CMD
)
1415 || (msg
->cmd
== IPMI_GET_MSG_CMD
)
1416 || (msg
->cmd
== IPMI_READ_EVENT_MSG_BUFFER_CMD
)))
1418 /* We don't let the user do these, since we manage
1419 the sequence numbers. */
1420 ipmi_inc_stat(intf
, sent_invalid_commands
);
1425 if (((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1426 && ((msg
->cmd
== IPMI_COLD_RESET_CMD
)
1427 || (msg
->cmd
== IPMI_WARM_RESET_CMD
)))
1428 || (msg
->netfn
== IPMI_NETFN_FIRMWARE_REQUEST
))
1430 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1431 intf
->auto_maintenance_timeout
1432 = IPMI_MAINTENANCE_MODE_TIMEOUT
;
1433 if (!intf
->maintenance_mode
1434 && !intf
->maintenance_mode_enable
)
1436 intf
->maintenance_mode_enable
= 1;
1437 maintenance_mode_update(intf
);
1439 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
,
1443 if ((msg
->data_len
+ 2) > IPMI_MAX_MSG_LENGTH
) {
1444 ipmi_inc_stat(intf
, sent_invalid_commands
);
1449 smi_msg
->data
[0] = (msg
->netfn
<< 2) | (smi_addr
->lun
& 0x3);
1450 smi_msg
->data
[1] = msg
->cmd
;
1451 smi_msg
->msgid
= msgid
;
1452 smi_msg
->user_data
= recv_msg
;
1453 if (msg
->data_len
> 0)
1454 memcpy(&(smi_msg
->data
[2]), msg
->data
, msg
->data_len
);
1455 smi_msg
->data_size
= msg
->data_len
+ 2;
1456 ipmi_inc_stat(intf
, sent_local_commands
);
1457 } else if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
1458 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
1460 struct ipmi_ipmb_addr
*ipmb_addr
;
1461 unsigned char ipmb_seq
;
1465 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1466 ipmi_inc_stat(intf
, sent_invalid_commands
);
1471 if (intf
->channels
[addr
->channel
].medium
1472 != IPMI_CHANNEL_MEDIUM_IPMB
)
1474 ipmi_inc_stat(intf
, sent_invalid_commands
);
1480 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
)
1481 retries
= 0; /* Don't retry broadcasts. */
1485 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
) {
1486 /* Broadcasts add a zero at the beginning of the
1487 message, but otherwise is the same as an IPMB
1489 addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
1494 /* Default to 1 second retries. */
1495 if (retry_time_ms
== 0)
1496 retry_time_ms
= 1000;
1498 /* 9 for the header and 1 for the checksum, plus
1499 possibly one for the broadcast. */
1500 if ((msg
->data_len
+ 10 + broadcast
) > IPMI_MAX_MSG_LENGTH
) {
1501 ipmi_inc_stat(intf
, sent_invalid_commands
);
1506 ipmb_addr
= (struct ipmi_ipmb_addr
*) addr
;
1507 if (ipmb_addr
->lun
> 3) {
1508 ipmi_inc_stat(intf
, sent_invalid_commands
);
1513 memcpy(&recv_msg
->addr
, ipmb_addr
, sizeof(*ipmb_addr
));
1515 if (recv_msg
->msg
.netfn
& 0x1) {
1516 /* It's a response, so use the user's sequence
1518 ipmi_inc_stat(intf
, sent_ipmb_responses
);
1519 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
, msgid
,
1521 source_address
, source_lun
);
1523 /* Save the receive message so we can use it
1524 to deliver the response. */
1525 smi_msg
->user_data
= recv_msg
;
1527 /* It's a command, so get a sequence for it. */
1529 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1531 ipmi_inc_stat(intf
, sent_ipmb_commands
);
1533 /* Create a sequence number with a 1 second
1534 timeout and 4 retries. */
1535 rv
= intf_next_seq(intf
,
1543 /* We have used up all the sequence numbers,
1544 probably, so abort. */
1545 spin_unlock_irqrestore(&(intf
->seq_lock
),
1550 /* Store the sequence number in the message,
1551 so that when the send message response
1552 comes back we can start the timer. */
1553 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
,
1554 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1555 ipmb_seq
, broadcast
,
1556 source_address
, source_lun
);
1558 /* Copy the message into the recv message data, so we
1559 can retransmit it later if necessary. */
1560 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1561 smi_msg
->data_size
);
1562 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1563 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1565 /* We don't unlock until here, because we need
1566 to copy the completed message into the
1567 recv_msg before we release the lock.
1568 Otherwise, race conditions may bite us. I
1569 know that's pretty paranoid, but I prefer
1571 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1573 } else if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
1574 struct ipmi_lan_addr
*lan_addr
;
1575 unsigned char ipmb_seq
;
1578 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1579 ipmi_inc_stat(intf
, sent_invalid_commands
);
1584 if ((intf
->channels
[addr
->channel
].medium
1585 != IPMI_CHANNEL_MEDIUM_8023LAN
)
1586 && (intf
->channels
[addr
->channel
].medium
1587 != IPMI_CHANNEL_MEDIUM_ASYNC
))
1589 ipmi_inc_stat(intf
, sent_invalid_commands
);
1596 /* Default to 1 second retries. */
1597 if (retry_time_ms
== 0)
1598 retry_time_ms
= 1000;
1600 /* 11 for the header and 1 for the checksum. */
1601 if ((msg
->data_len
+ 12) > IPMI_MAX_MSG_LENGTH
) {
1602 ipmi_inc_stat(intf
, sent_invalid_commands
);
1607 lan_addr
= (struct ipmi_lan_addr
*) addr
;
1608 if (lan_addr
->lun
> 3) {
1609 ipmi_inc_stat(intf
, sent_invalid_commands
);
1614 memcpy(&recv_msg
->addr
, lan_addr
, sizeof(*lan_addr
));
1616 if (recv_msg
->msg
.netfn
& 0x1) {
1617 /* It's a response, so use the user's sequence
1619 ipmi_inc_stat(intf
, sent_lan_responses
);
1620 format_lan_msg(smi_msg
, msg
, lan_addr
, msgid
,
1623 /* Save the receive message so we can use it
1624 to deliver the response. */
1625 smi_msg
->user_data
= recv_msg
;
1627 /* It's a command, so get a sequence for it. */
1629 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1631 ipmi_inc_stat(intf
, sent_lan_commands
);
1633 /* Create a sequence number with a 1 second
1634 timeout and 4 retries. */
1635 rv
= intf_next_seq(intf
,
1643 /* We have used up all the sequence numbers,
1644 probably, so abort. */
1645 spin_unlock_irqrestore(&(intf
->seq_lock
),
1650 /* Store the sequence number in the message,
1651 so that when the send message response
1652 comes back we can start the timer. */
1653 format_lan_msg(smi_msg
, msg
, lan_addr
,
1654 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1655 ipmb_seq
, source_lun
);
1657 /* Copy the message into the recv message data, so we
1658 can retransmit it later if necessary. */
1659 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1660 smi_msg
->data_size
);
1661 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1662 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1664 /* We don't unlock until here, because we need
1665 to copy the completed message into the
1666 recv_msg before we release the lock.
1667 Otherwise, race conditions may bite us. I
1668 know that's pretty paranoid, but I prefer
1670 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1673 /* Unknown address type. */
1674 ipmi_inc_stat(intf
, sent_invalid_commands
);
1682 for (m
= 0; m
< smi_msg
->data_size
; m
++)
1683 printk(" %2.2x", smi_msg
->data
[m
]);
1688 handlers
->sender(intf
->send_info
, smi_msg
, priority
);
1695 ipmi_free_smi_msg(smi_msg
);
1696 ipmi_free_recv_msg(recv_msg
);
1700 static int check_addr(ipmi_smi_t intf
,
1701 struct ipmi_addr
*addr
,
1702 unsigned char *saddr
,
1705 if (addr
->channel
>= IPMI_MAX_CHANNELS
)
1707 *lun
= intf
->channels
[addr
->channel
].lun
;
1708 *saddr
= intf
->channels
[addr
->channel
].address
;
1712 int ipmi_request_settime(ipmi_user_t user
,
1713 struct ipmi_addr
*addr
,
1715 struct kernel_ipmi_msg
*msg
,
1716 void *user_msg_data
,
1719 unsigned int retry_time_ms
)
1721 unsigned char saddr
, lun
;
1726 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1729 return i_ipmi_request(user
,
1743 int ipmi_request_supply_msgs(ipmi_user_t user
,
1744 struct ipmi_addr
*addr
,
1746 struct kernel_ipmi_msg
*msg
,
1747 void *user_msg_data
,
1749 struct ipmi_recv_msg
*supplied_recv
,
1752 unsigned char saddr
, lun
;
1757 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1760 return i_ipmi_request(user
,
1774 #ifdef CONFIG_PROC_FS
1775 static int ipmb_file_read_proc(char *page
, char **start
, off_t off
,
1776 int count
, int *eof
, void *data
)
1778 char *out
= (char *) page
;
1779 ipmi_smi_t intf
= data
;
1783 for (i
= 0; i
< IPMI_MAX_CHANNELS
; i
++)
1784 rv
+= sprintf(out
+rv
, "%x ", intf
->channels
[i
].address
);
1785 out
[rv
-1] = '\n'; /* Replace the final space with a newline */
1791 static int version_file_read_proc(char *page
, char **start
, off_t off
,
1792 int count
, int *eof
, void *data
)
1794 char *out
= (char *) page
;
1795 ipmi_smi_t intf
= data
;
1797 return sprintf(out
, "%u.%u\n",
1798 ipmi_version_major(&intf
->bmc
->id
),
1799 ipmi_version_minor(&intf
->bmc
->id
));
1802 static int stat_file_read_proc(char *page
, char **start
, off_t off
,
1803 int count
, int *eof
, void *data
)
1805 char *out
= (char *) page
;
1806 ipmi_smi_t intf
= data
;
1808 out
+= sprintf(out
, "sent_invalid_commands: %u\n",
1809 ipmi_get_stat(intf
, sent_invalid_commands
));
1810 out
+= sprintf(out
, "sent_local_commands: %u\n",
1811 ipmi_get_stat(intf
, sent_local_commands
));
1812 out
+= sprintf(out
, "handled_local_responses: %u\n",
1813 ipmi_get_stat(intf
, handled_local_responses
));
1814 out
+= sprintf(out
, "unhandled_local_responses: %u\n",
1815 ipmi_get_stat(intf
, unhandled_local_responses
));
1816 out
+= sprintf(out
, "sent_ipmb_commands: %u\n",
1817 ipmi_get_stat(intf
, sent_ipmb_commands
));
1818 out
+= sprintf(out
, "sent_ipmb_command_errs: %u\n",
1819 ipmi_get_stat(intf
, sent_ipmb_command_errs
));
1820 out
+= sprintf(out
, "retransmitted_ipmb_commands: %u\n",
1821 ipmi_get_stat(intf
, retransmitted_ipmb_commands
));
1822 out
+= sprintf(out
, "timed_out_ipmb_commands: %u\n",
1823 ipmi_get_stat(intf
, timed_out_ipmb_commands
));
1824 out
+= sprintf(out
, "timed_out_ipmb_broadcasts: %u\n",
1825 ipmi_get_stat(intf
, timed_out_ipmb_broadcasts
));
1826 out
+= sprintf(out
, "sent_ipmb_responses: %u\n",
1827 ipmi_get_stat(intf
, sent_ipmb_responses
));
1828 out
+= sprintf(out
, "handled_ipmb_responses: %u\n",
1829 ipmi_get_stat(intf
, handled_ipmb_responses
));
1830 out
+= sprintf(out
, "invalid_ipmb_responses: %u\n",
1831 ipmi_get_stat(intf
, invalid_ipmb_responses
));
1832 out
+= sprintf(out
, "unhandled_ipmb_responses: %u\n",
1833 ipmi_get_stat(intf
, unhandled_ipmb_responses
));
1834 out
+= sprintf(out
, "sent_lan_commands: %u\n",
1835 ipmi_get_stat(intf
, sent_lan_commands
));
1836 out
+= sprintf(out
, "sent_lan_command_errs: %u\n",
1837 ipmi_get_stat(intf
, sent_lan_command_errs
));
1838 out
+= sprintf(out
, "retransmitted_lan_commands: %u\n",
1839 ipmi_get_stat(intf
, retransmitted_lan_commands
));
1840 out
+= sprintf(out
, "timed_out_lan_commands: %u\n",
1841 ipmi_get_stat(intf
, timed_out_lan_commands
));
1842 out
+= sprintf(out
, "sent_lan_responses: %u\n",
1843 ipmi_get_stat(intf
, sent_lan_responses
));
1844 out
+= sprintf(out
, "handled_lan_responses: %u\n",
1845 ipmi_get_stat(intf
, handled_lan_responses
));
1846 out
+= sprintf(out
, "invalid_lan_responses: %u\n",
1847 ipmi_get_stat(intf
, invalid_lan_responses
));
1848 out
+= sprintf(out
, "unhandled_lan_responses: %u\n",
1849 ipmi_get_stat(intf
, unhandled_lan_responses
));
1850 out
+= sprintf(out
, "handled_commands: %u\n",
1851 ipmi_get_stat(intf
, handled_commands
));
1852 out
+= sprintf(out
, "invalid_commands: %u\n",
1853 ipmi_get_stat(intf
, invalid_commands
));
1854 out
+= sprintf(out
, "unhandled_commands: %u\n",
1855 ipmi_get_stat(intf
, unhandled_commands
));
1856 out
+= sprintf(out
, "invalid_events: %u\n",
1857 ipmi_get_stat(intf
, invalid_events
));
1858 out
+= sprintf(out
, "events: %u\n",
1859 ipmi_get_stat(intf
, events
));
1861 return (out
- ((char *) page
));
1863 #endif /* CONFIG_PROC_FS */
1865 int ipmi_smi_add_proc_entry(ipmi_smi_t smi
, char *name
,
1866 read_proc_t
*read_proc
, write_proc_t
*write_proc
,
1867 void *data
, struct module
*owner
)
1870 #ifdef CONFIG_PROC_FS
1871 struct proc_dir_entry
*file
;
1872 struct ipmi_proc_entry
*entry
;
1874 /* Create a list element. */
1875 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1878 entry
->name
= kmalloc(strlen(name
)+1, GFP_KERNEL
);
1883 strcpy(entry
->name
, name
);
1885 file
= create_proc_entry(name
, 0, smi
->proc_dir
);
1892 file
->read_proc
= read_proc
;
1893 file
->write_proc
= write_proc
;
1894 file
->owner
= owner
;
1896 mutex_lock(&smi
->proc_entry_lock
);
1897 /* Stick it on the list. */
1898 entry
->next
= smi
->proc_entries
;
1899 smi
->proc_entries
= entry
;
1900 mutex_unlock(&smi
->proc_entry_lock
);
1902 #endif /* CONFIG_PROC_FS */
1907 static int add_proc_entries(ipmi_smi_t smi
, int num
)
1911 #ifdef CONFIG_PROC_FS
1912 sprintf(smi
->proc_dir_name
, "%d", num
);
1913 smi
->proc_dir
= proc_mkdir(smi
->proc_dir_name
, proc_ipmi_root
);
1917 smi
->proc_dir
->owner
= THIS_MODULE
;
1921 rv
= ipmi_smi_add_proc_entry(smi
, "stats",
1922 stat_file_read_proc
, NULL
,
1926 rv
= ipmi_smi_add_proc_entry(smi
, "ipmb",
1927 ipmb_file_read_proc
, NULL
,
1931 rv
= ipmi_smi_add_proc_entry(smi
, "version",
1932 version_file_read_proc
, NULL
,
1934 #endif /* CONFIG_PROC_FS */
1939 static void remove_proc_entries(ipmi_smi_t smi
)
1941 #ifdef CONFIG_PROC_FS
1942 struct ipmi_proc_entry
*entry
;
1944 mutex_lock(&smi
->proc_entry_lock
);
1945 while (smi
->proc_entries
) {
1946 entry
= smi
->proc_entries
;
1947 smi
->proc_entries
= entry
->next
;
1949 remove_proc_entry(entry
->name
, smi
->proc_dir
);
1953 mutex_unlock(&smi
->proc_entry_lock
);
1954 remove_proc_entry(smi
->proc_dir_name
, proc_ipmi_root
);
1955 #endif /* CONFIG_PROC_FS */
1958 static int __find_bmc_guid(struct device
*dev
, void *data
)
1960 unsigned char *id
= data
;
1961 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1962 return memcmp(bmc
->guid
, id
, 16) == 0;
1965 static struct bmc_device
*ipmi_find_bmc_guid(struct device_driver
*drv
,
1966 unsigned char *guid
)
1970 dev
= driver_find_device(drv
, NULL
, guid
, __find_bmc_guid
);
1972 return dev_get_drvdata(dev
);
1977 struct prod_dev_id
{
1978 unsigned int product_id
;
1979 unsigned char device_id
;
1982 static int __find_bmc_prod_dev_id(struct device
*dev
, void *data
)
1984 struct prod_dev_id
*id
= data
;
1985 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1987 return (bmc
->id
.product_id
== id
->product_id
1988 && bmc
->id
.device_id
== id
->device_id
);
1991 static struct bmc_device
*ipmi_find_bmc_prod_dev_id(
1992 struct device_driver
*drv
,
1993 unsigned int product_id
, unsigned char device_id
)
1995 struct prod_dev_id id
= {
1996 .product_id
= product_id
,
1997 .device_id
= device_id
,
2001 dev
= driver_find_device(drv
, NULL
, &id
, __find_bmc_prod_dev_id
);
2003 return dev_get_drvdata(dev
);
2008 static ssize_t
device_id_show(struct device
*dev
,
2009 struct device_attribute
*attr
,
2012 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2014 return snprintf(buf
, 10, "%u\n", bmc
->id
.device_id
);
2017 static ssize_t
provides_dev_sdrs_show(struct device
*dev
,
2018 struct device_attribute
*attr
,
2021 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2023 return snprintf(buf
, 10, "%u\n",
2024 (bmc
->id
.device_revision
& 0x80) >> 7);
2027 static ssize_t
revision_show(struct device
*dev
, struct device_attribute
*attr
,
2030 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2032 return snprintf(buf
, 20, "%u\n",
2033 bmc
->id
.device_revision
& 0x0F);
2036 static ssize_t
firmware_rev_show(struct device
*dev
,
2037 struct device_attribute
*attr
,
2040 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2042 return snprintf(buf
, 20, "%u.%x\n", bmc
->id
.firmware_revision_1
,
2043 bmc
->id
.firmware_revision_2
);
2046 static ssize_t
ipmi_version_show(struct device
*dev
,
2047 struct device_attribute
*attr
,
2050 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2052 return snprintf(buf
, 20, "%u.%u\n",
2053 ipmi_version_major(&bmc
->id
),
2054 ipmi_version_minor(&bmc
->id
));
2057 static ssize_t
add_dev_support_show(struct device
*dev
,
2058 struct device_attribute
*attr
,
2061 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2063 return snprintf(buf
, 10, "0x%02x\n",
2064 bmc
->id
.additional_device_support
);
2067 static ssize_t
manufacturer_id_show(struct device
*dev
,
2068 struct device_attribute
*attr
,
2071 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2073 return snprintf(buf
, 20, "0x%6.6x\n", bmc
->id
.manufacturer_id
);
2076 static ssize_t
product_id_show(struct device
*dev
,
2077 struct device_attribute
*attr
,
2080 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2082 return snprintf(buf
, 10, "0x%4.4x\n", bmc
->id
.product_id
);
2085 static ssize_t
aux_firmware_rev_show(struct device
*dev
,
2086 struct device_attribute
*attr
,
2089 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2091 return snprintf(buf
, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2092 bmc
->id
.aux_firmware_revision
[3],
2093 bmc
->id
.aux_firmware_revision
[2],
2094 bmc
->id
.aux_firmware_revision
[1],
2095 bmc
->id
.aux_firmware_revision
[0]);
2098 static ssize_t
guid_show(struct device
*dev
, struct device_attribute
*attr
,
2101 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2103 return snprintf(buf
, 100, "%Lx%Lx\n",
2104 (long long) bmc
->guid
[0],
2105 (long long) bmc
->guid
[8]);
2108 static void remove_files(struct bmc_device
*bmc
)
2113 device_remove_file(&bmc
->dev
->dev
,
2114 &bmc
->device_id_attr
);
2115 device_remove_file(&bmc
->dev
->dev
,
2116 &bmc
->provides_dev_sdrs_attr
);
2117 device_remove_file(&bmc
->dev
->dev
,
2118 &bmc
->revision_attr
);
2119 device_remove_file(&bmc
->dev
->dev
,
2120 &bmc
->firmware_rev_attr
);
2121 device_remove_file(&bmc
->dev
->dev
,
2122 &bmc
->version_attr
);
2123 device_remove_file(&bmc
->dev
->dev
,
2124 &bmc
->add_dev_support_attr
);
2125 device_remove_file(&bmc
->dev
->dev
,
2126 &bmc
->manufacturer_id_attr
);
2127 device_remove_file(&bmc
->dev
->dev
,
2128 &bmc
->product_id_attr
);
2130 if (bmc
->id
.aux_firmware_revision_set
)
2131 device_remove_file(&bmc
->dev
->dev
,
2132 &bmc
->aux_firmware_rev_attr
);
2134 device_remove_file(&bmc
->dev
->dev
,
2139 cleanup_bmc_device(struct kref
*ref
)
2141 struct bmc_device
*bmc
;
2143 bmc
= container_of(ref
, struct bmc_device
, refcount
);
2146 platform_device_unregister(bmc
->dev
);
2150 static void ipmi_bmc_unregister(ipmi_smi_t intf
)
2152 struct bmc_device
*bmc
= intf
->bmc
;
2154 if (intf
->sysfs_name
) {
2155 sysfs_remove_link(&intf
->si_dev
->kobj
, intf
->sysfs_name
);
2156 kfree(intf
->sysfs_name
);
2157 intf
->sysfs_name
= NULL
;
2159 if (intf
->my_dev_name
) {
2160 sysfs_remove_link(&bmc
->dev
->dev
.kobj
, intf
->my_dev_name
);
2161 kfree(intf
->my_dev_name
);
2162 intf
->my_dev_name
= NULL
;
2165 mutex_lock(&ipmidriver_mutex
);
2166 kref_put(&bmc
->refcount
, cleanup_bmc_device
);
2168 mutex_unlock(&ipmidriver_mutex
);
2171 static int create_files(struct bmc_device
*bmc
)
2175 bmc
->device_id_attr
.attr
.name
= "device_id";
2176 bmc
->device_id_attr
.attr
.mode
= S_IRUGO
;
2177 bmc
->device_id_attr
.show
= device_id_show
;
2179 bmc
->provides_dev_sdrs_attr
.attr
.name
= "provides_device_sdrs";
2180 bmc
->provides_dev_sdrs_attr
.attr
.mode
= S_IRUGO
;
2181 bmc
->provides_dev_sdrs_attr
.show
= provides_dev_sdrs_show
;
2183 bmc
->revision_attr
.attr
.name
= "revision";
2184 bmc
->revision_attr
.attr
.mode
= S_IRUGO
;
2185 bmc
->revision_attr
.show
= revision_show
;
2187 bmc
->firmware_rev_attr
.attr
.name
= "firmware_revision";
2188 bmc
->firmware_rev_attr
.attr
.mode
= S_IRUGO
;
2189 bmc
->firmware_rev_attr
.show
= firmware_rev_show
;
2191 bmc
->version_attr
.attr
.name
= "ipmi_version";
2192 bmc
->version_attr
.attr
.mode
= S_IRUGO
;
2193 bmc
->version_attr
.show
= ipmi_version_show
;
2195 bmc
->add_dev_support_attr
.attr
.name
= "additional_device_support";
2196 bmc
->add_dev_support_attr
.attr
.mode
= S_IRUGO
;
2197 bmc
->add_dev_support_attr
.show
= add_dev_support_show
;
2199 bmc
->manufacturer_id_attr
.attr
.name
= "manufacturer_id";
2200 bmc
->manufacturer_id_attr
.attr
.mode
= S_IRUGO
;
2201 bmc
->manufacturer_id_attr
.show
= manufacturer_id_show
;
2203 bmc
->product_id_attr
.attr
.name
= "product_id";
2204 bmc
->product_id_attr
.attr
.mode
= S_IRUGO
;
2205 bmc
->product_id_attr
.show
= product_id_show
;
2207 bmc
->guid_attr
.attr
.name
= "guid";
2208 bmc
->guid_attr
.attr
.mode
= S_IRUGO
;
2209 bmc
->guid_attr
.show
= guid_show
;
2211 bmc
->aux_firmware_rev_attr
.attr
.name
= "aux_firmware_revision";
2212 bmc
->aux_firmware_rev_attr
.attr
.mode
= S_IRUGO
;
2213 bmc
->aux_firmware_rev_attr
.show
= aux_firmware_rev_show
;
2215 err
= device_create_file(&bmc
->dev
->dev
,
2216 &bmc
->device_id_attr
);
2218 err
= device_create_file(&bmc
->dev
->dev
,
2219 &bmc
->provides_dev_sdrs_attr
);
2220 if (err
) goto out_devid
;
2221 err
= device_create_file(&bmc
->dev
->dev
,
2222 &bmc
->revision_attr
);
2223 if (err
) goto out_sdrs
;
2224 err
= device_create_file(&bmc
->dev
->dev
,
2225 &bmc
->firmware_rev_attr
);
2226 if (err
) goto out_rev
;
2227 err
= device_create_file(&bmc
->dev
->dev
,
2228 &bmc
->version_attr
);
2229 if (err
) goto out_firm
;
2230 err
= device_create_file(&bmc
->dev
->dev
,
2231 &bmc
->add_dev_support_attr
);
2232 if (err
) goto out_version
;
2233 err
= device_create_file(&bmc
->dev
->dev
,
2234 &bmc
->manufacturer_id_attr
);
2235 if (err
) goto out_add_dev
;
2236 err
= device_create_file(&bmc
->dev
->dev
,
2237 &bmc
->product_id_attr
);
2238 if (err
) goto out_manu
;
2239 if (bmc
->id
.aux_firmware_revision_set
) {
2240 err
= device_create_file(&bmc
->dev
->dev
,
2241 &bmc
->aux_firmware_rev_attr
);
2242 if (err
) goto out_prod_id
;
2244 if (bmc
->guid_set
) {
2245 err
= device_create_file(&bmc
->dev
->dev
,
2247 if (err
) goto out_aux_firm
;
2253 if (bmc
->id
.aux_firmware_revision_set
)
2254 device_remove_file(&bmc
->dev
->dev
,
2255 &bmc
->aux_firmware_rev_attr
);
2257 device_remove_file(&bmc
->dev
->dev
,
2258 &bmc
->product_id_attr
);
2260 device_remove_file(&bmc
->dev
->dev
,
2261 &bmc
->manufacturer_id_attr
);
2263 device_remove_file(&bmc
->dev
->dev
,
2264 &bmc
->add_dev_support_attr
);
2266 device_remove_file(&bmc
->dev
->dev
,
2267 &bmc
->version_attr
);
2269 device_remove_file(&bmc
->dev
->dev
,
2270 &bmc
->firmware_rev_attr
);
2272 device_remove_file(&bmc
->dev
->dev
,
2273 &bmc
->revision_attr
);
2275 device_remove_file(&bmc
->dev
->dev
,
2276 &bmc
->provides_dev_sdrs_attr
);
2278 device_remove_file(&bmc
->dev
->dev
,
2279 &bmc
->device_id_attr
);
2284 static int ipmi_bmc_register(ipmi_smi_t intf
, int ifnum
,
2285 const char *sysfs_name
)
2288 struct bmc_device
*bmc
= intf
->bmc
;
2289 struct bmc_device
*old_bmc
;
2293 mutex_lock(&ipmidriver_mutex
);
2296 * Try to find if there is an bmc_device struct
2297 * representing the interfaced BMC already
2300 old_bmc
= ipmi_find_bmc_guid(&ipmidriver
, bmc
->guid
);
2302 old_bmc
= ipmi_find_bmc_prod_dev_id(&ipmidriver
,
2307 * If there is already an bmc_device, free the new one,
2308 * otherwise register the new BMC device
2312 intf
->bmc
= old_bmc
;
2315 kref_get(&bmc
->refcount
);
2316 mutex_unlock(&ipmidriver_mutex
);
2319 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2320 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2321 bmc
->id
.manufacturer_id
,
2326 unsigned char orig_dev_id
= bmc
->id
.device_id
;
2327 int warn_printed
= 0;
2329 snprintf(name
, sizeof(name
),
2330 "ipmi_bmc.%4.4x", bmc
->id
.product_id
);
2332 while (ipmi_find_bmc_prod_dev_id(&ipmidriver
,
2334 bmc
->id
.device_id
)) {
2335 if (!warn_printed
) {
2336 printk(KERN_WARNING PFX
2337 "This machine has two different BMCs"
2338 " with the same product id and device"
2339 " id. This is an error in the"
2340 " firmware, but incrementing the"
2341 " device id to work around the problem."
2342 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2343 bmc
->id
.product_id
, bmc
->id
.device_id
);
2346 bmc
->id
.device_id
++; /* Wraps at 255 */
2347 if (bmc
->id
.device_id
== orig_dev_id
) {
2349 "Out of device ids!\n");
2354 bmc
->dev
= platform_device_alloc(name
, bmc
->id
.device_id
);
2356 mutex_unlock(&ipmidriver_mutex
);
2359 " Unable to allocate platform device\n");
2362 bmc
->dev
->dev
.driver
= &ipmidriver
;
2363 dev_set_drvdata(&bmc
->dev
->dev
, bmc
);
2364 kref_init(&bmc
->refcount
);
2366 rv
= platform_device_add(bmc
->dev
);
2367 mutex_unlock(&ipmidriver_mutex
);
2369 platform_device_put(bmc
->dev
);
2373 " Unable to register bmc device: %d\n",
2375 /* Don't go to out_err, you can only do that if
2376 the device is registered already. */
2380 rv
= create_files(bmc
);
2382 mutex_lock(&ipmidriver_mutex
);
2383 platform_device_unregister(bmc
->dev
);
2384 mutex_unlock(&ipmidriver_mutex
);
2390 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2391 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2392 bmc
->id
.manufacturer_id
,
2398 * create symlink from system interface device to bmc device
2401 intf
->sysfs_name
= kstrdup(sysfs_name
, GFP_KERNEL
);
2402 if (!intf
->sysfs_name
) {
2405 "ipmi_msghandler: allocate link to BMC: %d\n",
2410 rv
= sysfs_create_link(&intf
->si_dev
->kobj
,
2411 &bmc
->dev
->dev
.kobj
, intf
->sysfs_name
);
2413 kfree(intf
->sysfs_name
);
2414 intf
->sysfs_name
= NULL
;
2416 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2421 size
= snprintf(dummy
, 0, "ipmi%d", ifnum
);
2422 intf
->my_dev_name
= kmalloc(size
+1, GFP_KERNEL
);
2423 if (!intf
->my_dev_name
) {
2424 kfree(intf
->sysfs_name
);
2425 intf
->sysfs_name
= NULL
;
2428 "ipmi_msghandler: allocate link from BMC: %d\n",
2432 snprintf(intf
->my_dev_name
, size
+1, "ipmi%d", ifnum
);
2434 rv
= sysfs_create_link(&bmc
->dev
->dev
.kobj
, &intf
->si_dev
->kobj
,
2437 kfree(intf
->sysfs_name
);
2438 intf
->sysfs_name
= NULL
;
2439 kfree(intf
->my_dev_name
);
2440 intf
->my_dev_name
= NULL
;
2443 " Unable to create symlink to bmc: %d\n",
2451 ipmi_bmc_unregister(intf
);
2456 send_guid_cmd(ipmi_smi_t intf
, int chan
)
2458 struct kernel_ipmi_msg msg
;
2459 struct ipmi_system_interface_addr si
;
2461 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2462 si
.channel
= IPMI_BMC_CHANNEL
;
2465 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
2466 msg
.cmd
= IPMI_GET_DEVICE_GUID_CMD
;
2469 return i_ipmi_request(NULL
,
2471 (struct ipmi_addr
*) &si
,
2478 intf
->channels
[0].address
,
2479 intf
->channels
[0].lun
,
2484 guid_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
2486 if ((msg
->addr
.addr_type
!= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
2487 || (msg
->msg
.netfn
!= IPMI_NETFN_APP_RESPONSE
)
2488 || (msg
->msg
.cmd
!= IPMI_GET_DEVICE_GUID_CMD
))
2492 if (msg
->msg
.data
[0] != 0) {
2493 /* Error from getting the GUID, the BMC doesn't have one. */
2494 intf
->bmc
->guid_set
= 0;
2498 if (msg
->msg
.data_len
< 17) {
2499 intf
->bmc
->guid_set
= 0;
2500 printk(KERN_WARNING PFX
2501 "guid_handler: The GUID response from the BMC was too"
2502 " short, it was %d but should have been 17. Assuming"
2503 " GUID is not available.\n",
2508 memcpy(intf
->bmc
->guid
, msg
->msg
.data
, 16);
2509 intf
->bmc
->guid_set
= 1;
2511 wake_up(&intf
->waitq
);
2515 get_guid(ipmi_smi_t intf
)
2519 intf
->bmc
->guid_set
= 0x2;
2520 intf
->null_user_handler
= guid_handler
;
2521 rv
= send_guid_cmd(intf
, 0);
2523 /* Send failed, no GUID available. */
2524 intf
->bmc
->guid_set
= 0;
2525 wait_event(intf
->waitq
, intf
->bmc
->guid_set
!= 2);
2526 intf
->null_user_handler
= NULL
;
2530 send_channel_info_cmd(ipmi_smi_t intf
, int chan
)
2532 struct kernel_ipmi_msg msg
;
2533 unsigned char data
[1];
2534 struct ipmi_system_interface_addr si
;
2536 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2537 si
.channel
= IPMI_BMC_CHANNEL
;
2540 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
2541 msg
.cmd
= IPMI_GET_CHANNEL_INFO_CMD
;
2545 return i_ipmi_request(NULL
,
2547 (struct ipmi_addr
*) &si
,
2554 intf
->channels
[0].address
,
2555 intf
->channels
[0].lun
,
2560 channel_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
2565 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
2566 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
2567 && (msg
->msg
.cmd
== IPMI_GET_CHANNEL_INFO_CMD
))
2569 /* It's the one we want */
2570 if (msg
->msg
.data
[0] != 0) {
2571 /* Got an error from the channel, just go on. */
2573 if (msg
->msg
.data
[0] == IPMI_INVALID_COMMAND_ERR
) {
2574 /* If the MC does not support this
2575 command, that is legal. We just
2576 assume it has one IPMB at channel
2578 intf
->channels
[0].medium
2579 = IPMI_CHANNEL_MEDIUM_IPMB
;
2580 intf
->channels
[0].protocol
2581 = IPMI_CHANNEL_PROTOCOL_IPMB
;
2584 intf
->curr_channel
= IPMI_MAX_CHANNELS
;
2585 wake_up(&intf
->waitq
);
2590 if (msg
->msg
.data_len
< 4) {
2591 /* Message not big enough, just go on. */
2594 chan
= intf
->curr_channel
;
2595 intf
->channels
[chan
].medium
= msg
->msg
.data
[2] & 0x7f;
2596 intf
->channels
[chan
].protocol
= msg
->msg
.data
[3] & 0x1f;
2599 intf
->curr_channel
++;
2600 if (intf
->curr_channel
>= IPMI_MAX_CHANNELS
)
2601 wake_up(&intf
->waitq
);
2603 rv
= send_channel_info_cmd(intf
, intf
->curr_channel
);
2606 /* Got an error somehow, just give up. */
2607 intf
->curr_channel
= IPMI_MAX_CHANNELS
;
2608 wake_up(&intf
->waitq
);
2610 printk(KERN_WARNING PFX
2611 "Error sending channel information: %d\n",
2619 void ipmi_poll_interface(ipmi_user_t user
)
2621 ipmi_smi_t intf
= user
->intf
;
2623 if (intf
->handlers
->poll
)
2624 intf
->handlers
->poll(intf
->send_info
);
2627 int ipmi_register_smi(struct ipmi_smi_handlers
*handlers
,
2629 struct ipmi_device_id
*device_id
,
2630 struct device
*si_dev
,
2631 const char *sysfs_name
,
2632 unsigned char slave_addr
)
2638 struct list_head
*link
;
2640 /* Make sure the driver is actually initialized, this handles
2641 problems with initialization order. */
2643 rv
= ipmi_init_msghandler();
2646 /* The init code doesn't return an error if it was turned
2647 off, but it won't initialize. Check that. */
2652 intf
= kzalloc(sizeof(*intf
), GFP_KERNEL
);
2656 intf
->ipmi_version_major
= ipmi_version_major(device_id
);
2657 intf
->ipmi_version_minor
= ipmi_version_minor(device_id
);
2659 intf
->bmc
= kzalloc(sizeof(*intf
->bmc
), GFP_KERNEL
);
2664 intf
->intf_num
= -1; /* Mark it invalid for now. */
2665 kref_init(&intf
->refcount
);
2666 intf
->bmc
->id
= *device_id
;
2667 intf
->si_dev
= si_dev
;
2668 for (j
= 0; j
< IPMI_MAX_CHANNELS
; j
++) {
2669 intf
->channels
[j
].address
= IPMI_BMC_SLAVE_ADDR
;
2670 intf
->channels
[j
].lun
= 2;
2672 if (slave_addr
!= 0)
2673 intf
->channels
[0].address
= slave_addr
;
2674 INIT_LIST_HEAD(&intf
->users
);
2675 intf
->handlers
= handlers
;
2676 intf
->send_info
= send_info
;
2677 spin_lock_init(&intf
->seq_lock
);
2678 for (j
= 0; j
< IPMI_IPMB_NUM_SEQ
; j
++) {
2679 intf
->seq_table
[j
].inuse
= 0;
2680 intf
->seq_table
[j
].seqid
= 0;
2683 #ifdef CONFIG_PROC_FS
2684 mutex_init(&intf
->proc_entry_lock
);
2686 spin_lock_init(&intf
->waiting_msgs_lock
);
2687 INIT_LIST_HEAD(&intf
->waiting_msgs
);
2688 spin_lock_init(&intf
->events_lock
);
2689 INIT_LIST_HEAD(&intf
->waiting_events
);
2690 intf
->waiting_events_count
= 0;
2691 mutex_init(&intf
->cmd_rcvrs_mutex
);
2692 spin_lock_init(&intf
->maintenance_mode_lock
);
2693 INIT_LIST_HEAD(&intf
->cmd_rcvrs
);
2694 init_waitqueue_head(&intf
->waitq
);
2695 for (i
= 0; i
< IPMI_NUM_STATS
; i
++)
2696 atomic_set(&intf
->stats
[i
], 0);
2698 intf
->proc_dir
= NULL
;
2700 mutex_lock(&smi_watchers_mutex
);
2701 mutex_lock(&ipmi_interfaces_mutex
);
2702 /* Look for a hole in the numbers. */
2704 link
= &ipmi_interfaces
;
2705 list_for_each_entry_rcu(tintf
, &ipmi_interfaces
, link
) {
2706 if (tintf
->intf_num
!= i
) {
2707 link
= &tintf
->link
;
2712 /* Add the new interface in numeric order. */
2714 list_add_rcu(&intf
->link
, &ipmi_interfaces
);
2716 list_add_tail_rcu(&intf
->link
, link
);
2718 rv
= handlers
->start_processing(send_info
, intf
);
2724 if ((intf
->ipmi_version_major
> 1)
2725 || ((intf
->ipmi_version_major
== 1)
2726 && (intf
->ipmi_version_minor
>= 5)))
2728 /* Start scanning the channels to see what is
2730 intf
->null_user_handler
= channel_handler
;
2731 intf
->curr_channel
= 0;
2732 rv
= send_channel_info_cmd(intf
, 0);
2736 /* Wait for the channel info to be read. */
2737 wait_event(intf
->waitq
,
2738 intf
->curr_channel
>= IPMI_MAX_CHANNELS
);
2739 intf
->null_user_handler
= NULL
;
2741 /* Assume a single IPMB channel at zero. */
2742 intf
->channels
[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB
;
2743 intf
->channels
[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB
;
2747 rv
= add_proc_entries(intf
, i
);
2749 rv
= ipmi_bmc_register(intf
, i
, sysfs_name
);
2754 remove_proc_entries(intf
);
2755 intf
->handlers
= NULL
;
2756 list_del_rcu(&intf
->link
);
2757 mutex_unlock(&ipmi_interfaces_mutex
);
2758 mutex_unlock(&smi_watchers_mutex
);
2760 kref_put(&intf
->refcount
, intf_free
);
2763 * Keep memory order straight for RCU readers. Make
2764 * sure everything else is committed to memory before
2765 * setting intf_num to mark the interface valid.
2769 mutex_unlock(&ipmi_interfaces_mutex
);
2770 /* After this point the interface is legal to use. */
2771 call_smi_watchers(i
, intf
->si_dev
);
2772 mutex_unlock(&smi_watchers_mutex
);
2778 static void cleanup_smi_msgs(ipmi_smi_t intf
)
2781 struct seq_table
*ent
;
2783 /* No need for locks, the interface is down. */
2784 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
2785 ent
= &(intf
->seq_table
[i
]);
2788 deliver_err_response(ent
->recv_msg
, IPMI_ERR_UNSPECIFIED
);
2792 int ipmi_unregister_smi(ipmi_smi_t intf
)
2794 struct ipmi_smi_watcher
*w
;
2795 int intf_num
= intf
->intf_num
;
2797 ipmi_bmc_unregister(intf
);
2799 mutex_lock(&smi_watchers_mutex
);
2800 mutex_lock(&ipmi_interfaces_mutex
);
2801 intf
->intf_num
= -1;
2802 intf
->handlers
= NULL
;
2803 list_del_rcu(&intf
->link
);
2804 mutex_unlock(&ipmi_interfaces_mutex
);
2807 cleanup_smi_msgs(intf
);
2809 remove_proc_entries(intf
);
2811 /* Call all the watcher interfaces to tell them that
2812 an interface is gone. */
2813 list_for_each_entry(w
, &smi_watchers
, link
)
2814 w
->smi_gone(intf_num
);
2815 mutex_unlock(&smi_watchers_mutex
);
2817 kref_put(&intf
->refcount
, intf_free
);
2821 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf
,
2822 struct ipmi_smi_msg
*msg
)
2824 struct ipmi_ipmb_addr ipmb_addr
;
2825 struct ipmi_recv_msg
*recv_msg
;
2828 /* This is 11, not 10, because the response must contain a
2829 * completion code. */
2830 if (msg
->rsp_size
< 11) {
2831 /* Message not big enough, just ignore it. */
2832 ipmi_inc_stat(intf
, invalid_ipmb_responses
);
2836 if (msg
->rsp
[2] != 0) {
2837 /* An error getting the response, just ignore it. */
2841 ipmb_addr
.addr_type
= IPMI_IPMB_ADDR_TYPE
;
2842 ipmb_addr
.slave_addr
= msg
->rsp
[6];
2843 ipmb_addr
.channel
= msg
->rsp
[3] & 0x0f;
2844 ipmb_addr
.lun
= msg
->rsp
[7] & 3;
2846 /* It's a response from a remote entity. Look up the sequence
2847 number and handle the response. */
2848 if (intf_find_seq(intf
,
2852 (msg
->rsp
[4] >> 2) & (~1),
2853 (struct ipmi_addr
*) &(ipmb_addr
),
2856 /* We were unable to find the sequence number,
2857 so just nuke the message. */
2858 ipmi_inc_stat(intf
, unhandled_ipmb_responses
);
2862 memcpy(recv_msg
->msg_data
,
2865 /* THe other fields matched, so no need to set them, except
2866 for netfn, which needs to be the response that was
2867 returned, not the request value. */
2868 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
2869 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2870 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
2871 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
2872 ipmi_inc_stat(intf
, handled_ipmb_responses
);
2873 deliver_response(recv_msg
);
2878 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf
,
2879 struct ipmi_smi_msg
*msg
)
2881 struct cmd_rcvr
*rcvr
;
2883 unsigned char netfn
;
2886 ipmi_user_t user
= NULL
;
2887 struct ipmi_ipmb_addr
*ipmb_addr
;
2888 struct ipmi_recv_msg
*recv_msg
;
2889 struct ipmi_smi_handlers
*handlers
;
2891 if (msg
->rsp_size
< 10) {
2892 /* Message not big enough, just ignore it. */
2893 ipmi_inc_stat(intf
, invalid_commands
);
2897 if (msg
->rsp
[2] != 0) {
2898 /* An error getting the response, just ignore it. */
2902 netfn
= msg
->rsp
[4] >> 2;
2904 chan
= msg
->rsp
[3] & 0xf;
2907 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
2910 kref_get(&user
->refcount
);
2916 /* We didn't find a user, deliver an error response. */
2917 ipmi_inc_stat(intf
, unhandled_commands
);
2919 msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
2920 msg
->data
[1] = IPMI_SEND_MSG_CMD
;
2921 msg
->data
[2] = msg
->rsp
[3];
2922 msg
->data
[3] = msg
->rsp
[6];
2923 msg
->data
[4] = ((netfn
+ 1) << 2) | (msg
->rsp
[7] & 0x3);
2924 msg
->data
[5] = ipmb_checksum(&(msg
->data
[3]), 2);
2925 msg
->data
[6] = intf
->channels
[msg
->rsp
[3] & 0xf].address
;
2927 msg
->data
[7] = (msg
->rsp
[7] & 0xfc) | (msg
->rsp
[4] & 0x3);
2928 msg
->data
[8] = msg
->rsp
[8]; /* cmd */
2929 msg
->data
[9] = IPMI_INVALID_CMD_COMPLETION_CODE
;
2930 msg
->data
[10] = ipmb_checksum(&(msg
->data
[6]), 4);
2931 msg
->data_size
= 11;
2936 printk("Invalid command:");
2937 for (m
= 0; m
< msg
->data_size
; m
++)
2938 printk(" %2.2x", msg
->data
[m
]);
2943 handlers
= intf
->handlers
;
2945 handlers
->sender(intf
->send_info
, msg
, 0);
2946 /* We used the message, so return the value
2947 that causes it to not be freed or
2953 /* Deliver the message to the user. */
2954 ipmi_inc_stat(intf
, handled_commands
);
2956 recv_msg
= ipmi_alloc_recv_msg();
2958 /* We couldn't allocate memory for the
2959 message, so requeue it for handling
2962 kref_put(&user
->refcount
, free_user
);
2964 /* Extract the source address from the data. */
2965 ipmb_addr
= (struct ipmi_ipmb_addr
*) &recv_msg
->addr
;
2966 ipmb_addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
2967 ipmb_addr
->slave_addr
= msg
->rsp
[6];
2968 ipmb_addr
->lun
= msg
->rsp
[7] & 3;
2969 ipmb_addr
->channel
= msg
->rsp
[3] & 0xf;
2971 /* Extract the rest of the message information
2972 from the IPMB header.*/
2973 recv_msg
->user
= user
;
2974 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
2975 recv_msg
->msgid
= msg
->rsp
[7] >> 2;
2976 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
2977 recv_msg
->msg
.cmd
= msg
->rsp
[8];
2978 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2980 /* We chop off 10, not 9 bytes because the checksum
2981 at the end also needs to be removed. */
2982 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
2983 memcpy(recv_msg
->msg_data
,
2985 msg
->rsp_size
- 10);
2986 deliver_response(recv_msg
);
2993 static int handle_lan_get_msg_rsp(ipmi_smi_t intf
,
2994 struct ipmi_smi_msg
*msg
)
2996 struct ipmi_lan_addr lan_addr
;
2997 struct ipmi_recv_msg
*recv_msg
;
3000 /* This is 13, not 12, because the response must contain a
3001 * completion code. */
3002 if (msg
->rsp_size
< 13) {
3003 /* Message not big enough, just ignore it. */
3004 ipmi_inc_stat(intf
, invalid_lan_responses
);
3008 if (msg
->rsp
[2] != 0) {
3009 /* An error getting the response, just ignore it. */
3013 lan_addr
.addr_type
= IPMI_LAN_ADDR_TYPE
;
3014 lan_addr
.session_handle
= msg
->rsp
[4];
3015 lan_addr
.remote_SWID
= msg
->rsp
[8];
3016 lan_addr
.local_SWID
= msg
->rsp
[5];
3017 lan_addr
.channel
= msg
->rsp
[3] & 0x0f;
3018 lan_addr
.privilege
= msg
->rsp
[3] >> 4;
3019 lan_addr
.lun
= msg
->rsp
[9] & 3;
3021 /* It's a response from a remote entity. Look up the sequence
3022 number and handle the response. */
3023 if (intf_find_seq(intf
,
3027 (msg
->rsp
[6] >> 2) & (~1),
3028 (struct ipmi_addr
*) &(lan_addr
),
3031 /* We were unable to find the sequence number,
3032 so just nuke the message. */
3033 ipmi_inc_stat(intf
, unhandled_lan_responses
);
3037 memcpy(recv_msg
->msg_data
,
3039 msg
->rsp_size
- 11);
3040 /* The other fields matched, so no need to set them, except
3041 for netfn, which needs to be the response that was
3042 returned, not the request value. */
3043 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
3044 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3045 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
3046 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3047 ipmi_inc_stat(intf
, handled_lan_responses
);
3048 deliver_response(recv_msg
);
3053 static int handle_lan_get_msg_cmd(ipmi_smi_t intf
,
3054 struct ipmi_smi_msg
*msg
)
3056 struct cmd_rcvr
*rcvr
;
3058 unsigned char netfn
;
3061 ipmi_user_t user
= NULL
;
3062 struct ipmi_lan_addr
*lan_addr
;
3063 struct ipmi_recv_msg
*recv_msg
;
3065 if (msg
->rsp_size
< 12) {
3066 /* Message not big enough, just ignore it. */
3067 ipmi_inc_stat(intf
, invalid_commands
);
3071 if (msg
->rsp
[2] != 0) {
3072 /* An error getting the response, just ignore it. */
3076 netfn
= msg
->rsp
[6] >> 2;
3078 chan
= msg
->rsp
[3] & 0xf;
3081 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
3084 kref_get(&user
->refcount
);
3090 /* We didn't find a user, just give up. */
3091 ipmi_inc_stat(intf
, unhandled_commands
);
3093 rv
= 0; /* Don't do anything with these messages, just
3094 allow them to be freed. */
3096 /* Deliver the message to the user. */
3097 ipmi_inc_stat(intf
, handled_commands
);
3099 recv_msg
= ipmi_alloc_recv_msg();
3101 /* We couldn't allocate memory for the
3102 message, so requeue it for handling
3105 kref_put(&user
->refcount
, free_user
);
3107 /* Extract the source address from the data. */
3108 lan_addr
= (struct ipmi_lan_addr
*) &recv_msg
->addr
;
3109 lan_addr
->addr_type
= IPMI_LAN_ADDR_TYPE
;
3110 lan_addr
->session_handle
= msg
->rsp
[4];
3111 lan_addr
->remote_SWID
= msg
->rsp
[8];
3112 lan_addr
->local_SWID
= msg
->rsp
[5];
3113 lan_addr
->lun
= msg
->rsp
[9] & 3;
3114 lan_addr
->channel
= msg
->rsp
[3] & 0xf;
3115 lan_addr
->privilege
= msg
->rsp
[3] >> 4;
3117 /* Extract the rest of the message information
3118 from the IPMB header.*/
3119 recv_msg
->user
= user
;
3120 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
3121 recv_msg
->msgid
= msg
->rsp
[9] >> 2;
3122 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
3123 recv_msg
->msg
.cmd
= msg
->rsp
[10];
3124 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3126 /* We chop off 12, not 11 bytes because the checksum
3127 at the end also needs to be removed. */
3128 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
3129 memcpy(recv_msg
->msg_data
,
3131 msg
->rsp_size
- 12);
3132 deliver_response(recv_msg
);
3139 static void copy_event_into_recv_msg(struct ipmi_recv_msg
*recv_msg
,
3140 struct ipmi_smi_msg
*msg
)
3142 struct ipmi_system_interface_addr
*smi_addr
;
3144 recv_msg
->msgid
= 0;
3145 smi_addr
= (struct ipmi_system_interface_addr
*) &(recv_msg
->addr
);
3146 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3147 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
3148 smi_addr
->lun
= msg
->rsp
[0] & 3;
3149 recv_msg
->recv_type
= IPMI_ASYNC_EVENT_RECV_TYPE
;
3150 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
3151 recv_msg
->msg
.cmd
= msg
->rsp
[1];
3152 memcpy(recv_msg
->msg_data
, &(msg
->rsp
[3]), msg
->rsp_size
- 3);
3153 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3154 recv_msg
->msg
.data_len
= msg
->rsp_size
- 3;
3157 static int handle_read_event_rsp(ipmi_smi_t intf
,
3158 struct ipmi_smi_msg
*msg
)
3160 struct ipmi_recv_msg
*recv_msg
, *recv_msg2
;
3161 struct list_head msgs
;
3164 int deliver_count
= 0;
3165 unsigned long flags
;
3167 if (msg
->rsp_size
< 19) {
3168 /* Message is too small to be an IPMB event. */
3169 ipmi_inc_stat(intf
, invalid_events
);
3173 if (msg
->rsp
[2] != 0) {
3174 /* An error getting the event, just ignore it. */
3178 INIT_LIST_HEAD(&msgs
);
3180 spin_lock_irqsave(&intf
->events_lock
, flags
);
3182 ipmi_inc_stat(intf
, events
);
3184 /* Allocate and fill in one message for every user that is getting
3187 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
3188 if (!user
->gets_events
)
3191 recv_msg
= ipmi_alloc_recv_msg();
3194 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
,
3196 list_del(&recv_msg
->link
);
3197 ipmi_free_recv_msg(recv_msg
);
3199 /* We couldn't allocate memory for the
3200 message, so requeue it for handling
3208 copy_event_into_recv_msg(recv_msg
, msg
);
3209 recv_msg
->user
= user
;
3210 kref_get(&user
->refcount
);
3211 list_add_tail(&(recv_msg
->link
), &msgs
);
3215 if (deliver_count
) {
3216 /* Now deliver all the messages. */
3217 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
, link
) {
3218 list_del(&recv_msg
->link
);
3219 deliver_response(recv_msg
);
3221 } else if (intf
->waiting_events_count
< MAX_EVENTS_IN_QUEUE
) {
3222 /* No one to receive the message, put it in queue if there's
3223 not already too many things in the queue. */
3224 recv_msg
= ipmi_alloc_recv_msg();
3226 /* We couldn't allocate memory for the
3227 message, so requeue it for handling
3233 copy_event_into_recv_msg(recv_msg
, msg
);
3234 list_add_tail(&(recv_msg
->link
), &(intf
->waiting_events
));
3235 intf
->waiting_events_count
++;
3236 } else if (!intf
->event_msg_printed
) {
3237 /* There's too many things in the queue, discard this
3239 printk(KERN_WARNING PFX
"Event queue full, discarding"
3240 " incoming events\n");
3241 intf
->event_msg_printed
= 1;
3245 spin_unlock_irqrestore(&(intf
->events_lock
), flags
);
3250 static int handle_bmc_rsp(ipmi_smi_t intf
,
3251 struct ipmi_smi_msg
*msg
)
3253 struct ipmi_recv_msg
*recv_msg
;
3254 struct ipmi_user
*user
;
3256 recv_msg
= (struct ipmi_recv_msg
*) msg
->user_data
;
3257 if (recv_msg
== NULL
)
3259 printk(KERN_WARNING
"IPMI message received with no owner. This\n"
3260 "could be because of a malformed message, or\n"
3261 "because of a hardware error. Contact your\n"
3262 "hardware vender for assistance\n");
3266 user
= recv_msg
->user
;
3267 /* Make sure the user still exists. */
3268 if (user
&& !user
->valid
) {
3269 /* The user for the message went away, so give up. */
3270 ipmi_inc_stat(intf
, unhandled_local_responses
);
3271 ipmi_free_recv_msg(recv_msg
);
3273 struct ipmi_system_interface_addr
*smi_addr
;
3275 ipmi_inc_stat(intf
, handled_local_responses
);
3276 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3277 recv_msg
->msgid
= msg
->msgid
;
3278 smi_addr
= ((struct ipmi_system_interface_addr
*)
3280 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3281 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
3282 smi_addr
->lun
= msg
->rsp
[0] & 3;
3283 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
3284 recv_msg
->msg
.cmd
= msg
->rsp
[1];
3285 memcpy(recv_msg
->msg_data
,
3288 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3289 recv_msg
->msg
.data_len
= msg
->rsp_size
- 2;
3290 deliver_response(recv_msg
);
3296 /* Handle a new message. Return 1 if the message should be requeued,
3297 0 if the message should be freed, or -1 if the message should not
3298 be freed or requeued. */
3299 static int handle_new_recv_msg(ipmi_smi_t intf
,
3300 struct ipmi_smi_msg
*msg
)
3308 for (m
= 0; m
< msg
->rsp_size
; m
++)
3309 printk(" %2.2x", msg
->rsp
[m
]);
3312 if (msg
->rsp_size
< 2) {
3313 /* Message is too small to be correct. */
3314 printk(KERN_WARNING PFX
"BMC returned to small a message"
3315 " for netfn %x cmd %x, got %d bytes\n",
3316 (msg
->data
[0] >> 2) | 1, msg
->data
[1], msg
->rsp_size
);
3318 /* Generate an error response for the message. */
3319 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
3320 msg
->rsp
[1] = msg
->data
[1];
3321 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
3323 } else if (((msg
->rsp
[0] >> 2) != ((msg
->data
[0] >> 2) | 1))/* Netfn */
3324 || (msg
->rsp
[1] != msg
->data
[1])) /* Command */
3326 /* The response is not even marginally correct. */
3327 printk(KERN_WARNING PFX
"BMC returned incorrect response,"
3328 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3329 (msg
->data
[0] >> 2) | 1, msg
->data
[1],
3330 msg
->rsp
[0] >> 2, msg
->rsp
[1]);
3332 /* Generate an error response for the message. */
3333 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
3334 msg
->rsp
[1] = msg
->data
[1];
3335 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
3339 if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3340 && (msg
->rsp
[1] == IPMI_SEND_MSG_CMD
)
3341 && (msg
->user_data
!= NULL
))
3343 /* It's a response to a response we sent. For this we
3344 deliver a send message response to the user. */
3345 struct ipmi_recv_msg
*recv_msg
= msg
->user_data
;
3348 if (msg
->rsp_size
< 2)
3349 /* Message is too small to be correct. */
3352 chan
= msg
->data
[2] & 0x0f;
3353 if (chan
>= IPMI_MAX_CHANNELS
)
3354 /* Invalid channel number */
3360 /* Make sure the user still exists. */
3361 if (!recv_msg
->user
|| !recv_msg
->user
->valid
)
3364 recv_msg
->recv_type
= IPMI_RESPONSE_RESPONSE_TYPE
;
3365 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3366 recv_msg
->msg
.data_len
= 1;
3367 recv_msg
->msg_data
[0] = msg
->rsp
[2];
3368 deliver_response(recv_msg
);
3369 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3370 && (msg
->rsp
[1] == IPMI_GET_MSG_CMD
))
3372 /* It's from the receive queue. */
3373 chan
= msg
->rsp
[3] & 0xf;
3374 if (chan
>= IPMI_MAX_CHANNELS
) {
3375 /* Invalid channel number */
3380 switch (intf
->channels
[chan
].medium
) {
3381 case IPMI_CHANNEL_MEDIUM_IPMB
:
3382 if (msg
->rsp
[4] & 0x04) {
3383 /* It's a response, so find the
3384 requesting message and send it up. */
3385 requeue
= handle_ipmb_get_msg_rsp(intf
, msg
);
3387 /* It's a command to the SMS from some other
3388 entity. Handle that. */
3389 requeue
= handle_ipmb_get_msg_cmd(intf
, msg
);
3393 case IPMI_CHANNEL_MEDIUM_8023LAN
:
3394 case IPMI_CHANNEL_MEDIUM_ASYNC
:
3395 if (msg
->rsp
[6] & 0x04) {
3396 /* It's a response, so find the
3397 requesting message and send it up. */
3398 requeue
= handle_lan_get_msg_rsp(intf
, msg
);
3400 /* It's a command to the SMS from some other
3401 entity. Handle that. */
3402 requeue
= handle_lan_get_msg_cmd(intf
, msg
);
3407 /* We don't handle the channel type, so just
3408 * free the message. */
3412 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3413 && (msg
->rsp
[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD
))
3415 /* It's an asyncronous event. */
3416 requeue
= handle_read_event_rsp(intf
, msg
);
3418 /* It's a response from the local BMC. */
3419 requeue
= handle_bmc_rsp(intf
, msg
);
3426 /* Handle a new message from the lower layer. */
3427 void ipmi_smi_msg_received(ipmi_smi_t intf
,
3428 struct ipmi_smi_msg
*msg
)
3430 unsigned long flags
= 0; /* keep us warning-free. */
3432 int run_to_completion
;
3435 if ((msg
->data_size
>= 2)
3436 && (msg
->data
[0] == (IPMI_NETFN_APP_REQUEST
<< 2))
3437 && (msg
->data
[1] == IPMI_SEND_MSG_CMD
)
3438 && (msg
->user_data
== NULL
))
3440 /* This is the local response to a command send, start
3441 the timer for these. The user_data will not be
3442 NULL if this is a response send, and we will let
3443 response sends just go through. */
3445 /* Check for errors, if we get certain errors (ones
3446 that mean basically we can try again later), we
3447 ignore them and start the timer. Otherwise we
3448 report the error immediately. */
3449 if ((msg
->rsp_size
>= 3) && (msg
->rsp
[2] != 0)
3450 && (msg
->rsp
[2] != IPMI_NODE_BUSY_ERR
)
3451 && (msg
->rsp
[2] != IPMI_LOST_ARBITRATION_ERR
)
3452 && (msg
->rsp
[2] != IPMI_BUS_ERR
)
3453 && (msg
->rsp
[2] != IPMI_NAK_ON_WRITE_ERR
))
3455 int chan
= msg
->rsp
[3] & 0xf;
3457 /* Got an error sending the message, handle it. */
3458 if (chan
>= IPMI_MAX_CHANNELS
)
3459 ; /* This shouldn't happen */
3460 else if ((intf
->channels
[chan
].medium
3461 == IPMI_CHANNEL_MEDIUM_8023LAN
)
3462 || (intf
->channels
[chan
].medium
3463 == IPMI_CHANNEL_MEDIUM_ASYNC
))
3464 ipmi_inc_stat(intf
, sent_lan_command_errs
);
3466 ipmi_inc_stat(intf
, sent_ipmb_command_errs
);
3467 intf_err_seq(intf
, msg
->msgid
, msg
->rsp
[2]);
3469 /* The message was sent, start the timer. */
3470 intf_start_seq_timer(intf
, msg
->msgid
);
3473 ipmi_free_smi_msg(msg
);
3477 /* To preserve message order, if the list is not empty, we
3478 tack this message onto the end of the list. */
3479 run_to_completion
= intf
->run_to_completion
;
3480 if (!run_to_completion
)
3481 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3482 if (!list_empty(&intf
->waiting_msgs
)) {
3483 list_add_tail(&msg
->link
, &intf
->waiting_msgs
);
3484 if (!run_to_completion
)
3485 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3488 if (!run_to_completion
)
3489 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3491 rv
= handle_new_recv_msg(intf
, msg
);
3493 /* Could not handle the message now, just add it to a
3494 list to handle later. */
3495 run_to_completion
= intf
->run_to_completion
;
3496 if (!run_to_completion
)
3497 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3498 list_add_tail(&msg
->link
, &intf
->waiting_msgs
);
3499 if (!run_to_completion
)
3500 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3501 } else if (rv
== 0) {
3502 ipmi_free_smi_msg(msg
);
3509 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf
)
3514 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
3515 if (!user
->handler
->ipmi_watchdog_pretimeout
)
3518 user
->handler
->ipmi_watchdog_pretimeout(user
->handler_data
);
3524 static struct ipmi_smi_msg
*
3525 smi_from_recv_msg(ipmi_smi_t intf
, struct ipmi_recv_msg
*recv_msg
,
3526 unsigned char seq
, long seqid
)
3528 struct ipmi_smi_msg
*smi_msg
= ipmi_alloc_smi_msg();
3530 /* If we can't allocate the message, then just return, we
3531 get 4 retries, so this should be ok. */
3534 memcpy(smi_msg
->data
, recv_msg
->msg
.data
, recv_msg
->msg
.data_len
);
3535 smi_msg
->data_size
= recv_msg
->msg
.data_len
;
3536 smi_msg
->msgid
= STORE_SEQ_IN_MSGID(seq
, seqid
);
3542 for (m
= 0; m
< smi_msg
->data_size
; m
++)
3543 printk(" %2.2x", smi_msg
->data
[m
]);
3550 static void check_msg_timeout(ipmi_smi_t intf
, struct seq_table
*ent
,
3551 struct list_head
*timeouts
, long timeout_period
,
3552 int slot
, unsigned long *flags
)
3554 struct ipmi_recv_msg
*msg
;
3555 struct ipmi_smi_handlers
*handlers
;
3557 if (intf
->intf_num
== -1)
3563 ent
->timeout
-= timeout_period
;
3564 if (ent
->timeout
> 0)
3567 if (ent
->retries_left
== 0) {
3568 /* The message has used all its retries. */
3570 msg
= ent
->recv_msg
;
3571 list_add_tail(&msg
->link
, timeouts
);
3573 ipmi_inc_stat(intf
, timed_out_ipmb_broadcasts
);
3574 else if (ent
->recv_msg
->addr
.addr_type
== IPMI_LAN_ADDR_TYPE
)
3575 ipmi_inc_stat(intf
, timed_out_lan_commands
);
3577 ipmi_inc_stat(intf
, timed_out_ipmb_commands
);
3579 struct ipmi_smi_msg
*smi_msg
;
3580 /* More retries, send again. */
3582 /* Start with the max timer, set to normal
3583 timer after the message is sent. */
3584 ent
->timeout
= MAX_MSG_TIMEOUT
;
3585 ent
->retries_left
--;
3586 if (ent
->recv_msg
->addr
.addr_type
== IPMI_LAN_ADDR_TYPE
)
3587 ipmi_inc_stat(intf
, retransmitted_lan_commands
);
3589 ipmi_inc_stat(intf
, retransmitted_ipmb_commands
);
3591 smi_msg
= smi_from_recv_msg(intf
, ent
->recv_msg
, slot
,
3596 spin_unlock_irqrestore(&intf
->seq_lock
, *flags
);
3598 /* Send the new message. We send with a zero
3599 * priority. It timed out, I doubt time is
3600 * that critical now, and high priority
3601 * messages are really only for messages to the
3602 * local MC, which don't get resent. */
3603 handlers
= intf
->handlers
;
3605 intf
->handlers
->sender(intf
->send_info
,
3608 ipmi_free_smi_msg(smi_msg
);
3610 spin_lock_irqsave(&intf
->seq_lock
, *flags
);
3614 static void ipmi_timeout_handler(long timeout_period
)
3617 struct list_head timeouts
;
3618 struct ipmi_recv_msg
*msg
, *msg2
;
3619 struct ipmi_smi_msg
*smi_msg
, *smi_msg2
;
3620 unsigned long flags
;
3624 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3625 /* See if any waiting messages need to be processed. */
3626 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3627 list_for_each_entry_safe(smi_msg
, smi_msg2
,
3628 &intf
->waiting_msgs
, link
) {
3629 if (!handle_new_recv_msg(intf
, smi_msg
)) {
3630 list_del(&smi_msg
->link
);
3631 ipmi_free_smi_msg(smi_msg
);
3633 /* To preserve message order, quit if we
3634 can't handle a message. */
3638 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3640 /* Go through the seq table and find any messages that
3641 have timed out, putting them in the timeouts
3643 INIT_LIST_HEAD(&timeouts
);
3644 spin_lock_irqsave(&intf
->seq_lock
, flags
);
3645 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++)
3646 check_msg_timeout(intf
, &(intf
->seq_table
[i
]),
3647 &timeouts
, timeout_period
, i
,
3649 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
3651 list_for_each_entry_safe(msg
, msg2
, &timeouts
, link
)
3652 deliver_err_response(msg
, IPMI_TIMEOUT_COMPLETION_CODE
);
3655 * Maintenance mode handling. Check the timeout
3656 * optimistically before we claim the lock. It may
3657 * mean a timeout gets missed occasionally, but that
3658 * only means the timeout gets extended by one period
3659 * in that case. No big deal, and it avoids the lock
3662 if (intf
->auto_maintenance_timeout
> 0) {
3663 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
3664 if (intf
->auto_maintenance_timeout
> 0) {
3665 intf
->auto_maintenance_timeout
3667 if (!intf
->maintenance_mode
3668 && (intf
->auto_maintenance_timeout
<= 0))
3670 intf
->maintenance_mode_enable
= 0;
3671 maintenance_mode_update(intf
);
3674 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
,
3681 static void ipmi_request_event(void)
3684 struct ipmi_smi_handlers
*handlers
;
3687 /* Called from the timer, no need to check if handlers is
3689 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3690 /* No event requests when in maintenance mode. */
3691 if (intf
->maintenance_mode_enable
)
3694 handlers
= intf
->handlers
;
3696 handlers
->request_events(intf
->send_info
);
3701 static struct timer_list ipmi_timer
;
3703 /* Call every ~100 ms. */
3704 #define IPMI_TIMEOUT_TIME 100
3706 /* How many jiffies does it take to get to the timeout time. */
3707 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3709 /* Request events from the queue every second (this is the number of
3710 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3711 future, IPMI will add a way to know immediately if an event is in
3712 the queue and this silliness can go away. */
3713 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3715 static atomic_t stop_operation
;
3716 static unsigned int ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
3718 static void ipmi_timeout(unsigned long data
)
3720 if (atomic_read(&stop_operation
))
3724 if (ticks_to_req_ev
== 0) {
3725 ipmi_request_event();
3726 ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
3729 ipmi_timeout_handler(IPMI_TIMEOUT_TIME
);
3731 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
3735 static atomic_t smi_msg_inuse_count
= ATOMIC_INIT(0);
3736 static atomic_t recv_msg_inuse_count
= ATOMIC_INIT(0);
3738 /* FIXME - convert these to slabs. */
3739 static void free_smi_msg(struct ipmi_smi_msg
*msg
)
3741 atomic_dec(&smi_msg_inuse_count
);
3745 struct ipmi_smi_msg
*ipmi_alloc_smi_msg(void)
3747 struct ipmi_smi_msg
*rv
;
3748 rv
= kmalloc(sizeof(struct ipmi_smi_msg
), GFP_ATOMIC
);
3750 rv
->done
= free_smi_msg
;
3751 rv
->user_data
= NULL
;
3752 atomic_inc(&smi_msg_inuse_count
);
3757 static void free_recv_msg(struct ipmi_recv_msg
*msg
)
3759 atomic_dec(&recv_msg_inuse_count
);
3763 struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void)
3765 struct ipmi_recv_msg
*rv
;
3767 rv
= kmalloc(sizeof(struct ipmi_recv_msg
), GFP_ATOMIC
);
3770 rv
->done
= free_recv_msg
;
3771 atomic_inc(&recv_msg_inuse_count
);
3776 void ipmi_free_recv_msg(struct ipmi_recv_msg
*msg
)
3779 kref_put(&msg
->user
->refcount
, free_user
);
3783 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op completion handler: the panic-path SMI message is a static
   object (see send_panic_events) and must not be freed. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}
/* No-op completion handler: the panic-path receive message is a static
   object (see send_panic_events) and must not be freed. */
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}
3793 #ifdef CONFIG_IPMI_PANIC_STRING
3794 static void event_receiver_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3796 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3797 && (msg
->msg
.netfn
== IPMI_NETFN_SENSOR_EVENT_RESPONSE
)
3798 && (msg
->msg
.cmd
== IPMI_GET_EVENT_RECEIVER_CMD
)
3799 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
))
3801 /* A get event receiver command, save it. */
3802 intf
->event_receiver
= msg
->msg
.data
[1];
3803 intf
->event_receiver_lun
= msg
->msg
.data
[2] & 0x3;
3807 static void device_id_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3809 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3810 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
3811 && (msg
->msg
.cmd
== IPMI_GET_DEVICE_ID_CMD
)
3812 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
))
3814 /* A get device id command, save if we are an event
3815 receiver or generator. */
3816 intf
->local_sel_device
= (msg
->msg
.data
[6] >> 2) & 1;
3817 intf
->local_event_generator
= (msg
->msg
.data
[6] >> 5) & 1;
3822 static void send_panic_events(char *str
)
3824 struct kernel_ipmi_msg msg
;
3826 unsigned char data
[16];
3827 struct ipmi_system_interface_addr
*si
;
3828 struct ipmi_addr addr
;
3829 struct ipmi_smi_msg smi_msg
;
3830 struct ipmi_recv_msg recv_msg
;
3832 si
= (struct ipmi_system_interface_addr
*) &addr
;
3833 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3834 si
->channel
= IPMI_BMC_CHANNEL
;
3837 /* Fill in an event telling that we have failed. */
3838 msg
.netfn
= 0x04; /* Sensor or Event. */
3839 msg
.cmd
= 2; /* Platform event command. */
3842 data
[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3843 data
[1] = 0x03; /* This is for IPMI 1.0. */
3844 data
[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3845 data
[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3846 data
[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3848 /* Put a few breadcrumbs in. Hopefully later we can add more things
3849 to make the panic events more useful. */
3856 smi_msg
.done
= dummy_smi_done_handler
;
3857 recv_msg
.done
= dummy_recv_done_handler
;
3859 /* For every registered interface, send the event. */
3860 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3861 if (!intf
->handlers
)
3862 /* Interface is not ready. */
3865 intf
->run_to_completion
= 1;
3866 /* Send the event announcing the panic. */
3867 intf
->handlers
->set_run_to_completion(intf
->send_info
, 1);
3868 i_ipmi_request(NULL
,
3877 intf
->channels
[0].address
,
3878 intf
->channels
[0].lun
,
3879 0, 1); /* Don't retry, and don't wait. */
3882 #ifdef CONFIG_IPMI_PANIC_STRING
3883 /* On every interface, dump a bunch of OEM event holding the
3888 /* For every registered interface, send the event. */
3889 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3891 struct ipmi_ipmb_addr
*ipmb
;
3894 if (intf
->intf_num
== -1)
3895 /* Interface was not ready yet. */
3899 * intf_num is used as an marker to tell if the
3900 * interface is valid. Thus we need a read barrier to
3901 * make sure data fetched before checking intf_num
3906 /* First job here is to figure out where to send the
3907 OEM events. There's no way in IPMI to send OEM
3908 events using an event send command, so we have to
3909 find the SEL to put them in and stick them in
3912 /* Get capabilities from the get device id. */
3913 intf
->local_sel_device
= 0;
3914 intf
->local_event_generator
= 0;
3915 intf
->event_receiver
= 0;
3917 /* Request the device info from the local MC. */
3918 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
3919 msg
.cmd
= IPMI_GET_DEVICE_ID_CMD
;
3922 intf
->null_user_handler
= device_id_fetcher
;
3923 i_ipmi_request(NULL
,
3932 intf
->channels
[0].address
,
3933 intf
->channels
[0].lun
,
3934 0, 1); /* Don't retry, and don't wait. */
3936 if (intf
->local_event_generator
) {
3937 /* Request the event receiver from the local MC. */
3938 msg
.netfn
= IPMI_NETFN_SENSOR_EVENT_REQUEST
;
3939 msg
.cmd
= IPMI_GET_EVENT_RECEIVER_CMD
;
3942 intf
->null_user_handler
= event_receiver_fetcher
;
3943 i_ipmi_request(NULL
,
3952 intf
->channels
[0].address
,
3953 intf
->channels
[0].lun
,
3954 0, 1); /* no retry, and no wait. */
3956 intf
->null_user_handler
= NULL
;
3958 /* Validate the event receiver. The low bit must not
3959 be 1 (it must be a valid IPMB address), it cannot
3960 be zero, and it must not be my address. */
3961 if (((intf
->event_receiver
& 1) == 0)
3962 && (intf
->event_receiver
!= 0)
3963 && (intf
->event_receiver
!= intf
->channels
[0].address
))
3965 /* The event receiver is valid, send an IPMB
3967 ipmb
= (struct ipmi_ipmb_addr
*) &addr
;
3968 ipmb
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
3969 ipmb
->channel
= 0; /* FIXME - is this right? */
3970 ipmb
->lun
= intf
->event_receiver_lun
;
3971 ipmb
->slave_addr
= intf
->event_receiver
;
3972 } else if (intf
->local_sel_device
) {
3973 /* The event receiver was not valid (or was
3974 me), but I am an SEL device, just dump it
3976 si
= (struct ipmi_system_interface_addr
*) &addr
;
3977 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3978 si
->channel
= IPMI_BMC_CHANNEL
;
3981 continue; /* No where to send the event. */
3984 msg
.netfn
= IPMI_NETFN_STORAGE_REQUEST
; /* Storage. */
3985 msg
.cmd
= IPMI_ADD_SEL_ENTRY_CMD
;
3991 int size
= strlen(p
);
3997 data
[2] = 0xf0; /* OEM event without timestamp. */
3998 data
[3] = intf
->channels
[0].address
;
3999 data
[4] = j
++; /* sequence # */
4000 /* Always give 11 bytes, so strncpy will fill
4001 it with zeroes for me. */
4002 strncpy(data
+5, p
, 11);
4005 i_ipmi_request(NULL
,
4014 intf
->channels
[0].address
,
4015 intf
->channels
[0].lun
,
4016 0, 1); /* no retry, and no wait. */
4019 #endif /* CONFIG_IPMI_PANIC_STRING */
4021 #endif /* CONFIG_IPMI_PANIC_EVENT */
4023 static int has_panicked
;
4025 static int panic_event(struct notifier_block
*this,
4026 unsigned long event
,
4035 /* For every registered interface, set it to run to completion. */
4036 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
4037 if (!intf
->handlers
)
4038 /* Interface is not ready. */
4041 intf
->run_to_completion
= 1;
4042 intf
->handlers
->set_run_to_completion(intf
->send_info
, 1);
4045 #ifdef CONFIG_IPMI_PANIC_EVENT
4046 send_panic_events(ptr
);
4052 static struct notifier_block panic_block
= {
4053 .notifier_call
= panic_event
,
4055 .priority
= 200 /* priority: INT_MAX >= x >= 0 */
4058 static int ipmi_init_msghandler(void)
4065 rv
= driver_register(&ipmidriver
);
4067 printk(KERN_ERR PFX
"Could not register IPMI driver\n");
4071 printk(KERN_INFO
"ipmi message handler version "
4072 IPMI_DRIVER_VERSION
"\n");
4074 #ifdef CONFIG_PROC_FS
4075 proc_ipmi_root
= proc_mkdir("ipmi", NULL
);
4076 if (!proc_ipmi_root
) {
4077 printk(KERN_ERR PFX
"Unable to create IPMI proc dir");
4081 proc_ipmi_root
->owner
= THIS_MODULE
;
4082 #endif /* CONFIG_PROC_FS */
4084 setup_timer(&ipmi_timer
, ipmi_timeout
, 0);
4085 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
4087 atomic_notifier_chain_register(&panic_notifier_list
, &panic_block
);
/*
 * Module entry point: delegates to ipmi_init_msghandler().  The
 * function's braces and return statement fall on original lines
 * missing from this chunk.
 */
4094 static __init
int ipmi_init_msghandler_mod(void)
4096 ipmi_init_msghandler();
/*
 * Module exit: unwind everything ipmi_init_msghandler() set up --
 * unhook the panic notifier, stop the maintenance timer, remove the
 * /proc/ipmi root, unregister the driver, and warn if any SMI or recv
 * message buffers leaked.
 *
 * NOTE(review): local declarations, the early-return guard, closing
 * braces, and the count arguments to the warning printks fall on
 * original lines missing from this chunk -- consult the full file.
 */
4100 static __exit
void cleanup_ipmi(void)
4107 atomic_notifier_chain_unregister(&panic_notifier_list
, &panic_block
);
4109 /* This can't be called if any interfaces exist, so no worry about
4110 shutting down the interfaces. */
4112 /* Tell the timer to stop, then wait for it to stop. This avoids
4113 problems with race conditions removing the timer here. */
4114 atomic_inc(&stop_operation
);
4115 del_timer_sync(&ipmi_timer
);
4117 #ifdef CONFIG_PROC_FS
4118 remove_proc_entry(proc_ipmi_root
->name
, NULL
);
4119 #endif /* CONFIG_PROC_FS */
4121 driver_unregister(&ipmidriver
);
4125 /* Check for buffer leaks. */
4126 count
= atomic_read(&smi_msg_inuse_count
);
4128 printk(KERN_WARNING PFX
"SMI message count %d at exit\n",
4130 count
= atomic_read(&recv_msg_inuse_count
);
4132 printk(KERN_WARNING PFX
"recv message count %d at exit\n",
4135 module_exit(cleanup_ipmi
);
4137 module_init(ipmi_init_msghandler_mod
);
4138 MODULE_LICENSE("GPL");
4139 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4140 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4141 MODULE_VERSION(IPMI_DRIVER_VERSION
);
4143 EXPORT_SYMBOL(ipmi_create_user
);
4144 EXPORT_SYMBOL(ipmi_destroy_user
);
4145 EXPORT_SYMBOL(ipmi_get_version
);
4146 EXPORT_SYMBOL(ipmi_request_settime
);
4147 EXPORT_SYMBOL(ipmi_request_supply_msgs
);
4148 EXPORT_SYMBOL(ipmi_poll_interface
);
4149 EXPORT_SYMBOL(ipmi_register_smi
);
4150 EXPORT_SYMBOL(ipmi_unregister_smi
);
4151 EXPORT_SYMBOL(ipmi_register_for_cmd
);
4152 EXPORT_SYMBOL(ipmi_unregister_for_cmd
);
4153 EXPORT_SYMBOL(ipmi_smi_msg_received
);
4154 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout
);
4155 EXPORT_SYMBOL(ipmi_alloc_smi_msg
);
4156 EXPORT_SYMBOL(ipmi_addr_length
);
4157 EXPORT_SYMBOL(ipmi_validate_addr
);
4158 EXPORT_SYMBOL(ipmi_set_gets_events
);
4159 EXPORT_SYMBOL(ipmi_smi_watcher_register
);
4160 EXPORT_SYMBOL(ipmi_smi_watcher_unregister
);
4161 EXPORT_SYMBOL(ipmi_set_my_address
);
4162 EXPORT_SYMBOL(ipmi_get_my_address
);
4163 EXPORT_SYMBOL(ipmi_set_my_LUN
);
4164 EXPORT_SYMBOL(ipmi_get_my_LUN
);
4165 EXPORT_SYMBOL(ipmi_smi_add_proc_entry
);
4166 EXPORT_SYMBOL(ipmi_free_recv_msg
);