4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "38.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 struct proc_dir_entry *proc_ipmi_root = NULL;
60 EXPORT_SYMBOL(proc_ipmi_root);
61 #endif /* CONFIG_PROC_FS */
63 #define MAX_EVENTS_IN_QUEUE 25
65 /* Don't let a message sit in a queue forever, always time it with at least
66 the max message timer. This is in milliseconds. */
67 #define MAX_MSG_TIMEOUT 60000
71 * The main "user" data structure.
75 struct list_head link;
77 /* Set to "0" when the user is destroyed. */
82 /* The upper layer that handles receive messages. */
83 struct ipmi_user_hndl *handler;
86 /* The interface this user is bound to. */
89 /* Does this interface receive IPMI events? */
95 struct list_head link;
102 * This is used to form a linked list during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
133 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
137 seq = ((msgid >> 26) & 0x3f); \
138 seqid = (msgid & 0x3fffff); \
141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
165 #define IPMI_IPMB_NUM_SEQ 64
166 #define IPMI_MAX_CHANNELS 16
169 /* What interface number are we? */
172 struct kref refcount;
174 /* The list of upper layers that are using me. seq_lock
176 struct list_head users;
178 /* Used for wake ups at startup. */
179 wait_queue_head_t waitq;
181 /* The IPMI version of the BMC on the other end. */
182 unsigned char version_major;
183 unsigned char version_minor;
185 /* This is the lower-layer's sender routine. */
186 struct ipmi_smi_handlers *handlers;
189 #ifdef CONFIG_PROC_FS
190 /* A list of proc entries for this interface. This does not
191 need a lock, only one thread creates it and only one thread
193 spinlock_t proc_entry_lock;
194 struct ipmi_proc_entry *proc_entries;
197 /* A table of sequence numbers for this interface. We use the
198 sequence numbers for IPMB messages that go out of the
199 interface to match them up with their responses. A routine
200 is called periodically to time the items in this list. */
202 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
205 /* Messages that were delayed for some reason (out of memory,
206 for instance), will go in here to be processed later in a
207 periodic timer interrupt. */
208 spinlock_t waiting_msgs_lock;
209 struct list_head waiting_msgs;
211 /* The list of command receivers that are registered for commands
212 on this interface. */
213 struct semaphore cmd_rcvrs_lock;
214 struct list_head cmd_rcvrs;
216 /* Events that were queued because no one was there to receive
218 spinlock_t events_lock; /* For dealing with event stuff. */
219 struct list_head waiting_events;
220 unsigned int waiting_events_count; /* How many events in queue? */
222 /* The event receiver for my BMC, only really used at panic
223 shutdown as a place to store this. */
224 unsigned char event_receiver;
225 unsigned char event_receiver_lun;
226 unsigned char local_sel_device;
227 unsigned char local_event_generator;
229 /* A cheap hack, if this is non-null and a message to an
230 interface comes in with a NULL user, call this routine with
231 it. Note that the message will still be freed by the
232 caller. This only works on the system interface. */
233 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
235 /* When we are scanning the channels for an SMI, this will
236 tell which channel we are scanning. */
239 /* Channel information */
240 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
243 struct proc_dir_entry *proc_dir;
244 char proc_dir_name[10];
246 spinlock_t counter_lock; /* For making counters atomic. */
248 /* Commands we got that were invalid. */
249 unsigned int sent_invalid_commands;
251 /* Commands we sent to the MC. */
252 unsigned int sent_local_commands;
253 /* Responses from the MC that were delivered to a user. */
254 unsigned int handled_local_responses;
255 /* Responses from the MC that were not delivered to a user. */
256 unsigned int unhandled_local_responses;
258 /* Commands we sent out to the IPMB bus. */
259 unsigned int sent_ipmb_commands;
260 /* Commands sent on the IPMB that had errors on the SEND CMD */
261 unsigned int sent_ipmb_command_errs;
262 /* Each retransmit increments this count. */
263 unsigned int retransmitted_ipmb_commands;
264 /* When a message times out (runs out of retransmits) this is
266 unsigned int timed_out_ipmb_commands;
268 /* This is like above, but for broadcasts. Broadcasts are
269 *not* included in the above count (they are expected to
271 unsigned int timed_out_ipmb_broadcasts;
273 /* Responses I have sent to the IPMB bus. */
274 unsigned int sent_ipmb_responses;
276 /* The response was delivered to the user. */
277 unsigned int handled_ipmb_responses;
278 /* The response had invalid data in it. */
279 unsigned int invalid_ipmb_responses;
280 /* The response didn't have anyone waiting for it. */
281 unsigned int unhandled_ipmb_responses;
283 /* Commands we sent out to the IPMB bus. */
284 unsigned int sent_lan_commands;
285 /* Commands sent on the IPMB that had errors on the SEND CMD */
286 unsigned int sent_lan_command_errs;
287 /* Each retransmit increments this count. */
288 unsigned int retransmitted_lan_commands;
289 /* When a message times out (runs out of retransmits) this is
291 unsigned int timed_out_lan_commands;
293 /* Responses I have sent to the IPMB bus. */
294 unsigned int sent_lan_responses;
296 /* The response was delivered to the user. */
297 unsigned int handled_lan_responses;
298 /* The response had invalid data in it. */
299 unsigned int invalid_lan_responses;
300 /* The response didn't have anyone waiting for it. */
301 unsigned int unhandled_lan_responses;
303 /* The command was delivered to the user. */
304 unsigned int handled_commands;
305 /* The command had invalid data in it. */
306 unsigned int invalid_commands;
307 /* The command didn't have anyone waiting for it. */
308 unsigned int unhandled_commands;
310 /* Invalid data in an event. */
311 unsigned int invalid_events;
312 /* Events that were received with the proper format. */
316 /* Used to mark an interface entry that cannot be used but is not a
317 * free entry, either, primarily used at creation and deletion time so
318 * a slot doesn't get reused too quickly. */
319 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
320 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
321 || (i == IPMI_INVALID_INTERFACE_ENTRY))
323 #define MAX_IPMI_INTERFACES 4
324 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
326 /* Directly protects the ipmi_interfaces data structure. */
327 static DEFINE_SPINLOCK(interfaces_lock);
329 /* List of watchers that want to know when smi's are added and
331 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
332 static DECLARE_RWSEM(smi_watchers_sem);
335 static void free_recv_msg_list(struct list_head *q)
337 struct ipmi_recv_msg *msg, *msg2;
339 list_for_each_entry_safe(msg, msg2, q, link) {
340 list_del(&msg->link);
341 ipmi_free_recv_msg(msg);
/* Tear down per-interface data: flush queued messages and events,
 * detach every registered command receiver, and release any receive
 * message still parked in the sequence table.
 * NOTE(review): this chunk appears to have lines elided (function
 * braces, the per-rcvr free in the final list walk, and likely a
 * synchronize_rcu() before freeing) -- confirm against the full file. */
345 static void clean_up_interface_data(ipmi_smi_t intf)
348 struct cmd_rcvr *rcvr, *rcvr2;
349 struct list_head list;
/* Drop anything still waiting to be delivered. */
351 free_recv_msg_list(&intf->waiting_msgs);
352 free_recv_msg_list(&intf->waiting_events);
354 /* Wholesale remove all the entries from the list in the
355 * interface and wait for RCU to know that none are in use. */
356 down(&intf->cmd_rcvrs_lock);
/* Splice the whole cmd_rcvrs list onto a local head under the lock,
 * then empty the interface's list; RCU readers keep a consistent
 * view while this happens. */
357 list_add_rcu(&list, &intf->cmd_rcvrs);
358 list_del_rcu(&intf->cmd_rcvrs);
359 up(&intf->cmd_rcvrs_lock);
/* Presumably each rcvr on the local list is freed here -- the loop
 * body is elided in this view. */
362 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
/* Release any receive message still owned by an in-use
 * sequence-table slot. */
365 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
366 if ((intf->seq_table[i].inuse)
367 && (intf->seq_table[i].recv_msg))
369 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
374 static void intf_free(struct kref *ref)
376 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
378 clean_up_interface_data(intf);
382 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
387 down_write(&smi_watchers_sem);
388 list_add(&(watcher->link), &smi_watchers);
389 up_write(&smi_watchers_sem);
390 spin_lock_irqsave(&interfaces_lock, flags);
391 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
392 ipmi_smi_t intf = ipmi_interfaces[i];
393 if (IPMI_INVALID_INTERFACE(intf))
395 spin_unlock_irqrestore(&interfaces_lock, flags);
397 spin_lock_irqsave(&interfaces_lock, flags);
399 spin_unlock_irqrestore(&interfaces_lock, flags);
403 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
405 down_write(&smi_watchers_sem);
406 list_del(&(watcher->link));
407 up_write(&smi_watchers_sem);
412 call_smi_watchers(int i)
414 struct ipmi_smi_watcher *w;
416 down_read(&smi_watchers_sem);
417 list_for_each_entry(w, &smi_watchers, link) {
418 if (try_module_get(w->owner)) {
420 module_put(w->owner);
423 up_read(&smi_watchers_sem);
427 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
429 if (addr1->addr_type != addr2->addr_type)
432 if (addr1->channel != addr2->channel)
435 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
436 struct ipmi_system_interface_addr *smi_addr1
437 = (struct ipmi_system_interface_addr *) addr1;
438 struct ipmi_system_interface_addr *smi_addr2
439 = (struct ipmi_system_interface_addr *) addr2;
440 return (smi_addr1->lun == smi_addr2->lun);
443 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
444 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
446 struct ipmi_ipmb_addr *ipmb_addr1
447 = (struct ipmi_ipmb_addr *) addr1;
448 struct ipmi_ipmb_addr *ipmb_addr2
449 = (struct ipmi_ipmb_addr *) addr2;
451 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
452 && (ipmb_addr1->lun == ipmb_addr2->lun));
455 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
456 struct ipmi_lan_addr *lan_addr1
457 = (struct ipmi_lan_addr *) addr1;
458 struct ipmi_lan_addr *lan_addr2
459 = (struct ipmi_lan_addr *) addr2;
461 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
462 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
463 && (lan_addr1->session_handle
464 == lan_addr2->session_handle)
465 && (lan_addr1->lun == lan_addr2->lun));
471 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
473 if (len < sizeof(struct ipmi_system_interface_addr)) {
477 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
478 if (addr->channel != IPMI_BMC_CHANNEL)
483 if ((addr->channel == IPMI_BMC_CHANNEL)
484 || (addr->channel >= IPMI_MAX_CHANNELS)
485 || (addr->channel < 0))
488 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
489 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
491 if (len < sizeof(struct ipmi_ipmb_addr)) {
497 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
498 if (len < sizeof(struct ipmi_lan_addr)) {
507 unsigned int ipmi_addr_length(int addr_type)
509 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
510 return sizeof(struct ipmi_system_interface_addr);
512 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
513 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
515 return sizeof(struct ipmi_ipmb_addr);
518 if (addr_type == IPMI_LAN_ADDR_TYPE)
519 return sizeof(struct ipmi_lan_addr);
524 static void deliver_response(struct ipmi_recv_msg *msg)
527 ipmi_smi_t intf = msg->user_msg_data;
530 /* Special handling for NULL users. */
531 if (intf->null_user_handler) {
532 intf->null_user_handler(intf, msg);
533 spin_lock_irqsave(&intf->counter_lock, flags);
534 intf->handled_local_responses++;
535 spin_unlock_irqrestore(&intf->counter_lock, flags);
537 /* No handler, so give up. */
538 spin_lock_irqsave(&intf->counter_lock, flags);
539 intf->unhandled_local_responses++;
540 spin_unlock_irqrestore(&intf->counter_lock, flags);
542 ipmi_free_recv_msg(msg);
544 ipmi_user_t user = msg->user;
545 user->handler->ipmi_recv_hndl(msg, user->handler_data);
549 /* Find the next sequence number not being used and add the given
550 message with the given timeout to the sequence table. This must be
551 called with the interface's seq_lock held. */
/* NOTE(review): several parameter lines (retries, broadcast, and the
 * seq/seqid out-parameters), the declaration of i, and the
 * success/failure return paths are elided in this chunk -- confirm
 * against the full file. */
552 static int intf_next_seq(ipmi_smi_t intf,
553 struct ipmi_recv_msg *recv_msg,
554 unsigned long timeout,
/* Scan at most one full lap of the table, starting just after the
 * last slot handed out, looking for a free entry. */
563 for (i = intf->curr_seq;
564 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
565 i = (i+1)%IPMI_IPMB_NUM_SEQ)
567 if (! intf->seq_table[i].inuse)
/* Claim the slot if the scan found a free one. */
571 if (! intf->seq_table[i].inuse) {
572 intf->seq_table[i].recv_msg = recv_msg;
574 /* Start with the maximum timeout, when the send response
575 comes in we will start the real timer. */
576 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
577 intf->seq_table[i].orig_timeout = timeout;
578 intf->seq_table[i].retries_left = retries;
579 intf->seq_table[i].broadcast = broadcast;
580 intf->seq_table[i].inuse = 1;
/* Bump the per-slot generation id so a late reply aimed at the
 * previous occupant of this slot will not match. */
581 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
583 *seqid = intf->seq_table[i].seqid;
584 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
592 /* Return the receive message for the given sequence number and
593 release the sequence number so it can be reused. Some other data
594 is passed in to be sure the message matches up correctly (to help
595 guard against message coming in after their timeout and the
596 sequence number being reused). */
/* NOTE(review): the seq/channel/cmd/netfn parameter lines and the
 * success/failure return paths are elided in this chunk -- confirm
 * against the full file. */
597 static int intf_find_seq(ipmi_smi_t intf,
602 struct ipmi_addr *addr,
603 struct ipmi_recv_msg **recv_msg)
/* Reject impossible sequence numbers before touching the table. */
608 if (seq >= IPMI_IPMB_NUM_SEQ)
611 spin_lock_irqsave(&(intf->seq_lock), flags);
612 if (intf->seq_table[seq].inuse) {
613 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
/* Only hand the message back when every identifying field of the
 * stored request matches what the caller expects. */
615 if ((msg->addr.channel == channel)
616 && (msg->msg.cmd == cmd)
617 && (msg->msg.netfn == netfn)
618 && (ipmi_addr_equal(addr, &(msg->addr))))
/* Release the slot for reuse. */
621 intf->seq_table[seq].inuse = 0;
625 spin_unlock_irqrestore(&(intf->seq_lock), flags);
631 /* Start the timer for a specific sequence table entry. */
/* NOTE(review): the msgid parameter line, local seq/seqid/flags
 * declarations, and the return path are elided in this chunk. */
632 static int intf_start_seq_timer(ipmi_smi_t intf,
/* Unpack the slot index and generation id that were packed into the
 * msgid with STORE_SEQ_IN_MSGID(). */
641 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
643 spin_lock_irqsave(&(intf->seq_lock), flags);
644 /* We do this verification because the user can be deleted
645 while a message is outstanding. */
646 if ((intf->seq_table[seq].inuse)
647 && (intf->seq_table[seq].seqid == seqid))
/* Arm the real per-request timeout now that the send completed. */
649 struct seq_table *ent = &(intf->seq_table[seq]);
650 ent->timeout = ent->orig_timeout;
653 spin_unlock_irqrestore(&(intf->seq_lock), flags);
658 /* Got an error for the send message for a specific sequence number. */
/* NOTE(review): the msgid/err parameter lines and the code that
 * assigns msg from the matching sequence-table entry are elided in
 * this chunk; presumably msg = ent->recv_msg and the entry is marked
 * free when seq/seqid match -- confirm against the full file. */
659 static int intf_err_seq(ipmi_smi_t intf,
667 struct ipmi_recv_msg *msg = NULL;
670 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
672 spin_lock_irqsave(&(intf->seq_lock), flags);
673 /* We do this verification because the user can be deleted
674 while a message is outstanding. */
675 if ((intf->seq_table[seq].inuse)
676 && (intf->seq_table[seq].seqid == seqid))
678 struct seq_table *ent = &(intf->seq_table[seq]);
684 spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Synthesize a one-byte error response (first data byte = err) and
 * deliver it like a normal reply; delivery happens after the
 * seq_lock has been dropped. */
687 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
688 msg->msg_data[0] = err;
689 msg->msg.netfn |= 1; /* Convert to a response. */
690 msg->msg.data_len = 1;
691 msg->msg.data = msg->msg_data;
692 deliver_response(msg);
/* Create a new user of an IPMI interface: allocate the user
 * structure, bind it to interface @if_num, take the needed interface
 * and driver-module references, and link it onto the interface's
 * user list.
 * NOTE(review): parameter lines (handler_data, the output user
 * pointer), local declarations, error labels, and the final return
 * are elided in this chunk -- confirm against the full file. */
699 int ipmi_create_user(unsigned int if_num,
700 struct ipmi_user_hndl *handler,
705 ipmi_user_t new_user;
709 /* There is no module usecount here, because it's not
710 required. Since this can only be used by and called from
711 other modules, they will implicitly use this module, and
712 thus this can't be removed unless the other modules are
718 /* Make sure the driver is actually initialized, this handles
719 problems with initialization order. */
721 rv = ipmi_init_msghandler();
725 /* The init code doesn't return an error if it was turned
726 off, but it won't initialize. Check that. */
731 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
735 spin_lock_irqsave(&interfaces_lock, flags);
/* NOTE(review): ipmi_interfaces[] is indexed on the next line BEFORE
 * if_num is bounds-checked on the line after -- an out-of-range
 * if_num reads past the end of the array.  The range check should
 * precede the load; confirm against the full file. */
736 intf = ipmi_interfaces[if_num];
737 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
738 spin_unlock_irqrestore(&interfaces_lock, flags);
743 /* Note that each existing user holds a refcount to the interface. */
744 kref_get(&intf->refcount);
745 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Initialize the new user and pin the low-level driver module. */
747 kref_init(&new_user->refcount);
748 new_user->handler = handler;
749 new_user->handler_data = handler_data;
750 new_user->intf = intf;
751 new_user->gets_events = 0;
753 if (!try_module_get(intf->handlers->owner)) {
758 if (intf->handlers->inc_usecount) {
759 rv = intf->handlers->inc_usecount(intf->send_info);
/* inc_usecount failed: undo the module pin (error path elided). */
761 module_put(intf->handlers->owner);
/* Publish the user on the interface's RCU-protected user list. */
767 spin_lock_irqsave(&intf->seq_lock, flags);
768 list_add_rcu(&new_user->link, &intf->users);
769 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Error cleanup: drop the interface reference taken above. */
774 kref_put(&intf->refcount, intf_free);
780 static void free_user(struct kref *ref)
782 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
/* Destroy @user: unlink it from the interface's user list and
 * sequence table, tear down its command registrations, release the
 * driver-module and interface references, and drop the user's own
 * refcount.
 * NOTE(review): local declarations (i, flags), the rcvrs chaining via
 * rcvr->next, the synchronize_rcu() + free loop for the collected
 * rcvrs, and the return value are elided in this chunk -- confirm
 * against the full file. */
786 int ipmi_destroy_user(ipmi_user_t user)
789 ipmi_smi_t intf = user->intf;
792 struct cmd_rcvr *rcvr;
793 struct cmd_rcvr *rcvrs = NULL;
797 /* Remove the user from the interface's sequence table. */
798 spin_lock_irqsave(&intf->seq_lock, flags);
799 list_del_rcu(&user->link)
/* Invalidate every in-flight sequence-table slot that belongs to
 * this user so late replies are not delivered to freed state. */;
801 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
802 if (intf->seq_table[i].inuse
803 && (intf->seq_table[i].recv_msg->user == user))
805 intf->seq_table[i].inuse = 0;
808 spin_unlock_irqrestore(&intf->seq_lock, flags);
811 * Remove the user from the command receiver's table. First
812 * we build a list of everything (not using the standard link,
813 * since other things may be using it till we do
814 * synchronize_rcu()) then free everything in that list.
816 down(&intf->cmd_rcvrs_lock);
817 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
818 if (rcvr->user == user) {
819 list_del_rcu(&rcvr->link);
/* Chaining onto the rcvrs list via rcvr->next is elided here. */
824 up(&intf->cmd_rcvrs_lock);
/* Release the references taken in ipmi_create_user(). */
832 module_put(intf->handlers->owner);
833 if (intf->handlers->dec_usecount)
834 intf->handlers->dec_usecount(intf->send_info);
836 kref_put(&intf->refcount, intf_free);
838 kref_put(&user->refcount, free_user);
843 void ipmi_get_version(ipmi_user_t user,
844 unsigned char *major,
845 unsigned char *minor)
847 *major = user->intf->version_major;
848 *minor = user->intf->version_minor;
851 int ipmi_set_my_address(ipmi_user_t user,
852 unsigned int channel,
853 unsigned char address)
855 if (channel >= IPMI_MAX_CHANNELS)
857 user->intf->channels[channel].address = address;
861 int ipmi_get_my_address(ipmi_user_t user,
862 unsigned int channel,
863 unsigned char *address)
865 if (channel >= IPMI_MAX_CHANNELS)
867 *address = user->intf->channels[channel].address;
871 int ipmi_set_my_LUN(ipmi_user_t user,
872 unsigned int channel,
875 if (channel >= IPMI_MAX_CHANNELS)
877 user->intf->channels[channel].lun = LUN & 0x3;
881 int ipmi_get_my_LUN(ipmi_user_t user,
882 unsigned int channel,
883 unsigned char *address)
885 if (channel >= IPMI_MAX_CHANNELS)
887 *address = user->intf->channels[channel].lun;
/* Turn event delivery to @user on or off.  When enabling, any events
 * queued on the interface are drained and delivered to this user.
 * NOTE(review): local flags declaration, the "if (val)" guard around
 * the drain, per-message bookkeeping inside the delivery loop, and
 * the return value are elided in this chunk -- confirm against the
 * full file. */
891 int ipmi_set_gets_events(ipmi_user_t user, int val)
894 ipmi_smi_t intf = user->intf;
895 struct ipmi_recv_msg *msg, *msg2;
896 struct list_head msgs;
898 INIT_LIST_HEAD(&msgs);
900 spin_lock_irqsave(&intf->events_lock, flags);
901 user->gets_events = val;
904 /* Deliver any queued events. */
/* Move the interface's queued events onto the private msgs list. */
905 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
906 list_del(&msg->link);
907 list_add_tail(&msg->link, &msgs);
911 /* Hold the events lock while doing this to preserve order. */
912 list_for_each_entry_safe(msg, msg2, &msgs, link) {
/* Each delivered message carries a user reference. */
914 kref_get(&user->refcount);
915 deliver_response(msg);
918 spin_unlock_irqrestore(&intf->events_lock, flags);
923 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
927 struct cmd_rcvr *rcvr;
929 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
930 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
/* Register @user to receive incoming commands matching a netfn/cmd
 * pair on its interface.
 * NOTE(review): the netfn/cmd parameter lines, rv declaration,
 * allocation-failure check, rcvr field initialization, the duplicate-
 * registration error path, and the final return are elided in this
 * chunk -- confirm against the full file. */
936 int ipmi_register_for_cmd(ipmi_user_t user,
940 ipmi_smi_t intf = user->intf;
941 struct cmd_rcvr *rcvr;
942 struct cmd_rcvr *entry;
/* Allocate outside the lock; GFP_KERNEL may sleep. */
946 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
953 down(&intf->cmd_rcvrs_lock);
954 /* Make sure the command/netfn is not already registered. */
955 entry = find_cmd_rcvr(intf, netfn, cmd);
/* Publish the new receiver on the RCU-protected list. */
961 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
964 up(&intf->cmd_rcvrs_lock);
971 int ipmi_unregister_for_cmd(ipmi_user_t user,
975 ipmi_smi_t intf = user->intf;
976 struct cmd_rcvr *rcvr;
978 down(&intf->cmd_rcvrs_lock);
979 /* Make sure the command/netfn is not already registered. */
980 rcvr = find_cmd_rcvr(intf, netfn, cmd);
981 if ((rcvr) && (rcvr->user == user)) {
982 list_del_rcu(&rcvr->link);
983 up(&intf->cmd_rcvrs_lock);
988 up(&intf->cmd_rcvrs_lock);
993 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
995 ipmi_smi_t intf = user->intf;
996 intf->handlers->set_run_to_completion(intf->send_info, val);
/* Compute the IPMB 2's-complement checksum over @size bytes at
 * @data: the value which, added to the byte-sum of the covered
 * bytes, gives zero modulo 256. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1010 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1011 struct kernel_ipmi_msg *msg,
1012 struct ipmi_ipmb_addr *ipmb_addr,
1014 unsigned char ipmb_seq,
1016 unsigned char source_address,
1017 unsigned char source_lun)
1021 /* Format the IPMB header data. */
1022 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1023 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1024 smi_msg->data[2] = ipmb_addr->channel;
1026 smi_msg->data[3] = 0;
1027 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1028 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1029 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1030 smi_msg->data[i+6] = source_address;
1031 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1032 smi_msg->data[i+8] = msg->cmd;
1034 /* Now tack on the data to the message. */
1035 if (msg->data_len > 0)
1036 memcpy(&(smi_msg->data[i+9]), msg->data,
1038 smi_msg->data_size = msg->data_len + 9;
1040 /* Now calculate the checksum and tack it on. */
1041 smi_msg->data[i+smi_msg->data_size]
1042 = ipmb_checksum(&(smi_msg->data[i+6]),
1043 smi_msg->data_size-6);
1045 /* Add on the checksum size and the offset from the
1047 smi_msg->data_size += 1 + i;
1049 smi_msg->msgid = msgid;
1052 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1053 struct kernel_ipmi_msg *msg,
1054 struct ipmi_lan_addr *lan_addr,
1056 unsigned char ipmb_seq,
1057 unsigned char source_lun)
1059 /* Format the IPMB header data. */
1060 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1061 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1062 smi_msg->data[2] = lan_addr->channel;
1063 smi_msg->data[3] = lan_addr->session_handle;
1064 smi_msg->data[4] = lan_addr->remote_SWID;
1065 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1066 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1067 smi_msg->data[7] = lan_addr->local_SWID;
1068 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1069 smi_msg->data[9] = msg->cmd;
1071 /* Now tack on the data to the message. */
1072 if (msg->data_len > 0)
1073 memcpy(&(smi_msg->data[10]), msg->data,
1075 smi_msg->data_size = msg->data_len + 10;
1077 /* Now calculate the checksum and tack it on. */
1078 smi_msg->data[smi_msg->data_size]
1079 = ipmb_checksum(&(smi_msg->data[7]),
1080 smi_msg->data_size-7);
1082 /* Add on the checksum size and the offset from the
1084 smi_msg->data_size += 1;
1086 smi_msg->msgid = msgid;
1089 /* Separate from ipmi_request so that the user does not have to be
1090 supplied in certain circumstances (mainly at panic time). If
1091 messages are supplied, they will be freed, even if an error
1093 static int i_ipmi_request(ipmi_user_t user,
1095 struct ipmi_addr *addr,
1097 struct kernel_ipmi_msg *msg,
1098 void *user_msg_data,
1100 struct ipmi_recv_msg *supplied_recv,
1102 unsigned char source_address,
1103 unsigned char source_lun,
1105 unsigned int retry_time_ms)
1108 struct ipmi_smi_msg *smi_msg;
1109 struct ipmi_recv_msg *recv_msg;
1110 unsigned long flags;
1113 if (supplied_recv) {
1114 recv_msg = supplied_recv;
1116 recv_msg = ipmi_alloc_recv_msg();
1117 if (recv_msg == NULL) {
1121 recv_msg->user_msg_data = user_msg_data;
1124 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1126 smi_msg = ipmi_alloc_smi_msg();
1127 if (smi_msg == NULL) {
1128 ipmi_free_recv_msg(recv_msg);
1133 recv_msg->user = user;
1135 kref_get(&user->refcount);
1136 recv_msg->msgid = msgid;
1137 /* Store the message to send in the receive message so timeout
1138 responses can get the proper response data. */
1139 recv_msg->msg = *msg;
1141 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1142 struct ipmi_system_interface_addr *smi_addr;
1144 if (msg->netfn & 1) {
1145 /* Responses are not allowed to the SMI. */
1150 smi_addr = (struct ipmi_system_interface_addr *) addr;
1151 if (smi_addr->lun > 3) {
1152 spin_lock_irqsave(&intf->counter_lock, flags);
1153 intf->sent_invalid_commands++;
1154 spin_unlock_irqrestore(&intf->counter_lock, flags);
1159 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1161 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1162 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1163 || (msg->cmd == IPMI_GET_MSG_CMD)
1164 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1166 /* We don't let the user do these, since we manage
1167 the sequence numbers. */
1168 spin_lock_irqsave(&intf->counter_lock, flags);
1169 intf->sent_invalid_commands++;
1170 spin_unlock_irqrestore(&intf->counter_lock, flags);
1175 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1176 spin_lock_irqsave(&intf->counter_lock, flags);
1177 intf->sent_invalid_commands++;
1178 spin_unlock_irqrestore(&intf->counter_lock, flags);
1183 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1184 smi_msg->data[1] = msg->cmd;
1185 smi_msg->msgid = msgid;
1186 smi_msg->user_data = recv_msg;
1187 if (msg->data_len > 0)
1188 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1189 smi_msg->data_size = msg->data_len + 2;
1190 spin_lock_irqsave(&intf->counter_lock, flags);
1191 intf->sent_local_commands++;
1192 spin_unlock_irqrestore(&intf->counter_lock, flags);
1193 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1194 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1196 struct ipmi_ipmb_addr *ipmb_addr;
1197 unsigned char ipmb_seq;
1201 if (addr->channel >= IPMI_MAX_CHANNELS) {
1202 spin_lock_irqsave(&intf->counter_lock, flags);
1203 intf->sent_invalid_commands++;
1204 spin_unlock_irqrestore(&intf->counter_lock, flags);
1209 if (intf->channels[addr->channel].medium
1210 != IPMI_CHANNEL_MEDIUM_IPMB)
1212 spin_lock_irqsave(&intf->counter_lock, flags);
1213 intf->sent_invalid_commands++;
1214 spin_unlock_irqrestore(&intf->counter_lock, flags);
1220 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1221 retries = 0; /* Don't retry broadcasts. */
1225 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1226 /* Broadcasts add a zero at the beginning of the
1227 message, but otherwise is the same as an IPMB
1229 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1234 /* Default to 1 second retries. */
1235 if (retry_time_ms == 0)
1236 retry_time_ms = 1000;
1238 /* 9 for the header and 1 for the checksum, plus
1239 possibly one for the broadcast. */
1240 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1241 spin_lock_irqsave(&intf->counter_lock, flags);
1242 intf->sent_invalid_commands++;
1243 spin_unlock_irqrestore(&intf->counter_lock, flags);
1248 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1249 if (ipmb_addr->lun > 3) {
1250 spin_lock_irqsave(&intf->counter_lock, flags);
1251 intf->sent_invalid_commands++;
1252 spin_unlock_irqrestore(&intf->counter_lock, flags);
1257 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1259 if (recv_msg->msg.netfn & 0x1) {
1260 /* It's a response, so use the user's sequence
1262 spin_lock_irqsave(&intf->counter_lock, flags);
1263 intf->sent_ipmb_responses++;
1264 spin_unlock_irqrestore(&intf->counter_lock, flags);
1265 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1267 source_address, source_lun);
1269 /* Save the receive message so we can use it
1270 to deliver the response. */
1271 smi_msg->user_data = recv_msg;
1273 /* It's a command, so get a sequence for it. */
1275 spin_lock_irqsave(&(intf->seq_lock), flags);
1277 spin_lock(&intf->counter_lock);
1278 intf->sent_ipmb_commands++;
1279 spin_unlock(&intf->counter_lock);
1281 /* Create a sequence number with a 1 second
1282 timeout and 4 retries. */
1283 rv = intf_next_seq(intf,
1291 /* We have used up all the sequence numbers,
1292 probably, so abort. */
1293 spin_unlock_irqrestore(&(intf->seq_lock),
1298 /* Store the sequence number in the message,
1299 so that when the send message response
1300 comes back we can start the timer. */
1301 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1302 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1303 ipmb_seq, broadcast,
1304 source_address, source_lun);
1306 /* Copy the message into the recv message data, so we
1307 can retransmit it later if necessary. */
1308 memcpy(recv_msg->msg_data, smi_msg->data,
1309 smi_msg->data_size);
1310 recv_msg->msg.data = recv_msg->msg_data;
1311 recv_msg->msg.data_len = smi_msg->data_size;
1313 /* We don't unlock until here, because we need
1314 to copy the completed message into the
1315 recv_msg before we release the lock.
1316 Otherwise, race conditions may bite us. I
1317 know that's pretty paranoid, but I prefer
1319 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1321 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1322 struct ipmi_lan_addr *lan_addr;
1323 unsigned char ipmb_seq;
1326 if (addr->channel >= IPMI_MAX_CHANNELS) {
1327 spin_lock_irqsave(&intf->counter_lock, flags);
1328 intf->sent_invalid_commands++;
1329 spin_unlock_irqrestore(&intf->counter_lock, flags);
1334 if ((intf->channels[addr->channel].medium
1335 != IPMI_CHANNEL_MEDIUM_8023LAN)
1336 && (intf->channels[addr->channel].medium
1337 != IPMI_CHANNEL_MEDIUM_ASYNC))
1339 spin_lock_irqsave(&intf->counter_lock, flags);
1340 intf->sent_invalid_commands++;
1341 spin_unlock_irqrestore(&intf->counter_lock, flags);
1348 /* Default to 1 second retries. */
1349 if (retry_time_ms == 0)
1350 retry_time_ms = 1000;
1352 /* 11 for the header and 1 for the checksum. */
1353 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1354 spin_lock_irqsave(&intf->counter_lock, flags);
1355 intf->sent_invalid_commands++;
1356 spin_unlock_irqrestore(&intf->counter_lock, flags);
1361 lan_addr = (struct ipmi_lan_addr *) addr;
1362 if (lan_addr->lun > 3) {
1363 spin_lock_irqsave(&intf->counter_lock, flags);
1364 intf->sent_invalid_commands++;
1365 spin_unlock_irqrestore(&intf->counter_lock, flags);
1370 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1372 if (recv_msg->msg.netfn & 0x1) {
1373 /* It's a response, so use the user's sequence
1375 spin_lock_irqsave(&intf->counter_lock, flags);
1376 intf->sent_lan_responses++;
1377 spin_unlock_irqrestore(&intf->counter_lock, flags);
1378 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1381 /* Save the receive message so we can use it
1382 to deliver the response. */
1383 smi_msg->user_data = recv_msg;
1385 /* It's a command, so get a sequence for it. */
1387 spin_lock_irqsave(&(intf->seq_lock), flags);
1389 spin_lock(&intf->counter_lock);
1390 intf->sent_lan_commands++;
1391 spin_unlock(&intf->counter_lock);
1393 /* Create a sequence number with a 1 second
1394 timeout and 4 retries. */
1395 rv = intf_next_seq(intf,
1403 /* We have used up all the sequence numbers,
1404 probably, so abort. */
1405 spin_unlock_irqrestore(&(intf->seq_lock),
1410 /* Store the sequence number in the message,
1411 so that when the send message response
1412 comes back we can start the timer. */
1413 format_lan_msg(smi_msg, msg, lan_addr,
1414 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1415 ipmb_seq, source_lun);
1417 /* Copy the message into the recv message data, so we
1418 can retransmit it later if necessary. */
1419 memcpy(recv_msg->msg_data, smi_msg->data,
1420 smi_msg->data_size);
1421 recv_msg->msg.data = recv_msg->msg_data;
1422 recv_msg->msg.data_len = smi_msg->data_size;
1424 /* We don't unlock until here, because we need
1425 to copy the completed message into the
1426 recv_msg before we release the lock.
1427 Otherwise, race conditions may bite us. I
1428 know that's pretty paranoid, but I prefer
1430 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1433 /* Unknown address type. */
1434 spin_lock_irqsave(&intf->counter_lock, flags);
1435 intf->sent_invalid_commands++;
1436 spin_unlock_irqrestore(&intf->counter_lock, flags);
1444 for (m = 0; m < smi_msg->data_size; m++)
1445 printk(" %2.2x", smi_msg->data[m]);
1449 intf->handlers->sender(intf->send_info, smi_msg, priority);
1454 ipmi_free_smi_msg(smi_msg);
1455 ipmi_free_recv_msg(recv_msg);
/*
 * Validate addr->channel and look up the slave address and LUN the
 * interface has configured for that channel, storing them through
 * *saddr and *lun for the caller.
 * NOTE(review): this extract elides several lines here (the *lun
 * parameter, braces, and return statements); presumably an error is
 * returned when the channel is out of range — confirm in full source.
 */
1459 static int check_addr(ipmi_smi_t intf,
1460 struct ipmi_addr *addr,
1461 unsigned char *saddr,
/* Reject channels beyond the per-interface channel table. */
1464 if (addr->channel >= IPMI_MAX_CHANNELS)
1466 *lun = intf->channels[addr->channel].lun;
1467 *saddr = intf->channels[addr->channel].address;
/*
 * Public request entry point that lets the caller control retry
 * behavior (retry count / retry_time_ms).  Validates the destination
 * address via check_addr(), which also yields the source address and
 * LUN for the channel, then forwards everything to the common
 * i_ipmi_request() path.
 * NOTE(review): parameter list and error-return lines are partially
 * elided in this extract.
 */
1471 int ipmi_request_settime(ipmi_user_t user,
1472 struct ipmi_addr *addr,
1474 struct kernel_ipmi_msg *msg,
1475 void *user_msg_data,
1478 unsigned int retry_time_ms)
1480 unsigned char saddr, lun;
/* Fail early on a bad channel before doing any work. */
1485 rv = check_addr(user->intf, addr, &saddr, &lun);
1488 return i_ipmi_request(user,
/*
 * Like ipmi_request_settime(), but the caller supplies pre-allocated
 * message structures (supplied_recv, and presumably a supplied SMI
 * message — elided here) so the request can proceed without
 * allocating in this path.  Address validation and source
 * address/LUN lookup are shared via check_addr().
 */
1502 int ipmi_request_supply_msgs(ipmi_user_t user,
1503 struct ipmi_addr *addr,
1505 struct kernel_ipmi_msg *msg,
1506 void *user_msg_data,
1508 struct ipmi_recv_msg *supplied_recv,
1511 unsigned char saddr, lun;
1516 rv = check_addr(user->intf, addr, &saddr, &lun);
1519 return i_ipmi_request(user,
/*
 * /proc read handler ("ipmb" file): print the IPMB slave address of
 * every channel in hex, space-separated on a single line.
 * Uses the legacy read_proc_t interface; "data" is the ipmi_smi_t.
 */
1533 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1534 int count, int *eof, void *data)
1536 char *out = (char *) page;
1537 ipmi_smi_t intf = data;
1541 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1542 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1543 out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * /proc read handler ("version" file): print the interface's IPMI
 * version as "major.minor".  Returns the number of bytes written.
 */
1549 static int version_file_read_proc(char *page, char **start, off_t off,
1550 int count, int *eof, void *data)
1552 char *out = (char *) page;
1553 ipmi_smi_t intf = data;
1555 return sprintf(out, "%d.%d\n",
1556 intf->version_major, intf->version_minor);
/*
 * /proc read handler ("stats" file): dump every per-interface
 * statistics counter, one "name: value" line each.  The counters are
 * read without taking counter_lock; they are plain ints, so a stale
 * value is acceptable for a statistics dump.
 */
1559 static int stat_file_read_proc(char *page, char **start, off_t off,
1560 int count, int *eof, void *data)
1562 char *out = (char *) page;
1563 ipmi_smi_t intf = data;
1565 out += sprintf(out, "sent_invalid_commands: %d\n",
1566 intf->sent_invalid_commands);
1567 out += sprintf(out, "sent_local_commands: %d\n",
1568 intf->sent_local_commands);
1569 out += sprintf(out, "handled_local_responses: %d\n",
1570 intf->handled_local_responses);
1571 out += sprintf(out, "unhandled_local_responses: %d\n",
1572 intf->unhandled_local_responses);
1573 out += sprintf(out, "sent_ipmb_commands: %d\n",
1574 intf->sent_ipmb_commands);
1575 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1576 intf->sent_ipmb_command_errs);
1577 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1578 intf->retransmitted_ipmb_commands);
1579 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1580 intf->timed_out_ipmb_commands);
1581 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1582 intf->timed_out_ipmb_broadcasts);
1583 out += sprintf(out, "sent_ipmb_responses: %d\n",
1584 intf->sent_ipmb_responses);
1585 out += sprintf(out, "handled_ipmb_responses: %d\n",
1586 intf->handled_ipmb_responses);
1587 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1588 intf->invalid_ipmb_responses);
1589 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1590 intf->unhandled_ipmb_responses);
1591 out += sprintf(out, "sent_lan_commands: %d\n",
1592 intf->sent_lan_commands);
1593 out += sprintf(out, "sent_lan_command_errs: %d\n",
1594 intf->sent_lan_command_errs);
1595 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1596 intf->retransmitted_lan_commands);
1597 out += sprintf(out, "timed_out_lan_commands: %d\n",
1598 intf->timed_out_lan_commands);
1599 out += sprintf(out, "sent_lan_responses: %d\n",
1600 intf->sent_lan_responses);
1601 out += sprintf(out, "handled_lan_responses: %d\n",
1602 intf->handled_lan_responses);
1603 out += sprintf(out, "invalid_lan_responses: %d\n",
1604 intf->invalid_lan_responses);
1605 out += sprintf(out, "unhandled_lan_responses: %d\n",
1606 intf->unhandled_lan_responses);
1607 out += sprintf(out, "handled_commands: %d\n",
1608 intf->handled_commands);
1609 out += sprintf(out, "invalid_commands: %d\n",
1610 intf->invalid_commands);
1611 out += sprintf(out, "unhandled_commands: %d\n",
1612 intf->unhandled_commands);
1613 out += sprintf(out, "invalid_events: %d\n",
1614 intf->invalid_events);
1615 out += sprintf(out, "events: %d\n",
/* Total bytes written into the proc page. */
1618 return (out - ((char *) page));
/*
 * Create a proc file under the interface's proc directory and record
 * it on the smi->proc_entries list (protected by proc_entry_lock) so
 * remove_proc_entries() can tear it down later.  The entry's name is
 * duplicated so the caller's string need not stay alive.
 * NOTE(review): allocation-failure and create_proc_entry-failure
 * handling lines are elided in this extract — confirm cleanup paths
 * in the full source.
 */
1621 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1622 read_proc_t *read_proc, write_proc_t *write_proc,
1623 void *data, struct module *owner)
1626 #ifdef CONFIG_PROC_FS
1627 struct proc_dir_entry *file;
1628 struct ipmi_proc_entry *entry;
1630 /* Create a list element. */
1631 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
/* Private copy of the name, freed when the entry is removed. */
1634 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1639 strcpy(entry->name, name);
1641 file = create_proc_entry(name, 0, smi->proc_dir);
1649 file->read_proc = read_proc;
1650 file->write_proc = write_proc;
1651 file->owner = owner;
1653 spin_lock(&smi->proc_entry_lock);
1654 /* Stick it on the list. */
1655 entry->next = smi->proc_entries;
1656 smi->proc_entries = entry;
1657 spin_unlock(&smi->proc_entry_lock);
1659 #endif /* CONFIG_PROC_FS */
/*
 * Create the per-interface proc directory (named after the interface
 * number) under /proc/ipmi and populate it with the "stats", "ipmb"
 * and "version" files.  Compiled out entirely without CONFIG_PROC_FS.
 * NOTE(review): error-check lines between the add calls are elided in
 * this extract.
 */
1664 static int add_proc_entries(ipmi_smi_t smi, int num)
1668 #ifdef CONFIG_PROC_FS
1669 sprintf(smi->proc_dir_name, "%d", num);
1670 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1674 smi->proc_dir->owner = THIS_MODULE;
1678 rv = ipmi_smi_add_proc_entry(smi, "stats",
1679 stat_file_read_proc, NULL,
1683 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1684 ipmb_file_read_proc, NULL,
1688 rv = ipmi_smi_add_proc_entry(smi, "version",
1689 version_file_read_proc, NULL,
1691 #endif /* CONFIG_PROC_FS */
/*
 * Tear down everything add_proc_entries()/ipmi_smi_add_proc_entry()
 * created: pop each tracked entry off smi->proc_entries under
 * proc_entry_lock, remove its proc file, then remove the interface's
 * proc directory itself.
 * NOTE(review): the kfree() of entry->name and entry is elided in
 * this extract — confirm the list nodes are freed in the full source.
 */
1696 static void remove_proc_entries(ipmi_smi_t smi)
1698 #ifdef CONFIG_PROC_FS
1699 struct ipmi_proc_entry *entry;
1701 spin_lock(&smi->proc_entry_lock);
1702 while (smi->proc_entries) {
1703 entry = smi->proc_entries;
1704 smi->proc_entries = entry->next;
1706 remove_proc_entry(entry->name, smi->proc_dir);
1710 spin_unlock(&smi->proc_entry_lock);
1711 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1712 #endif /* CONFIG_PROC_FS */
/*
 * Build and send a Get Channel Info command for channel "chan" to the
 * local BMC via the system interface address, using i_ipmi_request()
 * with a NULL user (the response is routed to the interface's
 * null_user_handler, i.e. channel_handler()).
 * NOTE(review): the return-type line and the data[0]=chan assignment
 * are elided in this extract.
 */
1716 send_channel_info_cmd(ipmi_smi_t intf, int chan)
1718 struct kernel_ipmi_msg msg;
1719 unsigned char data[1];
1720 struct ipmi_system_interface_addr si;
/* Address the command to the BMC itself. */
1722 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
1723 si.channel = IPMI_BMC_CHANNEL;
1726 msg.netfn = IPMI_NETFN_APP_REQUEST;
1727 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
1731 return i_ipmi_request(NULL,
1733 (struct ipmi_addr *) &si,
1740 intf->channels[0].address,
1741 intf->channels[0].lun,
/*
 * null_user_handler used during interface registration to scan the
 * channels.  Each Get Channel Info response records the channel's
 * medium/protocol and triggers the request for the next channel;
 * setting curr_channel to IPMI_MAX_CHANNELS and waking intf->waitq
 * signals ipmi_register_smi() that the scan is finished (normally,
 * on error, or when the BMC doesn't support the command).
 */
1746 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
/* Only react to Get Channel Info responses from the BMC. */
1751 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
1752 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
1753 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
1755 /* It's the one we want */
1756 if (msg->msg.data[0] != 0) {
1757 /* Got an error from the channel, just go on. */
1759 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
1760 /* If the MC does not support this
1761 command, that is legal. We just
1762 assume it has one IPMB at channel
1764 intf->channels[0].medium
1765 = IPMI_CHANNEL_MEDIUM_IPMB;
1766 intf->channels[0].protocol
1767 = IPMI_CHANNEL_PROTOCOL_IPMB;
/* Terminate the scan and release the waiter in register. */
1770 intf->curr_channel = IPMI_MAX_CHANNELS;
1771 wake_up(&intf->waitq);
1776 if (msg->msg.data_len < 4) {
1777 /* Message not big enough, just go on. */
/* Record medium (data[2]) and protocol (data[3]) per IPMI spec. */
1780 chan = intf->curr_channel;
1781 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
1782 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
1785 intf->curr_channel++;
1786 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
1787 wake_up(&intf->waitq);
1789 rv = send_channel_info_cmd(intf, intf->curr_channel);
1792 /* Got an error somehow, just give up. */
1793 intf->curr_channel = IPMI_MAX_CHANNELS;
1794 wake_up(&intf->waitq);
1796 printk(KERN_WARNING PFX
1797 "Error sending channel information: %d\n",
/*
 * Register a new low-level (SMI) interface with the message handler.
 * Allocates and initializes the per-interface structure, reserves a
 * slot in the global ipmi_interfaces[] table, scans the channels for
 * IPMI >= 1.5 BMCs (older BMCs are assumed to have one IPMB channel
 * at 0), creates the proc entries, publishes the interface, and
 * notifies watchers.  On failure the slot and the interface memory
 * are released again.
 * NOTE(review): a number of lines (error checks, closing braces) are
 * elided in this extract.
 */
1805 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1807 unsigned char version_major,
1808 unsigned char version_minor,
1809 unsigned char slave_addr,
1810 ipmi_smi_t *new_intf)
1815 unsigned long flags;
1818 /* Make sure the driver is actually initialized, this handles
1819 problems with initialization order. */
1821 rv = ipmi_init_msghandler();
1824 /* The init code doesn't return an error if it was turned
1825 off, but it won't initialize. Check that. */
1830 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
1833 memset(intf, 0, sizeof(*intf));
1834 intf->intf_num = -1;
1835 kref_init(&intf->refcount);
1836 intf->version_major = version_major;
1837 intf->version_minor = version_minor;
/* Default every channel to the standard BMC slave address. */
1838 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1839 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
1840 intf->channels[j].lun = 2;
1842 if (slave_addr != 0)
1843 intf->channels[0].address = slave_addr;
1844 INIT_LIST_HEAD(&intf->users);
1845 intf->handlers = handlers;
1846 intf->send_info = send_info;
1847 spin_lock_init(&intf->seq_lock);
1848 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1849 intf->seq_table[j].inuse = 0;
1850 intf->seq_table[j].seqid = 0;
1853 #ifdef CONFIG_PROC_FS
1854 spin_lock_init(&intf->proc_entry_lock);
1856 spin_lock_init(&intf->waiting_msgs_lock);
1857 INIT_LIST_HEAD(&intf->waiting_msgs);
1858 spin_lock_init(&intf->events_lock);
1859 INIT_LIST_HEAD(&intf->waiting_events);
1860 intf->waiting_events_count = 0;
1861 init_MUTEX(&intf->cmd_rcvrs_lock);
1862 INIT_LIST_HEAD(&intf->cmd_rcvrs);
1863 init_waitqueue_head(&intf->waitq);
1865 spin_lock_init(&intf->counter_lock);
1866 intf->proc_dir = NULL;
/* Find and reserve a free global slot for this interface. */
1869 spin_lock_irqsave(&interfaces_lock, flags);
1870 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1871 if (ipmi_interfaces[i] == NULL) {
1873 /* Reserve the entry till we are done. */
1874 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1879 spin_unlock_irqrestore(&interfaces_lock, flags);
1883 /* FIXME - this is an ugly kludge, this sets the intf for the
1884 caller before sending any messages with it. */
/* IPMI 1.5+ supports Get Channel Info; scan all channels. */
1887 if ((version_major > 1)
1888 || ((version_major == 1) && (version_minor >= 5)))
1890 /* Start scanning the channels to see what is
1892 intf->null_user_handler = channel_handler;
1893 intf->curr_channel = 0;
1894 rv = send_channel_info_cmd(intf, 0);
1898 /* Wait for the channel info to be read. */
1899 wait_event(intf->waitq,
1900 intf->curr_channel >= IPMI_MAX_CHANNELS);
1902 /* Assume a single IPMB channel at zero. */
1903 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
1904 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
1908 rv = add_proc_entries(intf, i);
/* Error path: undo proc entries, drop our reference, free slot. */
1913 remove_proc_entries(intf);
1914 kref_put(&intf->refcount, intf_free);
1915 if (i < MAX_IPMI_INTERFACES) {
1916 spin_lock_irqsave(&interfaces_lock, flags);
1917 ipmi_interfaces[i] = NULL;
1918 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Success: publish the interface and tell the watchers. */
1921 spin_lock_irqsave(&interfaces_lock, flags);
1922 ipmi_interfaces[i] = intf;
1923 spin_unlock_irqrestore(&interfaces_lock, flags);
1924 call_smi_watchers(i);
/*
 * Unregister an SMI interface: mark its global slot invalid (so the
 * number isn't reused while we tear down), remove the proc entries,
 * notify all watchers that the interface is gone, free the slot, and
 * drop the registration reference (which may free the interface).
 */
1930 int ipmi_unregister_smi(ipmi_smi_t intf)
1933 struct ipmi_smi_watcher *w;
1934 unsigned long flags;
1936 spin_lock_irqsave(&interfaces_lock, flags);
1937 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1938 if (ipmi_interfaces[i] == intf) {
1939 /* Set the interface number reserved until we
1941 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1942 intf->intf_num = -1;
1946 spin_unlock_irqrestore(&interfaces_lock,flags);
/* Not found in the table: nothing to unregister. */
1948 if (i == MAX_IPMI_INTERFACES)
1951 remove_proc_entries(intf);
1953 /* Call all the watcher interfaces to tell them that
1954 an interface is gone. */
1955 down_read(&smi_watchers_sem);
1956 list_for_each_entry(w, &smi_watchers, link)
1958 up_read(&smi_watchers_sem);
1960 /* Allow the entry to be reused now. */
1961 spin_lock_irqsave(&interfaces_lock, flags);
1962 ipmi_interfaces[i] = NULL;
1963 spin_unlock_irqrestore(&interfaces_lock,flags);
1965 kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response carrying an IPMB response from a
 * remote entity.  Reconstruct the remote's IPMB address from the raw
 * response bytes, match it against a pending sequence-table entry via
 * intf_find_seq(), and deliver the payload to the waiting user.
 * Responses that are too short or whose sequence cannot be matched
 * are counted and dropped.
 */
1969 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
1970 struct ipmi_smi_msg *msg)
1972 struct ipmi_ipmb_addr ipmb_addr;
1973 struct ipmi_recv_msg *recv_msg;
1974 unsigned long flags;
1977 /* This is 11, not 10, because the response must contain a
1978 * completion code. */
1979 if (msg->rsp_size < 11) {
1980 /* Message not big enough, just ignore it. */
1981 spin_lock_irqsave(&intf->counter_lock, flags);
1982 intf->invalid_ipmb_responses++;
1983 spin_unlock_irqrestore(&intf->counter_lock, flags);
1987 if (msg->rsp[2] != 0) {
1988 /* An error getting the response, just ignore it. */
/* Rebuild the remote sender's IPMB address from the wire format. */
1992 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
1993 ipmb_addr.slave_addr = msg->rsp[6];
1994 ipmb_addr.channel = msg->rsp[3] & 0x0f;
1995 ipmb_addr.lun = msg->rsp[7] & 3;
1997 /* It's a response from a remote entity. Look up the sequence
1998 number and handle the response. */
1999 if (intf_find_seq(intf,
2003 (msg->rsp[4] >> 2) & (~1),
2004 (struct ipmi_addr *) &(ipmb_addr),
2007 /* We were unable to find the sequence number,
2008 so just nuke the message. */
2009 spin_lock_irqsave(&intf->counter_lock, flags);
2010 intf->unhandled_ipmb_responses++;
2011 spin_unlock_irqrestore(&intf->counter_lock, flags);
2015 memcpy(recv_msg->msg_data,
2018 /* The other fields matched, so no need to set them, except
2019 for netfn, which needs to be the response that was
2020 returned, not the request value. */
2021 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2022 recv_msg->msg.data = recv_msg->msg_data;
2023 recv_msg->msg.data_len = msg->rsp_size - 10;
2024 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2025 spin_lock_irqsave(&intf->counter_lock, flags);
2026 intf->handled_ipmb_responses++;
2027 spin_unlock_irqrestore(&intf->counter_lock, flags);
2028 deliver_response(recv_msg);
/*
 * Handle an incoming IPMB *command* (from some other entity on the
 * bus) received via Get Message.  If a user has registered for this
 * netfn/cmd, build a recv_msg and deliver it; otherwise synthesize an
 * "invalid command" IPMB response and send it back through the SMI.
 * Returns via rv: -1 when the SMI message was reused for the error
 * response (caller must not free or requeue it).
 */
2033 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2034 struct ipmi_smi_msg *msg)
2036 struct cmd_rcvr *rcvr;
2038 unsigned char netfn;
2040 ipmi_user_t user = NULL;
2041 struct ipmi_ipmb_addr *ipmb_addr;
2042 struct ipmi_recv_msg *recv_msg;
2043 unsigned long flags;
2045 if (msg->rsp_size < 10) {
2046 /* Message not big enough, just ignore it. */
2047 spin_lock_irqsave(&intf->counter_lock, flags);
2048 intf->invalid_commands++;
2049 spin_unlock_irqrestore(&intf->counter_lock, flags);
2053 if (msg->rsp[2] != 0) {
2054 /* An error getting the response, just ignore it. */
2058 netfn = msg->rsp[4] >> 2;
/* Look up a registered receiver for this netfn/cmd pair. */
2062 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2065 kref_get(&user->refcount);
2071 /* We didn't find a user, deliver an error response. */
2072 spin_lock_irqsave(&intf->counter_lock, flags);
2073 intf->unhandled_commands++;
2074 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build an IPMB "invalid command" reply in-place in the SMI msg. */
2076 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2077 msg->data[1] = IPMI_SEND_MSG_CMD;
2078 msg->data[2] = msg->rsp[3];
2079 msg->data[3] = msg->rsp[6];
2080 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2081 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2082 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2084 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2085 msg->data[8] = msg->rsp[8]; /* cmd */
2086 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2087 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2088 msg->data_size = 11;
2093 printk("Invalid command:");
2094 for (m = 0; m < msg->data_size; m++)
2095 printk(" %2.2x", msg->data[m]);
2099 intf->handlers->sender(intf->send_info, msg, 0);
2101 rv = -1; /* We used the message, so return the value that
2102 causes it to not be freed or queued. */
2104 /* Deliver the message to the user. */
2105 spin_lock_irqsave(&intf->counter_lock, flags);
2106 intf->handled_commands++;
2107 spin_unlock_irqrestore(&intf->counter_lock, flags);
2109 recv_msg = ipmi_alloc_recv_msg();
2111 /* We couldn't allocate memory for the
2112 message, so requeue it for handling
2115 kref_put(&user->refcount, free_user);
2117 /* Extract the source address from the data. */
2118 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2119 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2120 ipmb_addr->slave_addr = msg->rsp[6];
2121 ipmb_addr->lun = msg->rsp[7] & 3;
2122 ipmb_addr->channel = msg->rsp[3] & 0xf;
2124 /* Extract the rest of the message information
2125 from the IPMB header.*/
2126 recv_msg->user = user;
2127 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2128 recv_msg->msgid = msg->rsp[7] >> 2;
2129 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2130 recv_msg->msg.cmd = msg->rsp[8];
2131 recv_msg->msg.data = recv_msg->msg_data;
2133 /* We chop off 10, not 9 bytes because the checksum
2134 at the end also needs to be removed. */
2135 recv_msg->msg.data_len = msg->rsp_size - 10;
2136 memcpy(recv_msg->msg_data,
2138 msg->rsp_size - 10);
2139 deliver_response(recv_msg);
/*
 * LAN-channel counterpart of handle_ipmb_get_msg_rsp(): rebuild the
 * remote LAN address (session handle, SWIDs, privilege) from the raw
 * response, match the pending sequence entry via intf_find_seq(), and
 * deliver the payload to the waiting user.  Too-short or unmatched
 * responses are counted and dropped.
 */
2146 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2147 struct ipmi_smi_msg *msg)
2149 struct ipmi_lan_addr lan_addr;
2150 struct ipmi_recv_msg *recv_msg;
2151 unsigned long flags;
2154 /* This is 13, not 12, because the response must contain a
2155 * completion code. */
2156 if (msg->rsp_size < 13) {
2157 /* Message not big enough, just ignore it. */
2158 spin_lock_irqsave(&intf->counter_lock, flags);
2159 intf->invalid_lan_responses++;
2160 spin_unlock_irqrestore(&intf->counter_lock, flags);
2164 if (msg->rsp[2] != 0) {
2165 /* An error getting the response, just ignore it. */
/* Rebuild the remote sender's LAN address from the wire format. */
2169 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2170 lan_addr.session_handle = msg->rsp[4];
2171 lan_addr.remote_SWID = msg->rsp[8];
2172 lan_addr.local_SWID = msg->rsp[5];
2173 lan_addr.channel = msg->rsp[3] & 0x0f;
2174 lan_addr.privilege = msg->rsp[3] >> 4;
2175 lan_addr.lun = msg->rsp[9] & 3;
2177 /* It's a response from a remote entity. Look up the sequence
2178 number and handle the response. */
2179 if (intf_find_seq(intf,
2183 (msg->rsp[6] >> 2) & (~1),
2184 (struct ipmi_addr *) &(lan_addr),
2187 /* We were unable to find the sequence number,
2188 so just nuke the message. */
2189 spin_lock_irqsave(&intf->counter_lock, flags);
2190 intf->unhandled_lan_responses++;
2191 spin_unlock_irqrestore(&intf->counter_lock, flags);
2195 memcpy(recv_msg->msg_data,
2197 msg->rsp_size - 11);
2198 /* The other fields matched, so no need to set them, except
2199 for netfn, which needs to be the response that was
2200 returned, not the request value. */
2201 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2202 recv_msg->msg.data = recv_msg->msg_data;
2203 recv_msg->msg.data_len = msg->rsp_size - 12;
2204 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2205 spin_lock_irqsave(&intf->counter_lock, flags);
2206 intf->handled_lan_responses++;
2207 spin_unlock_irqrestore(&intf->counter_lock, flags);
2208 deliver_response(recv_msg);
/*
 * Handle an incoming command from a LAN channel.  If a user has
 * registered for the netfn/cmd, build a recv_msg with the sender's
 * LAN address and deliver it; unlike the IPMB path, unmatched LAN
 * commands are simply freed (rv = 0) without sending an error reply.
 */
2213 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2214 struct ipmi_smi_msg *msg)
2216 struct cmd_rcvr *rcvr;
2218 unsigned char netfn;
2220 ipmi_user_t user = NULL;
2221 struct ipmi_lan_addr *lan_addr;
2222 struct ipmi_recv_msg *recv_msg;
2223 unsigned long flags;
2225 if (msg->rsp_size < 12) {
2226 /* Message not big enough, just ignore it. */
2227 spin_lock_irqsave(&intf->counter_lock, flags);
2228 intf->invalid_commands++;
2229 spin_unlock_irqrestore(&intf->counter_lock, flags);
2233 if (msg->rsp[2] != 0) {
2234 /* An error getting the response, just ignore it. */
2238 netfn = msg->rsp[6] >> 2;
/* Look up a registered receiver for this netfn/cmd pair. */
2242 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2245 kref_get(&user->refcount);
2251 /* We didn't find a user, just give up. */
2252 spin_lock_irqsave(&intf->counter_lock, flags);
2253 intf->unhandled_commands++;
2254 spin_unlock_irqrestore(&intf->counter_lock, flags);
2256 rv = 0; /* Don't do anything with these messages, just
2257 allow them to be freed. */
2259 /* Deliver the message to the user. */
2260 spin_lock_irqsave(&intf->counter_lock, flags);
2261 intf->handled_commands++;
2262 spin_unlock_irqrestore(&intf->counter_lock, flags);
2264 recv_msg = ipmi_alloc_recv_msg();
2266 /* We couldn't allocate memory for the
2267 message, so requeue it for handling
2270 kref_put(&user->refcount, free_user);
2272 /* Extract the source address from the data. */
2273 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2274 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2275 lan_addr->session_handle = msg->rsp[4];
2276 lan_addr->remote_SWID = msg->rsp[8];
2277 lan_addr->local_SWID = msg->rsp[5];
2278 lan_addr->lun = msg->rsp[9] & 3;
2279 lan_addr->channel = msg->rsp[3] & 0xf;
2280 lan_addr->privilege = msg->rsp[3] >> 4;
2282 /* Extract the rest of the message information
2283 from the IPMB header.*/
2284 recv_msg->user = user;
2285 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2286 recv_msg->msgid = msg->rsp[9] >> 2;
2287 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2288 recv_msg->msg.cmd = msg->rsp[10];
2289 recv_msg->msg.data = recv_msg->msg_data;
2291 /* We chop off 12, not 11 bytes because the checksum
2292 at the end also needs to be removed. */
2293 recv_msg->msg.data_len = msg->rsp_size - 12;
2294 memcpy(recv_msg->msg_data,
2296 msg->rsp_size - 12);
2297 deliver_response(recv_msg);
/*
 * Fill a recv_msg with the contents of an asynchronous event read
 * from the BMC: source it from the system interface address, tag it
 * as an async event, and copy the event payload (everything after the
 * completion code) into the message's own buffer.
 */
2304 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2305 struct ipmi_smi_msg *msg)
2307 struct ipmi_system_interface_addr *smi_addr;
2309 recv_msg->msgid = 0;
2310 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2311 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2312 smi_addr->channel = IPMI_BMC_CHANNEL;
2313 smi_addr->lun = msg->rsp[0] & 3;
2314 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2315 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2316 recv_msg->msg.cmd = msg->rsp[1];
/* Skip netfn/lun, cmd and completion code; copy the event body. */
2317 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2318 recv_msg->msg.data = recv_msg->msg_data;
2319 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle a Read Event Message Buffer response (an async event from
 * the BMC).  One copy of the event is allocated for every user that
 * has enabled event reception; allocation is done for all recipients
 * before any delivery so a mid-loop allocation failure can free the
 * partial set and requeue the event instead of delivering it twice.
 * If no user wants events, the event is queued on waiting_events (up
 * to MAX_EVENTS_IN_QUEUE) or discarded with a warning.
 */
2322 static int handle_read_event_rsp(ipmi_smi_t intf,
2323 struct ipmi_smi_msg *msg)
2325 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2326 struct list_head msgs;
2329 int deliver_count = 0;
2330 unsigned long flags;
2332 if (msg->rsp_size < 19) {
2333 /* Message is too small to be an IPMB event. */
2334 spin_lock_irqsave(&intf->counter_lock, flags);
2335 intf->invalid_events++;
2336 spin_unlock_irqrestore(&intf->counter_lock, flags);
2340 if (msg->rsp[2] != 0) {
2341 /* An error getting the event, just ignore it. */
2345 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event distribution and queueing. */
2347 spin_lock_irqsave(&intf->events_lock, flags);
2349 spin_lock(&intf->counter_lock);
2351 spin_unlock(&intf->counter_lock);
2353 /* Allocate and fill in one message for every user that is getting
2356 list_for_each_entry_rcu(user, &intf->users, link) {
2357 if (! user->gets_events)
2360 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: free the copies built so far and requeue. */
2363 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2364 list_del(&recv_msg->link);
2365 ipmi_free_recv_msg(recv_msg);
2367 /* We couldn't allocate memory for the
2368 message, so requeue it for handling
2376 copy_event_into_recv_msg(recv_msg, msg);
2377 recv_msg->user = user;
2378 kref_get(&user->refcount);
2379 list_add_tail(&(recv_msg->link), &msgs);
2383 if (deliver_count) {
2384 /* Now deliver all the messages. */
2385 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2386 list_del(&recv_msg->link);
2387 deliver_response(recv_msg);
2389 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2390 /* No one to receive the message, put it in queue if there's
2391 not already too many things in the queue. */
2392 recv_msg = ipmi_alloc_recv_msg();
2394 /* We couldn't allocate memory for the
2395 message, so requeue it for handling
2401 copy_event_into_recv_msg(recv_msg, msg);
2402 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2404 /* There's too many things in the queue, discard this
2406 printk(KERN_WARNING PFX "Event queue full, discarding an"
2407 " incoming event\n");
2411 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC (system interface).  The
 * matching recv_msg was stashed in msg->user_data when the request
 * was sent; if it is missing, or its user has since gone away, the
 * response is dropped (and counted).  Otherwise the recv_msg is
 * filled in from the raw response bytes and delivered to the user.
 * Fix: corrected "vender" -> "vendor" in the warning message.
 */
2416 static int handle_bmc_rsp(ipmi_smi_t intf,
2417 struct ipmi_smi_msg *msg)
2419 struct ipmi_recv_msg *recv_msg;
2420 unsigned long flags;
2421 struct ipmi_user *user;
2423 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2424 if (recv_msg == NULL)
2426 printk(KERN_WARNING"IPMI message received with no owner. This\n"
2427 "could be because of a malformed message, or\n"
2428 "because of a hardware error. Contact your\n"
2429 "hardware vendor for assistance\n");
2433 user = recv_msg->user;
2434 /* Make sure the user still exists. */
2435 if (user && !user->valid) {
2436 /* The user for the message went away, so give up. */
2437 spin_lock_irqsave(&intf->counter_lock, flags);
2438 intf->unhandled_local_responses++;
2439 spin_unlock_irqrestore(&intf->counter_lock, flags);
2440 ipmi_free_recv_msg(recv_msg);
2442 struct ipmi_system_interface_addr *smi_addr;
2444 spin_lock_irqsave(&intf->counter_lock, flags);
2445 intf->handled_local_responses++;
2446 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build the response recv_msg from the raw BMC response bytes. */
2447 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2448 recv_msg->msgid = msg->msgid;
2449 smi_addr = ((struct ipmi_system_interface_addr *)
2451 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2452 smi_addr->channel = IPMI_BMC_CHANNEL;
2453 smi_addr->lun = msg->rsp[0] & 3;
2454 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2455 recv_msg->msg.cmd = msg->rsp[1];
2456 memcpy(recv_msg->msg_data,
2459 recv_msg->msg.data = recv_msg->msg_data;
/* Drop the netfn/lun and cmd bytes; keep completion code + data. */
2460 recv_msg->msg.data_len = msg->rsp_size - 2;
2461 deliver_response(recv_msg);
2467 /* Handle a new message. Return 1 if the message should be requeued,
2468 0 if the message should be freed, or -1 if the message should not
2469 be freed or requeued. */
/*
 * Central dispatch for messages coming up from the SMI: sanity-check
 * the response against the original request, then route it to the
 * IPMB/LAN response/command handlers, the async-event handler, or the
 * local-BMC response handler.
 * Fix: corrected "to small" -> "too small" in the warning message.
 */
2470 static int handle_new_recv_msg(ipmi_smi_t intf,
2471 struct ipmi_smi_msg *msg)
2479 for (m = 0; m < msg->rsp_size; m++)
2480 printk(" %2.2x", msg->rsp[m]);
2483 if (msg->rsp_size < 2) {
2484 /* Message is too small to be correct. */
2485 printk(KERN_WARNING PFX "BMC returned too small a message"
2486 " for netfn %x cmd %x, got %d bytes\n",
2487 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
2489 /* Generate an error response for the message. */
2490 msg->rsp[0] = msg->data[0] | (1 << 2);
2491 msg->rsp[1] = msg->data[1];
2492 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2494 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
2495 || (msg->rsp[1] != msg->data[1])) /* Command */
2497 /* The response is not even marginally correct. */
2498 printk(KERN_WARNING PFX "BMC returned incorrect response,"
2499 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
2500 (msg->data[0] >> 2) | 1, msg->data[1],
2501 msg->rsp[0] >> 2, msg->rsp[1]);
2503 /* Generate an error response for the message. */
2504 msg->rsp[0] = msg->data[0] | (1 << 2);
2505 msg->rsp[1] = msg->data[1];
2506 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2510 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2511 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
2512 && (msg->user_data != NULL))
2514 /* It's a response to a response we sent. For this we
2515 deliver a send message response to the user. */
2516 struct ipmi_recv_msg *recv_msg = msg->user_data;
2519 if (msg->rsp_size < 2)
2520 /* Message is too small to be correct. */
2523 chan = msg->data[2] & 0x0f;
2524 if (chan >= IPMI_MAX_CHANNELS)
2525 /* Invalid channel number */
2531 /* Make sure the user still exists. */
2532 if (!recv_msg->user || !recv_msg->user->valid)
/* Deliver just the completion code of the Send Message. */
2535 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
2536 recv_msg->msg.data = recv_msg->msg_data;
2537 recv_msg->msg.data_len = 1;
2538 recv_msg->msg_data[0] = msg->rsp[2];
2539 deliver_response(recv_msg);
2540 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2541 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
2543 /* It's from the receive queue. */
2544 chan = msg->rsp[3] & 0xf;
2545 if (chan >= IPMI_MAX_CHANNELS) {
2546 /* Invalid channel number */
/* Route by the medium configured for the source channel. */
2551 switch (intf->channels[chan].medium) {
2552 case IPMI_CHANNEL_MEDIUM_IPMB:
2553 if (msg->rsp[4] & 0x04) {
2554 /* It's a response, so find the
2555 requesting message and send it up. */
2556 requeue = handle_ipmb_get_msg_rsp(intf, msg);
2558 /* It's a command to the SMS from some other
2559 entity. Handle that. */
2560 requeue = handle_ipmb_get_msg_cmd(intf, msg);
2564 case IPMI_CHANNEL_MEDIUM_8023LAN:
2565 case IPMI_CHANNEL_MEDIUM_ASYNC:
2566 if (msg->rsp[6] & 0x04) {
2567 /* It's a response, so find the
2568 requesting message and send it up. */
2569 requeue = handle_lan_get_msg_rsp(intf, msg);
2571 /* It's a command to the SMS from some other
2572 entity. Handle that. */
2573 requeue = handle_lan_get_msg_cmd(intf, msg);
2578 /* We don't handle the channel type, so just
2579 * free the message. */
2583 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2584 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
2586 /* It's an asynchronous event. */
2587 requeue = handle_read_event_rsp(intf, msg);
2589 /* It's a response from the local BMC. */
2590 requeue = handle_bmc_rsp(intf, msg);
2597 /* Handle a new message from the lower layer. */
/*
 * Entry point called by the SMI driver for every completed message.
 * Local Send Message completions (user_data == NULL) only control the
 * retry machinery: a retryable error or success starts the sequence
 * timer, a hard error fails the sequence immediately; the SMI msg is
 * then freed.  All other messages go through handle_new_recv_msg(),
 * with waiting_msgs used as a FIFO to preserve ordering and to park
 * messages that cannot be handled yet (rv == 1).
 */
2598 void ipmi_smi_msg_received(ipmi_smi_t intf,
2599 struct ipmi_smi_msg *msg)
2601 unsigned long flags;
2605 if ((msg->data_size >= 2)
2606 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2607 && (msg->data[1] == IPMI_SEND_MSG_CMD)
2608 && (msg->user_data == NULL))
2610 /* This is the local response to a command send, start
2611 the timer for these. The user_data will not be
2612 NULL if this is a response send, and we will let
2613 response sends just go through. */
2615 /* Check for errors, if we get certain errors (ones
2616 that mean basically we can try again later), we
2617 ignore them and start the timer. Otherwise we
2618 report the error immediately. */
2619 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2620 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2621 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
2623 int chan = msg->rsp[3] & 0xf;
2625 /* Got an error sending the message, handle it. */
2626 spin_lock_irqsave(&intf->counter_lock, flags);
2627 if (chan >= IPMI_MAX_CHANNELS)
2628 ; /* This shouldn't happen */
2629 else if ((intf->channels[chan].medium
2630 == IPMI_CHANNEL_MEDIUM_8023LAN)
2631 || (intf->channels[chan].medium
2632 == IPMI_CHANNEL_MEDIUM_ASYNC))
2633 intf->sent_lan_command_errs++;
2635 intf->sent_ipmb_command_errs++;
2636 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Fail the pending sequence with the BMC's completion code. */
2637 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2639 /* The message was sent, start the timer. */
2640 intf_start_seq_timer(intf, msg->msgid);
2643 ipmi_free_smi_msg(msg);
2647 /* To preserve message order, if the list is not empty, we
2648 tack this message onto the end of the list. */
2649 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2650 if (!list_empty(&intf->waiting_msgs)) {
2651 list_add_tail(&msg->link, &intf->waiting_msgs);
2652 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2655 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2657 rv = handle_new_recv_msg(intf, msg);
2659 /* Could not handle the message now, just add it to a
2660 list to handle later. */
2661 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2662 list_add_tail(&msg->link, &intf->waiting_msgs);
2663 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2664 } else if (rv == 0) {
2665 ipmi_free_smi_msg(msg);
/* Notify every registered user that has a watchdog pretimeout handler.
 * Iteration uses the RCU list primitive, so readers presumably run
 * under rcu_read_lock() — the locking lines are outside this view. */
2672 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2677 list_for_each_entry_rcu(user, &intf->users, link) {
2678 if (! user->handler->ipmi_watchdog_pretimeout)
2681 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/* Turn a timed-out request into a synthetic response carrying the
 * IPMI timeout completion code and deliver it to the waiting user. */
2687 handle_msg_timeout(struct ipmi_recv_msg *msg)
2689 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2690 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
2691 msg->msg.netfn |= 1; /* Convert to a response. */
2692 msg->msg.data_len = 1;
2693 msg->msg.data = msg->msg_data;
2694 deliver_response(msg);
/* Build a fresh SMI message from a receive message so it can be
 * retransmitted; the sequence number/id are folded into msgid.
 * Returns NULL on allocation failure (caller relies on retries). */
2697 static struct ipmi_smi_msg *
2698 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2699 unsigned char seq, long seqid)
2701 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
2703 /* If we can't allocate the message, then just return, we
2704 get 4 retries, so this should be ok. */
2707 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
2708 smi_msg->data_size = recv_msg->msg.data_len;
2709 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the outgoing bytes (guarded by a DEBUG_MSGING-style
 * conditional outside this view — confirm against full source). */
2715 for (m = 0; m < smi_msg->data_size; m++)
2716 printk(" %2.2x", smi_msg->data[m]);
/* Age one sequence-table entry by timeout_period.  If it expires with
 * no retries left, move its recv_msg onto the caller's timeouts list;
 * otherwise retransmit it.  Called with intf->seq_lock held; *flags
 * holds the saved IRQ flags so the lock can be dropped around the
 * actual send. */
2723 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
2724 struct list_head *timeouts, long timeout_period,
2725 int slot, unsigned long *flags)
2727 struct ipmi_recv_msg *msg;
2732 ent->timeout -= timeout_period;
2733 if (ent->timeout > 0)
2736 if (ent->retries_left == 0) {
2737 /* The message has used all its retries. */
2739 msg = ent->recv_msg;
2740 list_add_tail(&msg->link, timeouts);
/* Account the timeout in the right per-address-type counter. */
2741 spin_lock(&intf->counter_lock);
2743 intf->timed_out_ipmb_broadcasts++;
2744 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2745 intf->timed_out_lan_commands++;
2747 intf->timed_out_ipmb_commands++;
2748 spin_unlock(&intf->counter_lock);
2750 struct ipmi_smi_msg *smi_msg;
2751 /* More retries, send again. */
2753 /* Start with the max timer, set to normal
2754 timer after the message is sent. */
2755 ent->timeout = MAX_MSG_TIMEOUT;
2756 ent->retries_left--;
2757 spin_lock(&intf->counter_lock);
2758 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2759 intf->retransmitted_lan_commands++;
2761 intf->retransmitted_ipmb_commands++;
2762 spin_unlock(&intf->counter_lock);
2764 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock while calling into the lower layer — the sender may
 * sleep or re-enter the handler; reacquire before returning. */
2769 spin_unlock_irqrestore(&intf->seq_lock, *flags);
2770 /* Send the new message. We send with a zero
2771 * priority. It timed out, I doubt time is
2772 * that critical now, and high priority
2773 * messages are really only for messages to the
2774 * local MC, which don't get resent. */
2775 intf->handlers->sender(intf->send_info,
2777 spin_lock_irqsave(&intf->seq_lock, *flags);
/* Periodic work: for every valid interface, (1) retry queued incoming
 * messages, (2) age the sequence table and collect expired requests,
 * (3) deliver timeout responses for everything collected.  The
 * interface is pinned with kref_get while interfaces_lock is dropped. */
2781 static void ipmi_timeout_handler(long timeout_period)
2784 struct list_head timeouts;
2785 struct ipmi_recv_msg *msg, *msg2;
2786 struct ipmi_smi_msg *smi_msg, *smi_msg2;
2787 unsigned long flags;
2790 INIT_LIST_HEAD(&timeouts);
2792 spin_lock(&interfaces_lock);
2793 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2794 intf = ipmi_interfaces[i];
2795 if (IPMI_INVALID_INTERFACE(intf))
2797 kref_get(&intf->refcount);
2798 spin_unlock(&interfaces_lock);
2800 /* See if any waiting messages need to be processed. */
2801 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2802 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
2803 if (! handle_new_recv_msg(intf, smi_msg)) {
2804 list_del(&smi_msg->link);
2805 ipmi_free_smi_msg(smi_msg);
2807 /* To preserve message order, quit if we
2808 can't handle a message. */
2812 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2814 /* Go through the seq table and find any messages that
2815 have timed out, putting them in the timeouts
2817 spin_lock_irqsave(&intf->seq_lock, flags);
2818 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
2819 check_msg_timeout(intf, &(intf->seq_table[j]),
2820 &timeouts, timeout_period, j,
2822 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver timeout responses outside all locks. */
2824 list_for_each_entry_safe(msg, msg2, &timeouts, link)
2825 handle_msg_timeout(msg);
2827 kref_put(&intf->refcount, intf_free);
2828 spin_lock(&interfaces_lock);
2830 spin_unlock(&interfaces_lock);
/* Ask every valid interface's lower layer to fetch pending events
 * from the BMC event queue. */
2833 static void ipmi_request_event(void)
2838 spin_lock(&interfaces_lock);
2839 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2840 intf = ipmi_interfaces[i];
2841 if (IPMI_INVALID_INTERFACE(intf))
2844 intf->handlers->request_events(intf->send_info);
2846 spin_unlock(&interfaces_lock);
/* Periodic driver timer and its tunables. */
2849 static struct timer_list ipmi_timer;
2851 /* Call every ~100 ms. */
2852 #define IPMI_TIMEOUT_TIME 100
2854 /* How many jiffies does it take to get to the timeout time. */
2855 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
2857 /* Request events from the queue every second (this is the number of
2858 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
2859 future, IPMI will add a way to know immediately if an event is in
2860 the queue and this silliness can go away. */
2861 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set non-zero at module exit to stop the timer from rearming. */
2863 static atomic_t stop_operation;
/* Countdown of timer ticks until the next event-queue poll. */
2864 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/* Timer callback: run the timeout handler each tick, poll for events
 * once per IPMI_REQUEST_EV_TIME ticks, and rearm unless stopping. */
2866 static void ipmi_timeout(unsigned long data)
2868 if (atomic_read(&stop_operation))
2872 if (ticks_to_req_ev == 0) {
2873 ipmi_request_event();
2874 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2877 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
2879 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Leak-tracking counters for the two message types; checked at exit. */
2883 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
2884 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
2886 /* FIXME - convert these to slabs. */
/* done-handler installed by the allocator below; frees and uncounts. */
2887 static void free_smi_msg(struct ipmi_smi_msg *msg)
2889 atomic_dec(&smi_msg_inuse_count);
/* Allocate an SMI message (GFP_ATOMIC: callable from IRQ context).
 * May return NULL — callers must check. */
2893 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
2895 struct ipmi_smi_msg *rv;
2896 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
2898 rv->done = free_smi_msg;
2899 rv->user_data = NULL;
2900 atomic_inc(&smi_msg_inuse_count);
/* done-handler for receive messages; mirrors free_smi_msg. */
2905 static void free_recv_msg(struct ipmi_recv_msg *msg)
2907 atomic_dec(&recv_msg_inuse_count);
/* Allocate a receive message; same GFP_ATOMIC/NULL caveats as above. */
2911 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2913 struct ipmi_recv_msg *rv;
2915 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
2917 rv->done = free_recv_msg;
2918 atomic_inc(&recv_msg_inuse_count);
/* Release a receive message and drop the reference it held on its
 * owning user (if any — the guard lines are outside this view). */
2923 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
2926 kref_put(&msg->user->refcount, free_user);
2930 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done handlers: the panic path uses on-stack messages that
 * must never be kfree'd. */
2932 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
2936 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2940 #ifdef CONFIG_IPMI_PANIC_STRING
/* null_user_handler hook: capture the Get Event Receiver response so
 * send_panic_events() knows where to direct OEM SEL entries. */
2941 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2943 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2944 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
2945 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
2946 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2948 /* A get event receiver command, save it. */
2949 intf->event_receiver = msg->msg.data[1];
2950 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/* null_user_handler hook: capture the Get Device ID response and
 * record whether the local MC is an SEL device / event generator. */
2954 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2956 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2957 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2958 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
2959 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2961 /* A get device id command, save if we are an event
2962 receiver or generator. */
2963 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
2964 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/* Called from the panic notifier: emit an "OS Critical Stop" platform
 * event on every interface and, when CONFIG_IPMI_PANIC_STRING is set,
 * chop the panic string into OEM SEL records and store them.  Runs
 * with run-to-completion set, no retries, no waiting — the system is
 * going down and nothing may sleep. */
2969 static void send_panic_events(char *str)
2971 struct kernel_ipmi_msg msg;
2973 unsigned char data[16];
2975 struct ipmi_system_interface_addr *si;
2976 struct ipmi_addr addr;
/* On-stack messages with dummy done handlers — never kfree'd. */
2977 struct ipmi_smi_msg smi_msg;
2978 struct ipmi_recv_msg recv_msg;
2980 si = (struct ipmi_system_interface_addr *) &addr;
2981 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2982 si->channel = IPMI_BMC_CHANNEL;
2985 /* Fill in an event telling that we have failed. */
2986 msg.netfn = 0x04; /* Sensor or Event. */
2987 msg.cmd = 2; /* Platform event command. */
2990 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
2991 data[1] = 0x03; /* This is for IPMI 1.0. */
2992 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
2993 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
2994 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
2996 /* Put a few breadcrumbs in. Hopefully later we can add more things
2997 to make the panic events more useful. */
3004 smi_msg.done = dummy_smi_done_handler;
3005 recv_msg.done = dummy_recv_done_handler;
3007 /* For every registered interface, send the event. */
3008 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3009 intf = ipmi_interfaces[i];
3010 if (IPMI_INVALID_INTERFACE(intf))
3013 /* Send the event announcing the panic. */
3014 intf->handlers->set_run_to_completion(intf->send_info, 1);
3015 i_ipmi_request(NULL,
3024 intf->channels[0].address,
3025 intf->channels[0].lun,
3026 0, 1); /* Don't retry, and don't wait. */
3029 #ifdef CONFIG_IPMI_PANIC_STRING
3030 /* On every interface, dump a bunch of OEM event holding the
3035 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3037 struct ipmi_ipmb_addr *ipmb;
3040 intf = ipmi_interfaces[i];
3041 if (IPMI_INVALID_INTERFACE(intf))
3044 /* First job here is to figure out where to send the
3045 OEM events. There's no way in IPMI to send OEM
3046 events using an event send command, so we have to
3047 find the SEL to put them in and stick them in
3050 /* Get capabilities from the get device id. */
3051 intf->local_sel_device = 0;
3052 intf->local_event_generator = 0;
3053 intf->event_receiver = 0;
3055 /* Request the device info from the local MC. */
3056 msg.netfn = IPMI_NETFN_APP_REQUEST;
3057 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* The fetcher hooks below intercept the responses since there is
 * no real user to deliver them to. */
3060 intf->null_user_handler = device_id_fetcher;
3061 i_ipmi_request(NULL,
3070 intf->channels[0].address,
3071 intf->channels[0].lun,
3072 0, 1); /* Don't retry, and don't wait. */
3074 if (intf->local_event_generator) {
3075 /* Request the event receiver from the local MC. */
3076 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3077 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3080 intf->null_user_handler = event_receiver_fetcher;
3081 i_ipmi_request(NULL,
3090 intf->channels[0].address,
3091 intf->channels[0].lun,
3092 0, 1); /* no retry, and no wait. */
3094 intf->null_user_handler = NULL;
3096 /* Validate the event receiver. The low bit must not
3097 be 1 (it must be a valid IPMB address), it cannot
3098 be zero, and it must not be my address. */
3099 if (((intf->event_receiver & 1) == 0)
3100 && (intf->event_receiver != 0)
3101 && (intf->event_receiver != intf->channels[0].address))
3103 /* The event receiver is valid, send an IPMB
3105 ipmb = (struct ipmi_ipmb_addr *) &addr;
3106 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3107 ipmb->channel = 0; /* FIXME - is this right? */
3108 ipmb->lun = intf->event_receiver_lun;
3109 ipmb->slave_addr = intf->event_receiver;
3110 } else if (intf->local_sel_device) {
3111 /* The event receiver was not valid (or was
3112 me), but I am an SEL device, just dump it
3114 si = (struct ipmi_system_interface_addr *) &addr;
3115 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3116 si->channel = IPMI_BMC_CHANNEL;
3119 continue; /* Nowhere to send the event. */
3122 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3123 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Walk the panic string in 11-byte chunks, one SEL record each. */
3129 int size = strlen(p);
3135 data[2] = 0xf0; /* OEM event without timestamp. */
3136 data[3] = intf->channels[0].address;
3137 data[4] = j++; /* sequence # */
3138 /* Always give 11 bytes, so strncpy will fill
3139 it with zeroes for me. */
3140 strncpy(data+5, p, 11);
3143 i_ipmi_request(NULL,
3152 intf->channels[0].address,
3153 intf->channels[0].lun,
3154 0, 1); /* no retry, and no wait. */
3157 #endif /* CONFIG_IPMI_PANIC_STRING */
3159 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guard so the panic work only runs once even if the notifier fires
 * repeatedly (the check itself is outside this view — confirm). */
3161 static int has_paniced = 0;
/* Panic notifier callback: force every interface into polled
 * run-to-completion mode, then optionally log panic events. */
3163 static int panic_event(struct notifier_block *this,
3164 unsigned long event,
3174 /* For every registered interface, set it to run to completion. */
3175 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3176 intf = ipmi_interfaces[i];
3177 if (IPMI_INVALID_INTERFACE(intf))
3180 intf->handlers->set_run_to_completion(intf->send_info, 1);
3183 #ifdef CONFIG_IPMI_PANIC_EVENT
3184 send_panic_events(ptr);
/* Registered on panic_notifier_list at init time. */
3190 static struct notifier_block panic_block = {
3191 .notifier_call = panic_event,
3193 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/* One-time core initialization: clear the interface table, create the
 * /proc/ipmi directory, start the periodic timer, and register the
 * panic notifier.  Guarded by the file-scope 'initialized' flag
 * (the check is outside this view). */
3196 static int ipmi_init_msghandler(void)
3203 printk(KERN_INFO "ipmi message handler version "
3204 IPMI_DRIVER_VERSION "\n");
3206 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3207 ipmi_interfaces[i] = NULL;
3209 #ifdef CONFIG_PROC_FS
3210 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3211 if (!proc_ipmi_root) {
3212 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3216 proc_ipmi_root->owner = THIS_MODULE;
3217 #endif /* CONFIG_PROC_FS */
3219 init_timer(&ipmi_timer);
3220 ipmi_timer.data = 0;
3221 ipmi_timer.function = ipmi_timeout;
3222 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3223 add_timer(&ipmi_timer);
3225 notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: just delegates to the shared initializer. */
3232 static __init int ipmi_init_msghandler_mod(void)
3234 ipmi_init_msghandler();
/* Module teardown: unregister the panic notifier, stop the timer
 * race-free, remove the proc dir, and warn about leaked messages. */
3238 static __exit void cleanup_ipmi(void)
3245 notifier_chain_unregister(&panic_notifier_list, &panic_block);
3247 /* This can't be called if any interfaces exist, so no worry about
3248 shutting down the interfaces. */
3250 /* Tell the timer to stop, then wait for it to stop. This avoids
3251 problems with race conditions removing the timer here. */
3252 atomic_inc(&stop_operation);
3253 del_timer_sync(&ipmi_timer);
3255 #ifdef CONFIG_PROC_FS
3256 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3257 #endif /* CONFIG_PROC_FS */
3261 /* Check for buffer leaks. */
3262 count = atomic_read(&smi_msg_inuse_count);
3264 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3266 count = atomic_read(&recv_msg_inuse_count);
3268 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module metadata and the public symbols exported to other IPMI
 * modules (SMI drivers, watchdog, device interface). */
3271 module_exit(cleanup_ipmi);
3273 module_init(ipmi_init_msghandler_mod);
3274 MODULE_LICENSE("GPL");
3275 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3276 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3277 MODULE_VERSION(IPMI_DRIVER_VERSION);
3279 EXPORT_SYMBOL(ipmi_create_user);
3280 EXPORT_SYMBOL(ipmi_destroy_user);
3281 EXPORT_SYMBOL(ipmi_get_version);
3282 EXPORT_SYMBOL(ipmi_request_settime);
3283 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3284 EXPORT_SYMBOL(ipmi_register_smi);
3285 EXPORT_SYMBOL(ipmi_unregister_smi);
3286 EXPORT_SYMBOL(ipmi_register_for_cmd);
3287 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3288 EXPORT_SYMBOL(ipmi_smi_msg_received);
3289 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3290 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3291 EXPORT_SYMBOL(ipmi_addr_length);
3292 EXPORT_SYMBOL(ipmi_validate_addr);
3293 EXPORT_SYMBOL(ipmi_set_gets_events);
3294 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3295 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3296 EXPORT_SYMBOL(ipmi_set_my_address);
3297 EXPORT_SYMBOL(ipmi_get_my_address);
3298 EXPORT_SYMBOL(ipmi_set_my_LUN);
3299 EXPORT_SYMBOL(ipmi_get_my_LUN);
3300 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3301 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3302 EXPORT_SYMBOL(ipmi_free_recv_msg);