4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/poll.h>
37 #include <linux/sched.h>
38 #include <linux/seq_file.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48 #include <linux/interrupt.h>
49 #include <linux/moduleparam.h>
50 #include <linux/workqueue.h>
51 #include <linux/uuid.h>
52 #include <linux/nospec.h>
54 #define PFX "IPMI message handler: "
56 #define IPMI_DRIVER_VERSION "39.2"
58 static struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void);
59 static int ipmi_init_msghandler(void);
60 static void smi_recv_tasklet(unsigned long);
61 static void handle_new_recv_msgs(ipmi_smi_t intf
);
62 static void need_waiter(ipmi_smi_t intf
);
63 static int handle_one_recv_msg(ipmi_smi_t intf
,
64 struct ipmi_smi_msg
*msg
);
66 static int initialized
;
68 enum ipmi_panic_event_op
{
69 IPMI_SEND_PANIC_EVENT_NONE
,
70 IPMI_SEND_PANIC_EVENT
,
71 IPMI_SEND_PANIC_EVENT_STRING
73 #ifdef CONFIG_IPMI_PANIC_STRING
74 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
75 #elif defined(CONFIG_IPMI_PANIC_EVENT)
76 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
78 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
80 static enum ipmi_panic_event_op ipmi_send_panic_event
= IPMI_PANIC_DEFAULT
;
82 static int panic_op_write_handler(const char *val
,
83 const struct kernel_param
*kp
)
88 strncpy(valcp
, val
, 16);
93 if (strcmp(s
, "none") == 0)
94 ipmi_send_panic_event
= IPMI_SEND_PANIC_EVENT_NONE
;
95 else if (strcmp(s
, "event") == 0)
96 ipmi_send_panic_event
= IPMI_SEND_PANIC_EVENT
;
97 else if (strcmp(s
, "string") == 0)
98 ipmi_send_panic_event
= IPMI_SEND_PANIC_EVENT_STRING
;
105 static int panic_op_read_handler(char *buffer
, const struct kernel_param
*kp
)
107 switch (ipmi_send_panic_event
) {
108 case IPMI_SEND_PANIC_EVENT_NONE
:
109 strcpy(buffer
, "none");
112 case IPMI_SEND_PANIC_EVENT
:
113 strcpy(buffer
, "event");
116 case IPMI_SEND_PANIC_EVENT_STRING
:
117 strcpy(buffer
, "string");
121 strcpy(buffer
, "???");
125 return strlen(buffer
);
128 static const struct kernel_param_ops panic_op_ops
= {
129 .set
= panic_op_write_handler
,
130 .get
= panic_op_read_handler
132 module_param_cb(panic_op
, &panic_op_ops
, NULL
, 0600);
133 MODULE_PARM_DESC(panic_op
, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
136 #ifdef CONFIG_IPMI_PROC_INTERFACE
137 static struct proc_dir_entry
*proc_ipmi_root
;
138 #endif /* CONFIG_IPMI_PROC_INTERFACE */
140 /* Remain in auto-maintenance mode for this amount of time (in ms). */
141 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
143 #define MAX_EVENTS_IN_QUEUE 25
146 * Don't let a message sit in a queue forever, always time it with at lest
147 * the max message timer. This is in milliseconds.
149 #define MAX_MSG_TIMEOUT 60000
151 /* Call every ~1000 ms. */
152 #define IPMI_TIMEOUT_TIME 1000
154 /* How many jiffies does it take to get to the timeout time. */
155 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
158 * Request events from the queue every second (this is the number of
159 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
160 * future, IPMI will add a way to know immediately if an event is in
161 * the queue and this silliness can go away.
163 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
165 /* How long should we cache dynamic device IDs? */
166 #define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
169 * The main "user" data structure.
172 struct list_head link
;
174 /* Set to false when the user is destroyed. */
177 struct kref refcount
;
179 /* The upper layer that handles receive messages. */
180 const struct ipmi_user_hndl
*handler
;
183 /* The interface this user is bound to. */
186 /* Does this interface receive IPMI events? */
191 struct list_head link
;
199 * This is used to form a linked lised during mass deletion.
200 * Since this is in an RCU list, we cannot use the link above
201 * or change any data until the RCU period completes. So we
202 * use this next variable during mass deletion so we can have
203 * a list and don't have to wait and restart the search on
204 * every individual deletion of a command.
206 struct cmd_rcvr
*next
;
210 unsigned int inuse
: 1;
211 unsigned int broadcast
: 1;
213 unsigned long timeout
;
214 unsigned long orig_timeout
;
215 unsigned int retries_left
;
218 * To verify on an incoming send message response that this is
219 * the message that the response is for, we keep a sequence id
220 * and increment it every time we send a message.
225 * This is held so we can properly respond to the message on a
226 * timeout, and it is used to hold the temporary data for
227 * retransmission, too.
229 struct ipmi_recv_msg
*recv_msg
;
233 * Store the information in a msgid (long) to allow us to find a
234 * sequence table entry from the msgid.
236 #define STORE_SEQ_IN_MSGID(seq, seqid) \
237 ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
239 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
241 seq = (((msgid) >> 26) & 0x3f); \
242 seqid = ((msgid) & 0x3ffffff); \
245 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
247 #define IPMI_MAX_CHANNELS 16
248 struct ipmi_channel
{
249 unsigned char medium
;
250 unsigned char protocol
;
253 struct ipmi_channel_set
{
254 struct ipmi_channel c
[IPMI_MAX_CHANNELS
];
257 struct ipmi_my_addrinfo
{
259 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
260 * but may be changed by the user.
262 unsigned char address
;
265 * My LUN. This should generally stay the SMS LUN, but just in
271 #ifdef CONFIG_IPMI_PROC_INTERFACE
272 struct ipmi_proc_entry
{
274 struct ipmi_proc_entry
*next
;
279 * Note that the product id, manufacturer id, guid, and device id are
280 * immutable in this structure, so dyn_mutex is not required for
281 * accessing those. If those change on a BMC, a new BMC is allocated.
284 struct platform_device pdev
;
285 struct list_head intfs
; /* Interfaces on this BMC. */
286 struct ipmi_device_id id
;
287 struct ipmi_device_id fetch_id
;
289 unsigned long dyn_id_expiry
;
290 struct mutex dyn_mutex
; /* Protects id, intfs, & dyn* */
294 struct kref usecount
;
295 struct work_struct remove_work
;
297 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
299 static int bmc_get_device_id(ipmi_smi_t intf
, struct bmc_device
*bmc
,
300 struct ipmi_device_id
*id
,
301 bool *guid_set
, guid_t
*guid
);
304 * Various statistics for IPMI, these index stats[] in the ipmi_smi
307 enum ipmi_stat_indexes
{
308 /* Commands we got from the user that were invalid. */
309 IPMI_STAT_sent_invalid_commands
= 0,
311 /* Commands we sent to the MC. */
312 IPMI_STAT_sent_local_commands
,
314 /* Responses from the MC that were delivered to a user. */
315 IPMI_STAT_handled_local_responses
,
317 /* Responses from the MC that were not delivered to a user. */
318 IPMI_STAT_unhandled_local_responses
,
320 /* Commands we sent out to the IPMB bus. */
321 IPMI_STAT_sent_ipmb_commands
,
323 /* Commands sent on the IPMB that had errors on the SEND CMD */
324 IPMI_STAT_sent_ipmb_command_errs
,
326 /* Each retransmit increments this count. */
327 IPMI_STAT_retransmitted_ipmb_commands
,
330 * When a message times out (runs out of retransmits) this is
333 IPMI_STAT_timed_out_ipmb_commands
,
336 * This is like above, but for broadcasts. Broadcasts are
337 * *not* included in the above count (they are expected to
340 IPMI_STAT_timed_out_ipmb_broadcasts
,
342 /* Responses I have sent to the IPMB bus. */
343 IPMI_STAT_sent_ipmb_responses
,
345 /* The response was delivered to the user. */
346 IPMI_STAT_handled_ipmb_responses
,
348 /* The response had invalid data in it. */
349 IPMI_STAT_invalid_ipmb_responses
,
351 /* The response didn't have anyone waiting for it. */
352 IPMI_STAT_unhandled_ipmb_responses
,
354 /* Commands we sent out to the IPMB bus. */
355 IPMI_STAT_sent_lan_commands
,
357 /* Commands sent on the IPMB that had errors on the SEND CMD */
358 IPMI_STAT_sent_lan_command_errs
,
360 /* Each retransmit increments this count. */
361 IPMI_STAT_retransmitted_lan_commands
,
364 * When a message times out (runs out of retransmits) this is
367 IPMI_STAT_timed_out_lan_commands
,
369 /* Responses I have sent to the IPMB bus. */
370 IPMI_STAT_sent_lan_responses
,
372 /* The response was delivered to the user. */
373 IPMI_STAT_handled_lan_responses
,
375 /* The response had invalid data in it. */
376 IPMI_STAT_invalid_lan_responses
,
378 /* The response didn't have anyone waiting for it. */
379 IPMI_STAT_unhandled_lan_responses
,
381 /* The command was delivered to the user. */
382 IPMI_STAT_handled_commands
,
384 /* The command had invalid data in it. */
385 IPMI_STAT_invalid_commands
,
387 /* The command didn't have anyone waiting for it. */
388 IPMI_STAT_unhandled_commands
,
390 /* Invalid data in an event. */
391 IPMI_STAT_invalid_events
,
393 /* Events that were received with the proper format. */
396 /* Retransmissions on IPMB that failed. */
397 IPMI_STAT_dropped_rexmit_ipmb_commands
,
399 /* Retransmissions on LAN that failed. */
400 IPMI_STAT_dropped_rexmit_lan_commands
,
402 /* This *must* remain last, add new values above this. */
407 #define IPMI_IPMB_NUM_SEQ 64
409 /* What interface number are we? */
412 struct kref refcount
;
414 /* Set when the interface is being unregistered. */
417 /* Used for a list of interfaces. */
418 struct list_head link
;
421 * The list of upper layers that are using me. seq_lock
424 struct list_head users
;
426 /* Used for wake ups at startup. */
427 wait_queue_head_t waitq
;
430 * Prevents the interface from being unregistered when the
431 * interface is used by being looked up through the BMC
434 struct mutex bmc_reg_mutex
;
436 struct bmc_device tmp_bmc
;
437 struct bmc_device
*bmc
;
439 struct list_head bmc_link
;
441 bool in_bmc_register
; /* Handle recursive situations. Yuck. */
442 struct work_struct bmc_reg_work
;
445 * This is the lower-layer's sender routine. Note that you
446 * must either be holding the ipmi_interfaces_mutex or be in
447 * an umpreemptible region to use this. You must fetch the
448 * value into a local variable and make sure it is not NULL.
450 const struct ipmi_smi_handlers
*handlers
;
453 #ifdef CONFIG_IPMI_PROC_INTERFACE
454 /* A list of proc entries for this interface. */
455 struct mutex proc_entry_lock
;
456 struct ipmi_proc_entry
*proc_entries
;
458 struct proc_dir_entry
*proc_dir
;
459 char proc_dir_name
[10];
462 /* Driver-model device for the system interface. */
463 struct device
*si_dev
;
466 * A table of sequence numbers for this interface. We use the
467 * sequence numbers for IPMB messages that go out of the
468 * interface to match them up with their responses. A routine
469 * is called periodically to time the items in this list.
472 struct seq_table seq_table
[IPMI_IPMB_NUM_SEQ
];
476 * Messages queued for delivery. If delivery fails (out of memory
477 * for instance), They will stay in here to be processed later in a
478 * periodic timer interrupt. The tasklet is for handling received
479 * messages directly from the handler.
481 spinlock_t waiting_rcv_msgs_lock
;
482 struct list_head waiting_rcv_msgs
;
483 atomic_t watchdog_pretimeouts_to_deliver
;
484 struct tasklet_struct recv_tasklet
;
486 spinlock_t xmit_msgs_lock
;
487 struct list_head xmit_msgs
;
488 struct ipmi_smi_msg
*curr_msg
;
489 struct list_head hp_xmit_msgs
;
492 * The list of command receivers that are registered for commands
495 struct mutex cmd_rcvrs_mutex
;
496 struct list_head cmd_rcvrs
;
499 * Events that were queues because no one was there to receive
502 spinlock_t events_lock
; /* For dealing with event stuff. */
503 struct list_head waiting_events
;
504 unsigned int waiting_events_count
; /* How many events in queue? */
505 char delivering_events
;
506 char event_msg_printed
;
507 atomic_t event_waiters
;
508 unsigned int ticks_to_req_ev
;
509 int last_needs_timer
;
512 * The event receiver for my BMC, only really used at panic
513 * shutdown as a place to store this.
515 unsigned char event_receiver
;
516 unsigned char event_receiver_lun
;
517 unsigned char local_sel_device
;
518 unsigned char local_event_generator
;
520 /* For handling of maintenance mode. */
521 int maintenance_mode
;
522 bool maintenance_mode_enable
;
523 int auto_maintenance_timeout
;
524 spinlock_t maintenance_mode_lock
; /* Used in a timer... */
527 * A cheap hack, if this is non-null and a message to an
528 * interface comes in with a NULL user, call this routine with
529 * it. Note that the message will still be freed by the
530 * caller. This only works on the system interface.
532 * Protected by bmc_reg_mutex.
534 void (*null_user_handler
)(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
);
537 * When we are scanning the channels for an SMI, this will
538 * tell which channel we are scanning.
542 /* Channel information */
543 struct ipmi_channel_set
*channel_list
;
544 unsigned int curr_working_cset
; /* First index into the following. */
545 struct ipmi_channel_set wchannels
[2];
546 struct ipmi_my_addrinfo addrinfo
[IPMI_MAX_CHANNELS
];
549 atomic_t stats
[IPMI_NUM_STATS
];
552 * run_to_completion duplicate of smb_info, smi_info
553 * and ipmi_serial_info structures. Used to decrease numbers of
554 * parameters passed by "low" level IPMI code.
556 int run_to_completion
;
558 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
560 static void __get_guid(ipmi_smi_t intf
);
561 static void __ipmi_bmc_unregister(ipmi_smi_t intf
);
562 static int __ipmi_bmc_register(ipmi_smi_t intf
,
563 struct ipmi_device_id
*id
,
564 bool guid_set
, guid_t
*guid
, int intf_num
);
565 static int __scan_channels(ipmi_smi_t intf
, struct ipmi_device_id
*id
);
569 * The driver model view of the IPMI messaging driver.
571 static struct platform_driver ipmidriver
= {
574 .bus
= &platform_bus_type
578 * This mutex keeps us from adding the same BMC twice.
580 static DEFINE_MUTEX(ipmidriver_mutex
);
582 static LIST_HEAD(ipmi_interfaces
);
583 static DEFINE_MUTEX(ipmi_interfaces_mutex
);
586 * List of watchers that want to know when smi's are added and deleted.
588 static LIST_HEAD(smi_watchers
);
589 static DEFINE_MUTEX(smi_watchers_mutex
);
591 #define ipmi_inc_stat(intf, stat) \
592 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
593 #define ipmi_get_stat(intf, stat) \
594 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
596 static const char * const addr_src_to_str
[] = {
597 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
598 "device-tree", "platform"
601 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src
)
604 src
= 0; /* Invalid */
605 return addr_src_to_str
[src
];
607 EXPORT_SYMBOL(ipmi_addr_src_to_str
);
609 static int is_lan_addr(struct ipmi_addr
*addr
)
611 return addr
->addr_type
== IPMI_LAN_ADDR_TYPE
;
614 static int is_ipmb_addr(struct ipmi_addr
*addr
)
616 return addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
;
619 static int is_ipmb_bcast_addr(struct ipmi_addr
*addr
)
621 return addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
;
624 static void free_recv_msg_list(struct list_head
*q
)
626 struct ipmi_recv_msg
*msg
, *msg2
;
628 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
629 list_del(&msg
->link
);
630 ipmi_free_recv_msg(msg
);
634 static void free_smi_msg_list(struct list_head
*q
)
636 struct ipmi_smi_msg
*msg
, *msg2
;
638 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
639 list_del(&msg
->link
);
640 ipmi_free_smi_msg(msg
);
644 static void clean_up_interface_data(ipmi_smi_t intf
)
647 struct cmd_rcvr
*rcvr
, *rcvr2
;
648 struct list_head list
;
650 tasklet_kill(&intf
->recv_tasklet
);
652 free_smi_msg_list(&intf
->waiting_rcv_msgs
);
653 free_recv_msg_list(&intf
->waiting_events
);
656 * Wholesale remove all the entries from the list in the
657 * interface and wait for RCU to know that none are in use.
659 mutex_lock(&intf
->cmd_rcvrs_mutex
);
660 INIT_LIST_HEAD(&list
);
661 list_splice_init_rcu(&intf
->cmd_rcvrs
, &list
, synchronize_rcu
);
662 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
664 list_for_each_entry_safe(rcvr
, rcvr2
, &list
, link
)
667 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
668 if ((intf
->seq_table
[i
].inuse
)
669 && (intf
->seq_table
[i
].recv_msg
))
670 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
674 static void intf_free(struct kref
*ref
)
676 ipmi_smi_t intf
= container_of(ref
, struct ipmi_smi
, refcount
);
678 clean_up_interface_data(intf
);
682 struct watcher_entry
{
685 struct list_head link
;
688 int ipmi_smi_watcher_register(struct ipmi_smi_watcher
*watcher
)
691 LIST_HEAD(to_deliver
);
692 struct watcher_entry
*e
, *e2
;
694 mutex_lock(&smi_watchers_mutex
);
696 mutex_lock(&ipmi_interfaces_mutex
);
698 /* Build a list of things to deliver. */
699 list_for_each_entry(intf
, &ipmi_interfaces
, link
) {
700 if (intf
->intf_num
== -1)
702 e
= kmalloc(sizeof(*e
), GFP_KERNEL
);
705 kref_get(&intf
->refcount
);
707 e
->intf_num
= intf
->intf_num
;
708 list_add_tail(&e
->link
, &to_deliver
);
711 /* We will succeed, so add it to the list. */
712 list_add(&watcher
->link
, &smi_watchers
);
714 mutex_unlock(&ipmi_interfaces_mutex
);
716 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
718 watcher
->new_smi(e
->intf_num
, e
->intf
->si_dev
);
719 kref_put(&e
->intf
->refcount
, intf_free
);
723 mutex_unlock(&smi_watchers_mutex
);
728 mutex_unlock(&ipmi_interfaces_mutex
);
729 mutex_unlock(&smi_watchers_mutex
);
730 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
732 kref_put(&e
->intf
->refcount
, intf_free
);
737 EXPORT_SYMBOL(ipmi_smi_watcher_register
);
739 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher
*watcher
)
741 mutex_lock(&smi_watchers_mutex
);
742 list_del(&(watcher
->link
));
743 mutex_unlock(&smi_watchers_mutex
);
746 EXPORT_SYMBOL(ipmi_smi_watcher_unregister
);
749 * Must be called with smi_watchers_mutex held.
752 call_smi_watchers(int i
, struct device
*dev
)
754 struct ipmi_smi_watcher
*w
;
756 list_for_each_entry(w
, &smi_watchers
, link
) {
757 if (try_module_get(w
->owner
)) {
759 module_put(w
->owner
);
765 ipmi_addr_equal(struct ipmi_addr
*addr1
, struct ipmi_addr
*addr2
)
767 if (addr1
->addr_type
!= addr2
->addr_type
)
770 if (addr1
->channel
!= addr2
->channel
)
773 if (addr1
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
774 struct ipmi_system_interface_addr
*smi_addr1
775 = (struct ipmi_system_interface_addr
*) addr1
;
776 struct ipmi_system_interface_addr
*smi_addr2
777 = (struct ipmi_system_interface_addr
*) addr2
;
778 return (smi_addr1
->lun
== smi_addr2
->lun
);
781 if (is_ipmb_addr(addr1
) || is_ipmb_bcast_addr(addr1
)) {
782 struct ipmi_ipmb_addr
*ipmb_addr1
783 = (struct ipmi_ipmb_addr
*) addr1
;
784 struct ipmi_ipmb_addr
*ipmb_addr2
785 = (struct ipmi_ipmb_addr
*) addr2
;
787 return ((ipmb_addr1
->slave_addr
== ipmb_addr2
->slave_addr
)
788 && (ipmb_addr1
->lun
== ipmb_addr2
->lun
));
791 if (is_lan_addr(addr1
)) {
792 struct ipmi_lan_addr
*lan_addr1
793 = (struct ipmi_lan_addr
*) addr1
;
794 struct ipmi_lan_addr
*lan_addr2
795 = (struct ipmi_lan_addr
*) addr2
;
797 return ((lan_addr1
->remote_SWID
== lan_addr2
->remote_SWID
)
798 && (lan_addr1
->local_SWID
== lan_addr2
->local_SWID
)
799 && (lan_addr1
->session_handle
800 == lan_addr2
->session_handle
)
801 && (lan_addr1
->lun
== lan_addr2
->lun
));
807 int ipmi_validate_addr(struct ipmi_addr
*addr
, int len
)
809 if (len
< sizeof(struct ipmi_system_interface_addr
))
812 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
813 if (addr
->channel
!= IPMI_BMC_CHANNEL
)
818 if ((addr
->channel
== IPMI_BMC_CHANNEL
)
819 || (addr
->channel
>= IPMI_MAX_CHANNELS
)
820 || (addr
->channel
< 0))
823 if (is_ipmb_addr(addr
) || is_ipmb_bcast_addr(addr
)) {
824 if (len
< sizeof(struct ipmi_ipmb_addr
))
829 if (is_lan_addr(addr
)) {
830 if (len
< sizeof(struct ipmi_lan_addr
))
837 EXPORT_SYMBOL(ipmi_validate_addr
);
839 unsigned int ipmi_addr_length(int addr_type
)
841 if (addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
842 return sizeof(struct ipmi_system_interface_addr
);
844 if ((addr_type
== IPMI_IPMB_ADDR_TYPE
)
845 || (addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
846 return sizeof(struct ipmi_ipmb_addr
);
848 if (addr_type
== IPMI_LAN_ADDR_TYPE
)
849 return sizeof(struct ipmi_lan_addr
);
853 EXPORT_SYMBOL(ipmi_addr_length
);
855 static void deliver_response(struct ipmi_recv_msg
*msg
)
858 ipmi_smi_t intf
= msg
->user_msg_data
;
860 /* Special handling for NULL users. */
861 if (intf
->null_user_handler
) {
862 intf
->null_user_handler(intf
, msg
);
863 ipmi_inc_stat(intf
, handled_local_responses
);
865 /* No handler, so give up. */
866 ipmi_inc_stat(intf
, unhandled_local_responses
);
868 ipmi_free_recv_msg(msg
);
869 } else if (!oops_in_progress
) {
871 * If we are running in the panic context, calling the
872 * receive handler doesn't much meaning and has a deadlock
873 * risk. At this moment, simply skip it in that case.
876 ipmi_user_t user
= msg
->user
;
877 user
->handler
->ipmi_recv_hndl(msg
, user
->handler_data
);
882 deliver_err_response(struct ipmi_recv_msg
*msg
, int err
)
884 msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
885 msg
->msg_data
[0] = err
;
886 msg
->msg
.netfn
|= 1; /* Convert to a response. */
887 msg
->msg
.data_len
= 1;
888 msg
->msg
.data
= msg
->msg_data
;
889 deliver_response(msg
);
893 * Find the next sequence number not being used and add the given
894 * message with the given timeout to the sequence table. This must be
895 * called with the interface's seq_lock held.
897 static int intf_next_seq(ipmi_smi_t intf
,
898 struct ipmi_recv_msg
*recv_msg
,
899 unsigned long timeout
,
908 for (i
= intf
->curr_seq
; (i
+1)%IPMI_IPMB_NUM_SEQ
!= intf
->curr_seq
;
909 i
= (i
+1)%IPMI_IPMB_NUM_SEQ
) {
910 if (!intf
->seq_table
[i
].inuse
)
914 if (!intf
->seq_table
[i
].inuse
) {
915 intf
->seq_table
[i
].recv_msg
= recv_msg
;
918 * Start with the maximum timeout, when the send response
919 * comes in we will start the real timer.
921 intf
->seq_table
[i
].timeout
= MAX_MSG_TIMEOUT
;
922 intf
->seq_table
[i
].orig_timeout
= timeout
;
923 intf
->seq_table
[i
].retries_left
= retries
;
924 intf
->seq_table
[i
].broadcast
= broadcast
;
925 intf
->seq_table
[i
].inuse
= 1;
926 intf
->seq_table
[i
].seqid
= NEXT_SEQID(intf
->seq_table
[i
].seqid
);
928 *seqid
= intf
->seq_table
[i
].seqid
;
929 intf
->curr_seq
= (i
+1)%IPMI_IPMB_NUM_SEQ
;
939 * Return the receive message for the given sequence number and
940 * release the sequence number so it can be reused. Some other data
941 * is passed in to be sure the message matches up correctly (to help
942 * guard against message coming in after their timeout and the
943 * sequence number being reused).
945 static int intf_find_seq(ipmi_smi_t intf
,
950 struct ipmi_addr
*addr
,
951 struct ipmi_recv_msg
**recv_msg
)
956 if (seq
>= IPMI_IPMB_NUM_SEQ
)
959 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
960 if (intf
->seq_table
[seq
].inuse
) {
961 struct ipmi_recv_msg
*msg
= intf
->seq_table
[seq
].recv_msg
;
963 if ((msg
->addr
.channel
== channel
) && (msg
->msg
.cmd
== cmd
)
964 && (msg
->msg
.netfn
== netfn
)
965 && (ipmi_addr_equal(addr
, &(msg
->addr
)))) {
967 intf
->seq_table
[seq
].inuse
= 0;
971 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
977 /* Start the timer for a specific sequence table entry. */
978 static int intf_start_seq_timer(ipmi_smi_t intf
,
987 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
989 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
991 * We do this verification because the user can be deleted
992 * while a message is outstanding.
994 if ((intf
->seq_table
[seq
].inuse
)
995 && (intf
->seq_table
[seq
].seqid
== seqid
)) {
996 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
997 ent
->timeout
= ent
->orig_timeout
;
1000 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1005 /* Got an error for the send message for a specific sequence number. */
1006 static int intf_err_seq(ipmi_smi_t intf
,
1011 unsigned long flags
;
1013 unsigned long seqid
;
1014 struct ipmi_recv_msg
*msg
= NULL
;
1017 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
1019 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1021 * We do this verification because the user can be deleted
1022 * while a message is outstanding.
1024 if ((intf
->seq_table
[seq
].inuse
)
1025 && (intf
->seq_table
[seq
].seqid
== seqid
)) {
1026 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
1029 msg
= ent
->recv_msg
;
1032 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1035 deliver_err_response(msg
, err
);
1041 int ipmi_create_user(unsigned int if_num
,
1042 const struct ipmi_user_hndl
*handler
,
1046 unsigned long flags
;
1047 ipmi_user_t new_user
;
1052 * There is no module usecount here, because it's not
1053 * required. Since this can only be used by and called from
1054 * other modules, they will implicitly use this module, and
1055 * thus this can't be removed unless the other modules are
1059 if (handler
== NULL
)
1063 * Make sure the driver is actually initialized, this handles
1064 * problems with initialization order.
1067 rv
= ipmi_init_msghandler();
1072 * The init code doesn't return an error if it was turned
1073 * off, but it won't initialize. Check that.
1079 new_user
= kmalloc(sizeof(*new_user
), GFP_KERNEL
);
1083 mutex_lock(&ipmi_interfaces_mutex
);
1084 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
1085 if (intf
->intf_num
== if_num
)
1088 /* Not found, return an error */
1093 /* Note that each existing user holds a refcount to the interface. */
1094 kref_get(&intf
->refcount
);
1096 kref_init(&new_user
->refcount
);
1097 new_user
->handler
= handler
;
1098 new_user
->handler_data
= handler_data
;
1099 new_user
->intf
= intf
;
1100 new_user
->gets_events
= false;
1102 if (!try_module_get(intf
->handlers
->owner
)) {
1107 if (intf
->handlers
->inc_usecount
) {
1108 rv
= intf
->handlers
->inc_usecount(intf
->send_info
);
1110 module_put(intf
->handlers
->owner
);
1116 * Hold the lock so intf->handlers is guaranteed to be good
1119 mutex_unlock(&ipmi_interfaces_mutex
);
1121 new_user
->valid
= true;
1122 spin_lock_irqsave(&intf
->seq_lock
, flags
);
1123 list_add_rcu(&new_user
->link
, &intf
->users
);
1124 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
1125 if (handler
->ipmi_watchdog_pretimeout
) {
1126 /* User wants pretimeouts, so make sure to watch for them. */
1127 if (atomic_inc_return(&intf
->event_waiters
) == 1)
1134 kref_put(&intf
->refcount
, intf_free
);
1136 mutex_unlock(&ipmi_interfaces_mutex
);
1140 EXPORT_SYMBOL(ipmi_create_user
);
1142 int ipmi_get_smi_info(int if_num
, struct ipmi_smi_info
*data
)
1146 const struct ipmi_smi_handlers
*handlers
;
1148 mutex_lock(&ipmi_interfaces_mutex
);
1149 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
1150 if (intf
->intf_num
== if_num
)
1153 /* Not found, return an error */
1155 mutex_unlock(&ipmi_interfaces_mutex
);
1159 handlers
= intf
->handlers
;
1161 if (handlers
->get_smi_info
)
1162 rv
= handlers
->get_smi_info(intf
->send_info
, data
);
1163 mutex_unlock(&ipmi_interfaces_mutex
);
1167 EXPORT_SYMBOL(ipmi_get_smi_info
);
1169 static void free_user(struct kref
*ref
)
1171 ipmi_user_t user
= container_of(ref
, struct ipmi_user
, refcount
);
1175 int ipmi_destroy_user(ipmi_user_t user
)
1177 ipmi_smi_t intf
= user
->intf
;
1179 unsigned long flags
;
1180 struct cmd_rcvr
*rcvr
;
1181 struct cmd_rcvr
*rcvrs
= NULL
;
1183 user
->valid
= false;
1185 if (user
->handler
->ipmi_watchdog_pretimeout
)
1186 atomic_dec(&intf
->event_waiters
);
1188 if (user
->gets_events
)
1189 atomic_dec(&intf
->event_waiters
);
1191 /* Remove the user from the interface's sequence table. */
1192 spin_lock_irqsave(&intf
->seq_lock
, flags
);
1193 list_del_rcu(&user
->link
);
1195 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
1196 if (intf
->seq_table
[i
].inuse
1197 && (intf
->seq_table
[i
].recv_msg
->user
== user
)) {
1198 intf
->seq_table
[i
].inuse
= 0;
1199 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
1202 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
1205 * Remove the user from the command receiver's table. First
1206 * we build a list of everything (not using the standard link,
1207 * since other things may be using it till we do
1208 * synchronize_rcu()) then free everything in that list.
1210 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1211 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1212 if (rcvr
->user
== user
) {
1213 list_del_rcu(&rcvr
->link
);
1218 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1226 mutex_lock(&ipmi_interfaces_mutex
);
1227 if (intf
->handlers
) {
1228 module_put(intf
->handlers
->owner
);
1229 if (intf
->handlers
->dec_usecount
)
1230 intf
->handlers
->dec_usecount(intf
->send_info
);
1232 mutex_unlock(&ipmi_interfaces_mutex
);
1234 kref_put(&intf
->refcount
, intf_free
);
1236 kref_put(&user
->refcount
, free_user
);
1240 EXPORT_SYMBOL(ipmi_destroy_user
);
1242 int ipmi_get_version(ipmi_user_t user
,
1243 unsigned char *major
,
1244 unsigned char *minor
)
1246 struct ipmi_device_id id
;
1249 rv
= bmc_get_device_id(user
->intf
, NULL
, &id
, NULL
, NULL
);
1253 *major
= ipmi_version_major(&id
);
1254 *minor
= ipmi_version_minor(&id
);
1258 EXPORT_SYMBOL(ipmi_get_version
);
1260 int ipmi_set_my_address(ipmi_user_t user
,
1261 unsigned int channel
,
1262 unsigned char address
)
1264 if (channel
>= IPMI_MAX_CHANNELS
)
1266 channel
= array_index_nospec(channel
, IPMI_MAX_CHANNELS
);
1267 user
->intf
->addrinfo
[channel
].address
= address
;
1270 EXPORT_SYMBOL(ipmi_set_my_address
);
1272 int ipmi_get_my_address(ipmi_user_t user
,
1273 unsigned int channel
,
1274 unsigned char *address
)
1276 if (channel
>= IPMI_MAX_CHANNELS
)
1278 channel
= array_index_nospec(channel
, IPMI_MAX_CHANNELS
);
1279 *address
= user
->intf
->addrinfo
[channel
].address
;
1282 EXPORT_SYMBOL(ipmi_get_my_address
);
1284 int ipmi_set_my_LUN(ipmi_user_t user
,
1285 unsigned int channel
,
1288 if (channel
>= IPMI_MAX_CHANNELS
)
1290 channel
= array_index_nospec(channel
, IPMI_MAX_CHANNELS
);
1291 user
->intf
->addrinfo
[channel
].lun
= LUN
& 0x3;
1294 EXPORT_SYMBOL(ipmi_set_my_LUN
);
1296 int ipmi_get_my_LUN(ipmi_user_t user
,
1297 unsigned int channel
,
1298 unsigned char *address
)
1300 if (channel
>= IPMI_MAX_CHANNELS
)
1302 channel
= array_index_nospec(channel
, IPMI_MAX_CHANNELS
);
1303 *address
= user
->intf
->addrinfo
[channel
].lun
;
1306 EXPORT_SYMBOL(ipmi_get_my_LUN
);
1308 int ipmi_get_maintenance_mode(ipmi_user_t user
)
1311 unsigned long flags
;
1313 spin_lock_irqsave(&user
->intf
->maintenance_mode_lock
, flags
);
1314 mode
= user
->intf
->maintenance_mode
;
1315 spin_unlock_irqrestore(&user
->intf
->maintenance_mode_lock
, flags
);
1319 EXPORT_SYMBOL(ipmi_get_maintenance_mode
);
1321 static void maintenance_mode_update(ipmi_smi_t intf
)
1323 if (intf
->handlers
->set_maintenance_mode
)
1324 intf
->handlers
->set_maintenance_mode(
1325 intf
->send_info
, intf
->maintenance_mode_enable
);
1328 int ipmi_set_maintenance_mode(ipmi_user_t user
, int mode
)
1331 unsigned long flags
;
1332 ipmi_smi_t intf
= user
->intf
;
1334 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1335 if (intf
->maintenance_mode
!= mode
) {
1337 case IPMI_MAINTENANCE_MODE_AUTO
:
1338 intf
->maintenance_mode_enable
1339 = (intf
->auto_maintenance_timeout
> 0);
1342 case IPMI_MAINTENANCE_MODE_OFF
:
1343 intf
->maintenance_mode_enable
= false;
1346 case IPMI_MAINTENANCE_MODE_ON
:
1347 intf
->maintenance_mode_enable
= true;
1354 intf
->maintenance_mode
= mode
;
1356 maintenance_mode_update(intf
);
1359 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
, flags
);
1363 EXPORT_SYMBOL(ipmi_set_maintenance_mode
);
1365 int ipmi_set_gets_events(ipmi_user_t user
, bool val
)
1367 unsigned long flags
;
1368 ipmi_smi_t intf
= user
->intf
;
1369 struct ipmi_recv_msg
*msg
, *msg2
;
1370 struct list_head msgs
;
1372 INIT_LIST_HEAD(&msgs
);
1374 spin_lock_irqsave(&intf
->events_lock
, flags
);
1375 if (user
->gets_events
== val
)
1378 user
->gets_events
= val
;
1381 if (atomic_inc_return(&intf
->event_waiters
) == 1)
1384 atomic_dec(&intf
->event_waiters
);
1387 if (intf
->delivering_events
)
1389 * Another thread is delivering events for this, so
1390 * let it handle any new events.
1394 /* Deliver any queued events. */
1395 while (user
->gets_events
&& !list_empty(&intf
->waiting_events
)) {
1396 list_for_each_entry_safe(msg
, msg2
, &intf
->waiting_events
, link
)
1397 list_move_tail(&msg
->link
, &msgs
);
1398 intf
->waiting_events_count
= 0;
1399 if (intf
->event_msg_printed
) {
1400 dev_warn(intf
->si_dev
,
1401 PFX
"Event queue no longer full\n");
1402 intf
->event_msg_printed
= 0;
1405 intf
->delivering_events
= 1;
1406 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1408 list_for_each_entry_safe(msg
, msg2
, &msgs
, link
) {
1410 kref_get(&user
->refcount
);
1411 deliver_response(msg
);
1414 spin_lock_irqsave(&intf
->events_lock
, flags
);
1415 intf
->delivering_events
= 0;
1419 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1423 EXPORT_SYMBOL(ipmi_set_gets_events
);
1425 static struct cmd_rcvr
*find_cmd_rcvr(ipmi_smi_t intf
,
1426 unsigned char netfn
,
1430 struct cmd_rcvr
*rcvr
;
1432 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1433 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1434 && (rcvr
->chans
& (1 << chan
)))
1440 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf
,
1441 unsigned char netfn
,
1445 struct cmd_rcvr
*rcvr
;
1447 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1448 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1449 && (rcvr
->chans
& chans
))
1455 int ipmi_register_for_cmd(ipmi_user_t user
,
1456 unsigned char netfn
,
1460 ipmi_smi_t intf
= user
->intf
;
1461 struct cmd_rcvr
*rcvr
;
1465 rcvr
= kmalloc(sizeof(*rcvr
), GFP_KERNEL
);
1469 rcvr
->netfn
= netfn
;
1470 rcvr
->chans
= chans
;
1473 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1474 /* Make sure the command/netfn is not already registered. */
1475 if (!is_cmd_rcvr_exclusive(intf
, netfn
, cmd
, chans
)) {
1480 if (atomic_inc_return(&intf
->event_waiters
) == 1)
1483 list_add_rcu(&rcvr
->link
, &intf
->cmd_rcvrs
);
1486 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1492 EXPORT_SYMBOL(ipmi_register_for_cmd
);
1494 int ipmi_unregister_for_cmd(ipmi_user_t user
,
1495 unsigned char netfn
,
1499 ipmi_smi_t intf
= user
->intf
;
1500 struct cmd_rcvr
*rcvr
;
1501 struct cmd_rcvr
*rcvrs
= NULL
;
1502 int i
, rv
= -ENOENT
;
1504 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1505 for (i
= 0; i
< IPMI_NUM_CHANNELS
; i
++) {
1506 if (((1 << i
) & chans
) == 0)
1508 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, i
);
1511 if (rcvr
->user
== user
) {
1513 rcvr
->chans
&= ~chans
;
1514 if (rcvr
->chans
== 0) {
1515 list_del_rcu(&rcvr
->link
);
1521 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1524 atomic_dec(&intf
->event_waiters
);
1531 EXPORT_SYMBOL(ipmi_unregister_for_cmd
);
/*
 * Compute the IPMB 2's-complement checksum of 'size' bytes: the value
 * that makes the byte sum (including the checksum) zero mod 256.
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1544 static inline void format_ipmb_msg(struct ipmi_smi_msg
*smi_msg
,
1545 struct kernel_ipmi_msg
*msg
,
1546 struct ipmi_ipmb_addr
*ipmb_addr
,
1548 unsigned char ipmb_seq
,
1550 unsigned char source_address
,
1551 unsigned char source_lun
)
1555 /* Format the IPMB header data. */
1556 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1557 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1558 smi_msg
->data
[2] = ipmb_addr
->channel
;
1560 smi_msg
->data
[3] = 0;
1561 smi_msg
->data
[i
+3] = ipmb_addr
->slave_addr
;
1562 smi_msg
->data
[i
+4] = (msg
->netfn
<< 2) | (ipmb_addr
->lun
& 0x3);
1563 smi_msg
->data
[i
+5] = ipmb_checksum(&(smi_msg
->data
[i
+3]), 2);
1564 smi_msg
->data
[i
+6] = source_address
;
1565 smi_msg
->data
[i
+7] = (ipmb_seq
<< 2) | source_lun
;
1566 smi_msg
->data
[i
+8] = msg
->cmd
;
1568 /* Now tack on the data to the message. */
1569 if (msg
->data_len
> 0)
1570 memcpy(&(smi_msg
->data
[i
+9]), msg
->data
,
1572 smi_msg
->data_size
= msg
->data_len
+ 9;
1574 /* Now calculate the checksum and tack it on. */
1575 smi_msg
->data
[i
+smi_msg
->data_size
]
1576 = ipmb_checksum(&(smi_msg
->data
[i
+6]),
1577 smi_msg
->data_size
-6);
1580 * Add on the checksum size and the offset from the
1583 smi_msg
->data_size
+= 1 + i
;
1585 smi_msg
->msgid
= msgid
;
1588 static inline void format_lan_msg(struct ipmi_smi_msg
*smi_msg
,
1589 struct kernel_ipmi_msg
*msg
,
1590 struct ipmi_lan_addr
*lan_addr
,
1592 unsigned char ipmb_seq
,
1593 unsigned char source_lun
)
1595 /* Format the IPMB header data. */
1596 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1597 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1598 smi_msg
->data
[2] = lan_addr
->channel
;
1599 smi_msg
->data
[3] = lan_addr
->session_handle
;
1600 smi_msg
->data
[4] = lan_addr
->remote_SWID
;
1601 smi_msg
->data
[5] = (msg
->netfn
<< 2) | (lan_addr
->lun
& 0x3);
1602 smi_msg
->data
[6] = ipmb_checksum(&(smi_msg
->data
[4]), 2);
1603 smi_msg
->data
[7] = lan_addr
->local_SWID
;
1604 smi_msg
->data
[8] = (ipmb_seq
<< 2) | source_lun
;
1605 smi_msg
->data
[9] = msg
->cmd
;
1607 /* Now tack on the data to the message. */
1608 if (msg
->data_len
> 0)
1609 memcpy(&(smi_msg
->data
[10]), msg
->data
,
1611 smi_msg
->data_size
= msg
->data_len
+ 10;
1613 /* Now calculate the checksum and tack it on. */
1614 smi_msg
->data
[smi_msg
->data_size
]
1615 = ipmb_checksum(&(smi_msg
->data
[7]),
1616 smi_msg
->data_size
-7);
1619 * Add on the checksum size and the offset from the
1622 smi_msg
->data_size
+= 1;
1624 smi_msg
->msgid
= msgid
;
1627 static struct ipmi_smi_msg
*smi_add_send_msg(ipmi_smi_t intf
,
1628 struct ipmi_smi_msg
*smi_msg
,
1631 if (intf
->curr_msg
) {
1633 list_add_tail(&smi_msg
->link
, &intf
->hp_xmit_msgs
);
1635 list_add_tail(&smi_msg
->link
, &intf
->xmit_msgs
);
1638 intf
->curr_msg
= smi_msg
;
1645 static void smi_send(ipmi_smi_t intf
, const struct ipmi_smi_handlers
*handlers
,
1646 struct ipmi_smi_msg
*smi_msg
, int priority
)
1648 int run_to_completion
= intf
->run_to_completion
;
1650 if (run_to_completion
) {
1651 smi_msg
= smi_add_send_msg(intf
, smi_msg
, priority
);
1653 unsigned long flags
;
1655 spin_lock_irqsave(&intf
->xmit_msgs_lock
, flags
);
1656 smi_msg
= smi_add_send_msg(intf
, smi_msg
, priority
);
1657 spin_unlock_irqrestore(&intf
->xmit_msgs_lock
, flags
);
1661 handlers
->sender(intf
->send_info
, smi_msg
);
1665 * Separate from ipmi_request so that the user does not have to be
1666 * supplied in certain circumstances (mainly at panic time). If
1667 * messages are supplied, they will be freed, even if an error
1670 static int i_ipmi_request(ipmi_user_t user
,
1672 struct ipmi_addr
*addr
,
1674 struct kernel_ipmi_msg
*msg
,
1675 void *user_msg_data
,
1677 struct ipmi_recv_msg
*supplied_recv
,
1679 unsigned char source_address
,
1680 unsigned char source_lun
,
1682 unsigned int retry_time_ms
)
1685 struct ipmi_smi_msg
*smi_msg
;
1686 struct ipmi_recv_msg
*recv_msg
;
1687 unsigned long flags
;
1691 recv_msg
= supplied_recv
;
1693 recv_msg
= ipmi_alloc_recv_msg();
1694 if (recv_msg
== NULL
)
1697 recv_msg
->user_msg_data
= user_msg_data
;
1700 smi_msg
= (struct ipmi_smi_msg
*) supplied_smi
;
1702 smi_msg
= ipmi_alloc_smi_msg();
1703 if (smi_msg
== NULL
) {
1704 ipmi_free_recv_msg(recv_msg
);
1710 if (intf
->in_shutdown
) {
1715 recv_msg
->user
= user
;
1717 kref_get(&user
->refcount
);
1718 recv_msg
->msgid
= msgid
;
1720 * Store the message to send in the receive message so timeout
1721 * responses can get the proper response data.
1723 recv_msg
->msg
= *msg
;
1725 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
1726 struct ipmi_system_interface_addr
*smi_addr
;
1728 if (msg
->netfn
& 1) {
1729 /* Responses are not allowed to the SMI. */
1734 smi_addr
= (struct ipmi_system_interface_addr
*) addr
;
1735 if (smi_addr
->lun
> 3) {
1736 ipmi_inc_stat(intf
, sent_invalid_commands
);
1741 memcpy(&recv_msg
->addr
, smi_addr
, sizeof(*smi_addr
));
1743 if ((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1744 && ((msg
->cmd
== IPMI_SEND_MSG_CMD
)
1745 || (msg
->cmd
== IPMI_GET_MSG_CMD
)
1746 || (msg
->cmd
== IPMI_READ_EVENT_MSG_BUFFER_CMD
))) {
1748 * We don't let the user do these, since we manage
1749 * the sequence numbers.
1751 ipmi_inc_stat(intf
, sent_invalid_commands
);
1756 if (((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1757 && ((msg
->cmd
== IPMI_COLD_RESET_CMD
)
1758 || (msg
->cmd
== IPMI_WARM_RESET_CMD
)))
1759 || (msg
->netfn
== IPMI_NETFN_FIRMWARE_REQUEST
)) {
1760 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1761 intf
->auto_maintenance_timeout
1762 = IPMI_MAINTENANCE_MODE_TIMEOUT
;
1763 if (!intf
->maintenance_mode
1764 && !intf
->maintenance_mode_enable
) {
1765 intf
->maintenance_mode_enable
= true;
1766 maintenance_mode_update(intf
);
1768 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
,
1772 if ((msg
->data_len
+ 2) > IPMI_MAX_MSG_LENGTH
) {
1773 ipmi_inc_stat(intf
, sent_invalid_commands
);
1778 smi_msg
->data
[0] = (msg
->netfn
<< 2) | (smi_addr
->lun
& 0x3);
1779 smi_msg
->data
[1] = msg
->cmd
;
1780 smi_msg
->msgid
= msgid
;
1781 smi_msg
->user_data
= recv_msg
;
1782 if (msg
->data_len
> 0)
1783 memcpy(&(smi_msg
->data
[2]), msg
->data
, msg
->data_len
);
1784 smi_msg
->data_size
= msg
->data_len
+ 2;
1785 ipmi_inc_stat(intf
, sent_local_commands
);
1786 } else if (is_ipmb_addr(addr
) || is_ipmb_bcast_addr(addr
)) {
1787 struct ipmi_ipmb_addr
*ipmb_addr
;
1788 unsigned char ipmb_seq
;
1791 struct ipmi_channel
*chans
;
1793 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1794 ipmi_inc_stat(intf
, sent_invalid_commands
);
1799 chans
= READ_ONCE(intf
->channel_list
)->c
;
1801 if (chans
[addr
->channel
].medium
!= IPMI_CHANNEL_MEDIUM_IPMB
) {
1802 ipmi_inc_stat(intf
, sent_invalid_commands
);
1808 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
)
1809 retries
= 0; /* Don't retry broadcasts. */
1813 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
) {
1815 * Broadcasts add a zero at the beginning of the
1816 * message, but otherwise is the same as an IPMB
1819 addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
1824 /* Default to 1 second retries. */
1825 if (retry_time_ms
== 0)
1826 retry_time_ms
= 1000;
1829 * 9 for the header and 1 for the checksum, plus
1830 * possibly one for the broadcast.
1832 if ((msg
->data_len
+ 10 + broadcast
) > IPMI_MAX_MSG_LENGTH
) {
1833 ipmi_inc_stat(intf
, sent_invalid_commands
);
1838 ipmb_addr
= (struct ipmi_ipmb_addr
*) addr
;
1839 if (ipmb_addr
->lun
> 3) {
1840 ipmi_inc_stat(intf
, sent_invalid_commands
);
1845 memcpy(&recv_msg
->addr
, ipmb_addr
, sizeof(*ipmb_addr
));
1847 if (recv_msg
->msg
.netfn
& 0x1) {
1849 * It's a response, so use the user's sequence
1852 ipmi_inc_stat(intf
, sent_ipmb_responses
);
1853 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
, msgid
,
1855 source_address
, source_lun
);
1858 * Save the receive message so we can use it
1859 * to deliver the response.
1861 smi_msg
->user_data
= recv_msg
;
1863 /* It's a command, so get a sequence for it. */
1865 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1868 * Create a sequence number with a 1 second
1869 * timeout and 4 retries.
1871 rv
= intf_next_seq(intf
,
1880 * We have used up all the sequence numbers,
1881 * probably, so abort.
1883 spin_unlock_irqrestore(&(intf
->seq_lock
),
1888 ipmi_inc_stat(intf
, sent_ipmb_commands
);
1891 * Store the sequence number in the message,
1892 * so that when the send message response
1893 * comes back we can start the timer.
1895 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
,
1896 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1897 ipmb_seq
, broadcast
,
1898 source_address
, source_lun
);
1901 * Copy the message into the recv message data, so we
1902 * can retransmit it later if necessary.
1904 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1905 smi_msg
->data_size
);
1906 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1907 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1910 * We don't unlock until here, because we need
1911 * to copy the completed message into the
1912 * recv_msg before we release the lock.
1913 * Otherwise, race conditions may bite us. I
1914 * know that's pretty paranoid, but I prefer
1917 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1919 } else if (is_lan_addr(addr
)) {
1920 struct ipmi_lan_addr
*lan_addr
;
1921 unsigned char ipmb_seq
;
1923 struct ipmi_channel
*chans
;
1925 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1926 ipmi_inc_stat(intf
, sent_invalid_commands
);
1931 chans
= READ_ONCE(intf
->channel_list
)->c
;
1933 if ((chans
[addr
->channel
].medium
1934 != IPMI_CHANNEL_MEDIUM_8023LAN
)
1935 && (chans
[addr
->channel
].medium
1936 != IPMI_CHANNEL_MEDIUM_ASYNC
)) {
1937 ipmi_inc_stat(intf
, sent_invalid_commands
);
1944 /* Default to 1 second retries. */
1945 if (retry_time_ms
== 0)
1946 retry_time_ms
= 1000;
1948 /* 11 for the header and 1 for the checksum. */
1949 if ((msg
->data_len
+ 12) > IPMI_MAX_MSG_LENGTH
) {
1950 ipmi_inc_stat(intf
, sent_invalid_commands
);
1955 lan_addr
= (struct ipmi_lan_addr
*) addr
;
1956 if (lan_addr
->lun
> 3) {
1957 ipmi_inc_stat(intf
, sent_invalid_commands
);
1962 memcpy(&recv_msg
->addr
, lan_addr
, sizeof(*lan_addr
));
1964 if (recv_msg
->msg
.netfn
& 0x1) {
1966 * It's a response, so use the user's sequence
1969 ipmi_inc_stat(intf
, sent_lan_responses
);
1970 format_lan_msg(smi_msg
, msg
, lan_addr
, msgid
,
1974 * Save the receive message so we can use it
1975 * to deliver the response.
1977 smi_msg
->user_data
= recv_msg
;
1979 /* It's a command, so get a sequence for it. */
1981 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1984 * Create a sequence number with a 1 second
1985 * timeout and 4 retries.
1987 rv
= intf_next_seq(intf
,
1996 * We have used up all the sequence numbers,
1997 * probably, so abort.
1999 spin_unlock_irqrestore(&(intf
->seq_lock
),
2004 ipmi_inc_stat(intf
, sent_lan_commands
);
2007 * Store the sequence number in the message,
2008 * so that when the send message response
2009 * comes back we can start the timer.
2011 format_lan_msg(smi_msg
, msg
, lan_addr
,
2012 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
2013 ipmb_seq
, source_lun
);
2016 * Copy the message into the recv message data, so we
2017 * can retransmit it later if necessary.
2019 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
2020 smi_msg
->data_size
);
2021 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2022 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
2025 * We don't unlock until here, because we need
2026 * to copy the completed message into the
2027 * recv_msg before we release the lock.
2028 * Otherwise, race conditions may bite us. I
2029 * know that's pretty paranoid, but I prefer
2032 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
2035 /* Unknown address type. */
2036 ipmi_inc_stat(intf
, sent_invalid_commands
);
2044 for (m
= 0; m
< smi_msg
->data_size
; m
++)
2045 printk(" %2.2x", smi_msg
->data
[m
]);
2050 smi_send(intf
, intf
->handlers
, smi_msg
, priority
);
2057 ipmi_free_smi_msg(smi_msg
);
2058 ipmi_free_recv_msg(recv_msg
);
2062 static int check_addr(ipmi_smi_t intf
,
2063 struct ipmi_addr
*addr
,
2064 unsigned char *saddr
,
2067 if (addr
->channel
>= IPMI_MAX_CHANNELS
)
2069 addr
->channel
= array_index_nospec(addr
->channel
, IPMI_MAX_CHANNELS
);
2070 *lun
= intf
->addrinfo
[addr
->channel
].lun
;
2071 *saddr
= intf
->addrinfo
[addr
->channel
].address
;
2075 int ipmi_request_settime(ipmi_user_t user
,
2076 struct ipmi_addr
*addr
,
2078 struct kernel_ipmi_msg
*msg
,
2079 void *user_msg_data
,
2082 unsigned int retry_time_ms
)
2084 unsigned char saddr
= 0, lun
= 0;
2089 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
2092 return i_ipmi_request(user
,
2105 EXPORT_SYMBOL(ipmi_request_settime
);
2107 int ipmi_request_supply_msgs(ipmi_user_t user
,
2108 struct ipmi_addr
*addr
,
2110 struct kernel_ipmi_msg
*msg
,
2111 void *user_msg_data
,
2113 struct ipmi_recv_msg
*supplied_recv
,
2116 unsigned char saddr
= 0, lun
= 0;
2121 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
2124 return i_ipmi_request(user
,
2137 EXPORT_SYMBOL(ipmi_request_supply_msgs
);
2139 static void bmc_device_id_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
2143 if ((msg
->addr
.addr_type
!= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
2144 || (msg
->msg
.netfn
!= IPMI_NETFN_APP_RESPONSE
)
2145 || (msg
->msg
.cmd
!= IPMI_GET_DEVICE_ID_CMD
)) {
2146 dev_warn(intf
->si_dev
,
2147 PFX
"invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2148 msg
->addr
.addr_type
, msg
->msg
.netfn
, msg
->msg
.cmd
);
2152 rv
= ipmi_demangle_device_id(msg
->msg
.netfn
, msg
->msg
.cmd
,
2153 msg
->msg
.data
, msg
->msg
.data_len
, &intf
->bmc
->fetch_id
);
2155 dev_warn(intf
->si_dev
,
2156 PFX
"device id demangle failed: %d\n", rv
);
2157 intf
->bmc
->dyn_id_set
= 0;
2160 * Make sure the id data is available before setting
2164 intf
->bmc
->dyn_id_set
= 1;
2167 wake_up(&intf
->waitq
);
2171 send_get_device_id_cmd(ipmi_smi_t intf
)
2173 struct ipmi_system_interface_addr si
;
2174 struct kernel_ipmi_msg msg
;
2176 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2177 si
.channel
= IPMI_BMC_CHANNEL
;
2180 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
2181 msg
.cmd
= IPMI_GET_DEVICE_ID_CMD
;
2185 return i_ipmi_request(NULL
,
2187 (struct ipmi_addr
*) &si
,
2194 intf
->addrinfo
[0].address
,
2195 intf
->addrinfo
[0].lun
,
2199 static int __get_device_id(ipmi_smi_t intf
, struct bmc_device
*bmc
)
2203 bmc
->dyn_id_set
= 2;
2205 intf
->null_user_handler
= bmc_device_id_handler
;
2207 rv
= send_get_device_id_cmd(intf
);
2211 wait_event(intf
->waitq
, bmc
->dyn_id_set
!= 2);
2213 if (!bmc
->dyn_id_set
)
2214 rv
= -EIO
; /* Something went wrong in the fetch. */
2216 /* dyn_id_set makes the id data available. */
2219 intf
->null_user_handler
= NULL
;
2225 * Fetch the device id for the bmc/interface. You must pass in either
2226 * bmc or intf, this code will get the other one. If the data has
2227 * been recently fetched, this will just use the cached data. Otherwise
2228 * it will run a new fetch.
2230 * Except for the first time this is called (in ipmi_register_smi()),
2231 * this will always return good data;
2233 static int __bmc_get_device_id(ipmi_smi_t intf
, struct bmc_device
*bmc
,
2234 struct ipmi_device_id
*id
,
2235 bool *guid_set
, guid_t
*guid
, int intf_num
)
2238 int prev_dyn_id_set
, prev_guid_set
;
2239 bool intf_set
= intf
!= NULL
;
2242 mutex_lock(&bmc
->dyn_mutex
);
2244 if (list_empty(&bmc
->intfs
)) {
2245 mutex_unlock(&bmc
->dyn_mutex
);
2248 intf
= list_first_entry(&bmc
->intfs
, struct ipmi_smi
,
2250 kref_get(&intf
->refcount
);
2251 mutex_unlock(&bmc
->dyn_mutex
);
2252 mutex_lock(&intf
->bmc_reg_mutex
);
2253 mutex_lock(&bmc
->dyn_mutex
);
2254 if (intf
!= list_first_entry(&bmc
->intfs
, struct ipmi_smi
,
2256 mutex_unlock(&intf
->bmc_reg_mutex
);
2257 kref_put(&intf
->refcount
, intf_free
);
2258 goto retry_bmc_lock
;
2261 mutex_lock(&intf
->bmc_reg_mutex
);
2263 mutex_lock(&bmc
->dyn_mutex
);
2264 kref_get(&intf
->refcount
);
2267 /* If we have a valid and current ID, just return that. */
2268 if (intf
->in_bmc_register
||
2269 (bmc
->dyn_id_set
&& time_is_after_jiffies(bmc
->dyn_id_expiry
)))
2270 goto out_noprocessing
;
2272 prev_guid_set
= bmc
->dyn_guid_set
;
2275 prev_dyn_id_set
= bmc
->dyn_id_set
;
2276 rv
= __get_device_id(intf
, bmc
);
2281 * The guid, device id, manufacturer id, and product id should
2282 * not change on a BMC. If it does we have to do some dancing.
2284 if (!intf
->bmc_registered
2285 || (!prev_guid_set
&& bmc
->dyn_guid_set
)
2286 || (!prev_dyn_id_set
&& bmc
->dyn_id_set
)
2287 || (prev_guid_set
&& bmc
->dyn_guid_set
2288 && !guid_equal(&bmc
->guid
, &bmc
->fetch_guid
))
2289 || bmc
->id
.device_id
!= bmc
->fetch_id
.device_id
2290 || bmc
->id
.manufacturer_id
!= bmc
->fetch_id
.manufacturer_id
2291 || bmc
->id
.product_id
!= bmc
->fetch_id
.product_id
) {
2292 struct ipmi_device_id id
= bmc
->fetch_id
;
2293 int guid_set
= bmc
->dyn_guid_set
;
2296 guid
= bmc
->fetch_guid
;
2297 mutex_unlock(&bmc
->dyn_mutex
);
2299 __ipmi_bmc_unregister(intf
);
2300 /* Fill in the temporary BMC for good measure. */
2302 intf
->bmc
->dyn_guid_set
= guid_set
;
2303 intf
->bmc
->guid
= guid
;
2304 if (__ipmi_bmc_register(intf
, &id
, guid_set
, &guid
, intf_num
))
2305 need_waiter(intf
); /* Retry later on an error. */
2307 __scan_channels(intf
, &id
);
2312 * We weren't given the interface on the
2313 * command line, so restart the operation on
2314 * the next interface for the BMC.
2316 mutex_unlock(&intf
->bmc_reg_mutex
);
2317 mutex_lock(&bmc
->dyn_mutex
);
2318 goto retry_bmc_lock
;
2321 /* We have a new BMC, set it up. */
2323 mutex_lock(&bmc
->dyn_mutex
);
2324 goto out_noprocessing
;
2325 } else if (memcmp(&bmc
->fetch_id
, &bmc
->id
, sizeof(bmc
->id
)))
2326 /* Version info changes, scan the channels again. */
2327 __scan_channels(intf
, &bmc
->fetch_id
);
2329 bmc
->dyn_id_expiry
= jiffies
+ IPMI_DYN_DEV_ID_EXPIRY
;
2332 if (rv
&& prev_dyn_id_set
) {
2333 rv
= 0; /* Ignore failures if we have previous data. */
2334 bmc
->dyn_id_set
= prev_dyn_id_set
;
2337 bmc
->id
= bmc
->fetch_id
;
2338 if (bmc
->dyn_guid_set
)
2339 bmc
->guid
= bmc
->fetch_guid
;
2340 else if (prev_guid_set
)
2342 * The guid used to be valid and it failed to fetch,
2343 * just use the cached value.
2345 bmc
->dyn_guid_set
= prev_guid_set
;
2353 *guid_set
= bmc
->dyn_guid_set
;
2355 if (guid
&& bmc
->dyn_guid_set
)
2359 mutex_unlock(&bmc
->dyn_mutex
);
2360 mutex_unlock(&intf
->bmc_reg_mutex
);
2362 kref_put(&intf
->refcount
, intf_free
);
2366 static int bmc_get_device_id(ipmi_smi_t intf
, struct bmc_device
*bmc
,
2367 struct ipmi_device_id
*id
,
2368 bool *guid_set
, guid_t
*guid
)
2370 return __bmc_get_device_id(intf
, bmc
, id
, guid_set
, guid
, -1);
2373 #ifdef CONFIG_IPMI_PROC_INTERFACE
2374 static int smi_ipmb_proc_show(struct seq_file
*m
, void *v
)
2376 ipmi_smi_t intf
= m
->private;
2379 seq_printf(m
, "%x", intf
->addrinfo
[0].address
);
2380 for (i
= 1; i
< IPMI_MAX_CHANNELS
; i
++)
2381 seq_printf(m
, " %x", intf
->addrinfo
[i
].address
);
2387 static int smi_ipmb_proc_open(struct inode
*inode
, struct file
*file
)
2389 return single_open(file
, smi_ipmb_proc_show
, PDE_DATA(inode
));
2392 static const struct file_operations smi_ipmb_proc_ops
= {
2393 .open
= smi_ipmb_proc_open
,
2395 .llseek
= seq_lseek
,
2396 .release
= single_release
,
2399 static int smi_version_proc_show(struct seq_file
*m
, void *v
)
2401 ipmi_smi_t intf
= m
->private;
2402 struct ipmi_device_id id
;
2405 rv
= bmc_get_device_id(intf
, NULL
, &id
, NULL
, NULL
);
2409 seq_printf(m
, "%u.%u\n",
2410 ipmi_version_major(&id
),
2411 ipmi_version_minor(&id
));
2416 static int smi_version_proc_open(struct inode
*inode
, struct file
*file
)
2418 return single_open(file
, smi_version_proc_show
, PDE_DATA(inode
));
2421 static const struct file_operations smi_version_proc_ops
= {
2422 .open
= smi_version_proc_open
,
2424 .llseek
= seq_lseek
,
2425 .release
= single_release
,
2428 static int smi_stats_proc_show(struct seq_file
*m
, void *v
)
2430 ipmi_smi_t intf
= m
->private;
2432 seq_printf(m
, "sent_invalid_commands: %u\n",
2433 ipmi_get_stat(intf
, sent_invalid_commands
));
2434 seq_printf(m
, "sent_local_commands: %u\n",
2435 ipmi_get_stat(intf
, sent_local_commands
));
2436 seq_printf(m
, "handled_local_responses: %u\n",
2437 ipmi_get_stat(intf
, handled_local_responses
));
2438 seq_printf(m
, "unhandled_local_responses: %u\n",
2439 ipmi_get_stat(intf
, unhandled_local_responses
));
2440 seq_printf(m
, "sent_ipmb_commands: %u\n",
2441 ipmi_get_stat(intf
, sent_ipmb_commands
));
2442 seq_printf(m
, "sent_ipmb_command_errs: %u\n",
2443 ipmi_get_stat(intf
, sent_ipmb_command_errs
));
2444 seq_printf(m
, "retransmitted_ipmb_commands: %u\n",
2445 ipmi_get_stat(intf
, retransmitted_ipmb_commands
));
2446 seq_printf(m
, "timed_out_ipmb_commands: %u\n",
2447 ipmi_get_stat(intf
, timed_out_ipmb_commands
));
2448 seq_printf(m
, "timed_out_ipmb_broadcasts: %u\n",
2449 ipmi_get_stat(intf
, timed_out_ipmb_broadcasts
));
2450 seq_printf(m
, "sent_ipmb_responses: %u\n",
2451 ipmi_get_stat(intf
, sent_ipmb_responses
));
2452 seq_printf(m
, "handled_ipmb_responses: %u\n",
2453 ipmi_get_stat(intf
, handled_ipmb_responses
));
2454 seq_printf(m
, "invalid_ipmb_responses: %u\n",
2455 ipmi_get_stat(intf
, invalid_ipmb_responses
));
2456 seq_printf(m
, "unhandled_ipmb_responses: %u\n",
2457 ipmi_get_stat(intf
, unhandled_ipmb_responses
));
2458 seq_printf(m
, "sent_lan_commands: %u\n",
2459 ipmi_get_stat(intf
, sent_lan_commands
));
2460 seq_printf(m
, "sent_lan_command_errs: %u\n",
2461 ipmi_get_stat(intf
, sent_lan_command_errs
));
2462 seq_printf(m
, "retransmitted_lan_commands: %u\n",
2463 ipmi_get_stat(intf
, retransmitted_lan_commands
));
2464 seq_printf(m
, "timed_out_lan_commands: %u\n",
2465 ipmi_get_stat(intf
, timed_out_lan_commands
));
2466 seq_printf(m
, "sent_lan_responses: %u\n",
2467 ipmi_get_stat(intf
, sent_lan_responses
));
2468 seq_printf(m
, "handled_lan_responses: %u\n",
2469 ipmi_get_stat(intf
, handled_lan_responses
));
2470 seq_printf(m
, "invalid_lan_responses: %u\n",
2471 ipmi_get_stat(intf
, invalid_lan_responses
));
2472 seq_printf(m
, "unhandled_lan_responses: %u\n",
2473 ipmi_get_stat(intf
, unhandled_lan_responses
));
2474 seq_printf(m
, "handled_commands: %u\n",
2475 ipmi_get_stat(intf
, handled_commands
));
2476 seq_printf(m
, "invalid_commands: %u\n",
2477 ipmi_get_stat(intf
, invalid_commands
));
2478 seq_printf(m
, "unhandled_commands: %u\n",
2479 ipmi_get_stat(intf
, unhandled_commands
));
2480 seq_printf(m
, "invalid_events: %u\n",
2481 ipmi_get_stat(intf
, invalid_events
));
2482 seq_printf(m
, "events: %u\n",
2483 ipmi_get_stat(intf
, events
));
2484 seq_printf(m
, "failed rexmit LAN msgs: %u\n",
2485 ipmi_get_stat(intf
, dropped_rexmit_lan_commands
));
2486 seq_printf(m
, "failed rexmit IPMB msgs: %u\n",
2487 ipmi_get_stat(intf
, dropped_rexmit_ipmb_commands
));
2491 static int smi_stats_proc_open(struct inode
*inode
, struct file
*file
)
2493 return single_open(file
, smi_stats_proc_show
, PDE_DATA(inode
));
2496 static const struct file_operations smi_stats_proc_ops
= {
2497 .open
= smi_stats_proc_open
,
2499 .llseek
= seq_lseek
,
2500 .release
= single_release
,
2503 int ipmi_smi_add_proc_entry(ipmi_smi_t smi
, char *name
,
2504 const struct file_operations
*proc_ops
,
2508 struct proc_dir_entry
*file
;
2509 struct ipmi_proc_entry
*entry
;
2511 /* Create a list element. */
2512 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
2515 entry
->name
= kstrdup(name
, GFP_KERNEL
);
2521 file
= proc_create_data(name
, 0, smi
->proc_dir
, proc_ops
, data
);
2527 mutex_lock(&smi
->proc_entry_lock
);
2528 /* Stick it on the list. */
2529 entry
->next
= smi
->proc_entries
;
2530 smi
->proc_entries
= entry
;
2531 mutex_unlock(&smi
->proc_entry_lock
);
2536 EXPORT_SYMBOL(ipmi_smi_add_proc_entry
);
2538 static int add_proc_entries(ipmi_smi_t smi
, int num
)
2542 sprintf(smi
->proc_dir_name
, "%d", num
);
2543 smi
->proc_dir
= proc_mkdir(smi
->proc_dir_name
, proc_ipmi_root
);
2548 rv
= ipmi_smi_add_proc_entry(smi
, "stats",
2549 &smi_stats_proc_ops
,
2553 rv
= ipmi_smi_add_proc_entry(smi
, "ipmb",
2558 rv
= ipmi_smi_add_proc_entry(smi
, "version",
2559 &smi_version_proc_ops
,
2565 static void remove_proc_entries(ipmi_smi_t smi
)
2567 struct ipmi_proc_entry
*entry
;
2569 mutex_lock(&smi
->proc_entry_lock
);
2570 while (smi
->proc_entries
) {
2571 entry
= smi
->proc_entries
;
2572 smi
->proc_entries
= entry
->next
;
2574 remove_proc_entry(entry
->name
, smi
->proc_dir
);
2578 mutex_unlock(&smi
->proc_entry_lock
);
2579 remove_proc_entry(smi
->proc_dir_name
, proc_ipmi_root
);
2581 #endif /* CONFIG_IPMI_PROC_INTERFACE */
2583 static ssize_t
device_id_show(struct device
*dev
,
2584 struct device_attribute
*attr
,
2587 struct bmc_device
*bmc
= to_bmc_device(dev
);
2588 struct ipmi_device_id id
;
2591 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2595 return snprintf(buf
, 10, "%u\n", id
.device_id
);
2597 static DEVICE_ATTR_RO(device_id
);
2599 static ssize_t
provides_device_sdrs_show(struct device
*dev
,
2600 struct device_attribute
*attr
,
2603 struct bmc_device
*bmc
= to_bmc_device(dev
);
2604 struct ipmi_device_id id
;
2607 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2611 return snprintf(buf
, 10, "%u\n", (id
.device_revision
& 0x80) >> 7);
2613 static DEVICE_ATTR_RO(provides_device_sdrs
);
2615 static ssize_t
revision_show(struct device
*dev
, struct device_attribute
*attr
,
2618 struct bmc_device
*bmc
= to_bmc_device(dev
);
2619 struct ipmi_device_id id
;
2622 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2626 return snprintf(buf
, 20, "%u\n", id
.device_revision
& 0x0F);
2628 static DEVICE_ATTR_RO(revision
);
2630 static ssize_t
firmware_revision_show(struct device
*dev
,
2631 struct device_attribute
*attr
,
2634 struct bmc_device
*bmc
= to_bmc_device(dev
);
2635 struct ipmi_device_id id
;
2638 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2642 return snprintf(buf
, 20, "%u.%x\n", id
.firmware_revision_1
,
2643 id
.firmware_revision_2
);
2645 static DEVICE_ATTR_RO(firmware_revision
);
2647 static ssize_t
ipmi_version_show(struct device
*dev
,
2648 struct device_attribute
*attr
,
2651 struct bmc_device
*bmc
= to_bmc_device(dev
);
2652 struct ipmi_device_id id
;
2655 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2659 return snprintf(buf
, 20, "%u.%u\n",
2660 ipmi_version_major(&id
),
2661 ipmi_version_minor(&id
));
2663 static DEVICE_ATTR_RO(ipmi_version
);
2665 static ssize_t
add_dev_support_show(struct device
*dev
,
2666 struct device_attribute
*attr
,
2669 struct bmc_device
*bmc
= to_bmc_device(dev
);
2670 struct ipmi_device_id id
;
2673 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2677 return snprintf(buf
, 10, "0x%02x\n", id
.additional_device_support
);
2679 static DEVICE_ATTR(additional_device_support
, S_IRUGO
, add_dev_support_show
,
2682 static ssize_t
manufacturer_id_show(struct device
*dev
,
2683 struct device_attribute
*attr
,
2686 struct bmc_device
*bmc
= to_bmc_device(dev
);
2687 struct ipmi_device_id id
;
2690 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2694 return snprintf(buf
, 20, "0x%6.6x\n", id
.manufacturer_id
);
2696 static DEVICE_ATTR_RO(manufacturer_id
);
2698 static ssize_t
product_id_show(struct device
*dev
,
2699 struct device_attribute
*attr
,
2702 struct bmc_device
*bmc
= to_bmc_device(dev
);
2703 struct ipmi_device_id id
;
2706 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2710 return snprintf(buf
, 10, "0x%4.4x\n", id
.product_id
);
2712 static DEVICE_ATTR_RO(product_id
);
2714 static ssize_t
aux_firmware_rev_show(struct device
*dev
,
2715 struct device_attribute
*attr
,
2718 struct bmc_device
*bmc
= to_bmc_device(dev
);
2719 struct ipmi_device_id id
;
2722 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2726 return snprintf(buf
, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2727 id
.aux_firmware_revision
[3],
2728 id
.aux_firmware_revision
[2],
2729 id
.aux_firmware_revision
[1],
2730 id
.aux_firmware_revision
[0]);
2732 static DEVICE_ATTR(aux_firmware_revision
, S_IRUGO
, aux_firmware_rev_show
, NULL
);
2734 static ssize_t
guid_show(struct device
*dev
, struct device_attribute
*attr
,
2737 struct bmc_device
*bmc
= to_bmc_device(dev
);
2742 rv
= bmc_get_device_id(NULL
, bmc
, NULL
, &guid_set
, &guid
);
2748 return snprintf(buf
, 38, "%pUl\n", guid
.b
);
2750 static DEVICE_ATTR_RO(guid
);
2752 static struct attribute
*bmc_dev_attrs
[] = {
2753 &dev_attr_device_id
.attr
,
2754 &dev_attr_provides_device_sdrs
.attr
,
2755 &dev_attr_revision
.attr
,
2756 &dev_attr_firmware_revision
.attr
,
2757 &dev_attr_ipmi_version
.attr
,
2758 &dev_attr_additional_device_support
.attr
,
2759 &dev_attr_manufacturer_id
.attr
,
2760 &dev_attr_product_id
.attr
,
2761 &dev_attr_aux_firmware_revision
.attr
,
2762 &dev_attr_guid
.attr
,
2766 static umode_t
bmc_dev_attr_is_visible(struct kobject
*kobj
,
2767 struct attribute
*attr
, int idx
)
2769 struct device
*dev
= kobj_to_dev(kobj
);
2770 struct bmc_device
*bmc
= to_bmc_device(dev
);
2771 umode_t mode
= attr
->mode
;
2774 if (attr
== &dev_attr_aux_firmware_revision
.attr
) {
2775 struct ipmi_device_id id
;
2777 rv
= bmc_get_device_id(NULL
, bmc
, &id
, NULL
, NULL
);
2778 return (!rv
&& id
.aux_firmware_revision_set
) ? mode
: 0;
2780 if (attr
== &dev_attr_guid
.attr
) {
2783 rv
= bmc_get_device_id(NULL
, bmc
, NULL
, &guid_set
, NULL
);
2784 return (!rv
&& guid_set
) ? mode
: 0;
2789 static const struct attribute_group bmc_dev_attr_group
= {
2790 .attrs
= bmc_dev_attrs
,
2791 .is_visible
= bmc_dev_attr_is_visible
,
2794 static const struct attribute_group
*bmc_dev_attr_groups
[] = {
2795 &bmc_dev_attr_group
,
2799 static const struct device_type bmc_device_type
= {
2800 .groups
= bmc_dev_attr_groups
,
2803 static int __find_bmc_guid(struct device
*dev
, void *data
)
2805 guid_t
*guid
= data
;
2806 struct bmc_device
*bmc
;
2809 if (dev
->type
!= &bmc_device_type
)
2812 bmc
= to_bmc_device(dev
);
2813 rv
= bmc
->dyn_guid_set
&& guid_equal(&bmc
->guid
, guid
);
2815 rv
= kref_get_unless_zero(&bmc
->usecount
);
2820 * Returns with the bmc's usecount incremented, if it is non-NULL.
2822 static struct bmc_device
*ipmi_find_bmc_guid(struct device_driver
*drv
,
2826 struct bmc_device
*bmc
= NULL
;
2828 dev
= driver_find_device(drv
, NULL
, guid
, __find_bmc_guid
);
2830 bmc
= to_bmc_device(dev
);
/* Key used to look up a BMC by its product and device ids. */
struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};
2841 static int __find_bmc_prod_dev_id(struct device
*dev
, void *data
)
2843 struct prod_dev_id
*cid
= data
;
2844 struct bmc_device
*bmc
;
2847 if (dev
->type
!= &bmc_device_type
)
2850 bmc
= to_bmc_device(dev
);
2851 rv
= (bmc
->id
.product_id
== cid
->product_id
2852 && bmc
->id
.device_id
== cid
->device_id
);
2854 rv
= kref_get_unless_zero(&bmc
->usecount
);
2859 * Returns with the bmc's usecount incremented, if it is non-NULL.
2861 static struct bmc_device
*ipmi_find_bmc_prod_dev_id(
2862 struct device_driver
*drv
,
2863 unsigned int product_id
, unsigned char device_id
)
2865 struct prod_dev_id id
= {
2866 .product_id
= product_id
,
2867 .device_id
= device_id
,
2870 struct bmc_device
*bmc
= NULL
;
2872 dev
= driver_find_device(drv
, NULL
, &id
, __find_bmc_prod_dev_id
);
2874 bmc
= to_bmc_device(dev
);
/* Allocator for unique ipmi_bmc platform device ids. */
static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}
2888 static void cleanup_bmc_work(struct work_struct
*work
)
2890 struct bmc_device
*bmc
= container_of(work
, struct bmc_device
,
2892 int id
= bmc
->pdev
.id
; /* Unregister overwrites id */
2894 platform_device_unregister(&bmc
->pdev
);
2895 ida_simple_remove(&ipmi_bmc_ida
, id
);
2899 cleanup_bmc_device(struct kref
*ref
)
2901 struct bmc_device
*bmc
= container_of(ref
, struct bmc_device
, usecount
);
2904 * Remove the platform device in a work queue to avoid issues
2905 * with removing the device attributes while reading a device
2908 schedule_work(&bmc
->remove_work
);
2912 * Must be called with intf->bmc_reg_mutex held.
2914 static void __ipmi_bmc_unregister(ipmi_smi_t intf
)
2916 struct bmc_device
*bmc
= intf
->bmc
;
2918 if (!intf
->bmc_registered
)
2921 sysfs_remove_link(&intf
->si_dev
->kobj
, "bmc");
2922 sysfs_remove_link(&bmc
->pdev
.dev
.kobj
, intf
->my_dev_name
);
2923 kfree(intf
->my_dev_name
);
2924 intf
->my_dev_name
= NULL
;
2926 mutex_lock(&bmc
->dyn_mutex
);
2927 list_del(&intf
->bmc_link
);
2928 mutex_unlock(&bmc
->dyn_mutex
);
2929 intf
->bmc
= &intf
->tmp_bmc
;
2930 kref_put(&bmc
->usecount
, cleanup_bmc_device
);
2931 intf
->bmc_registered
= false;
2934 static void ipmi_bmc_unregister(ipmi_smi_t intf
)
2936 mutex_lock(&intf
->bmc_reg_mutex
);
2937 __ipmi_bmc_unregister(intf
);
2938 mutex_unlock(&intf
->bmc_reg_mutex
);
2942 * Must be called with intf->bmc_reg_mutex held.
2944 static int __ipmi_bmc_register(ipmi_smi_t intf
,
2945 struct ipmi_device_id
*id
,
2946 bool guid_set
, guid_t
*guid
, int intf_num
)
2949 struct bmc_device
*bmc
;
2950 struct bmc_device
*old_bmc
;
2953 * platform_device_register() can cause bmc_reg_mutex to
2954 * be claimed because of the is_visible functions of
2955 * the attributes. Eliminate possible recursion and
2958 intf
->in_bmc_register
= true;
2959 mutex_unlock(&intf
->bmc_reg_mutex
);
2962 * Try to find if there is an bmc_device struct
2963 * representing the interfaced BMC already
2965 mutex_lock(&ipmidriver_mutex
);
2967 old_bmc
= ipmi_find_bmc_guid(&ipmidriver
.driver
, guid
);
2969 old_bmc
= ipmi_find_bmc_prod_dev_id(&ipmidriver
.driver
,
2974 * If there is already an bmc_device, free the new one,
2975 * otherwise register the new BMC device
2980 * Note: old_bmc already has usecount incremented by
2981 * the BMC find functions.
2983 intf
->bmc
= old_bmc
;
2984 mutex_lock(&bmc
->dyn_mutex
);
2985 list_add_tail(&intf
->bmc_link
, &bmc
->intfs
);
2986 mutex_unlock(&bmc
->dyn_mutex
);
2988 dev_info(intf
->si_dev
,
2989 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2990 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2991 bmc
->id
.manufacturer_id
,
2995 bmc
= kzalloc(sizeof(*bmc
), GFP_KERNEL
);
3000 INIT_LIST_HEAD(&bmc
->intfs
);
3001 mutex_init(&bmc
->dyn_mutex
);
3002 INIT_WORK(&bmc
->remove_work
, cleanup_bmc_work
);
3005 bmc
->dyn_id_set
= 1;
3006 bmc
->dyn_guid_set
= guid_set
;
3008 bmc
->dyn_id_expiry
= jiffies
+ IPMI_DYN_DEV_ID_EXPIRY
;
3010 bmc
->pdev
.name
= "ipmi_bmc";
3012 rv
= ida_simple_get(&ipmi_bmc_ida
, 0, 0, GFP_KERNEL
);
3015 bmc
->pdev
.dev
.driver
= &ipmidriver
.driver
;
3017 bmc
->pdev
.dev
.release
= release_bmc_device
;
3018 bmc
->pdev
.dev
.type
= &bmc_device_type
;
3019 kref_init(&bmc
->usecount
);
3022 mutex_lock(&bmc
->dyn_mutex
);
3023 list_add_tail(&intf
->bmc_link
, &bmc
->intfs
);
3024 mutex_unlock(&bmc
->dyn_mutex
);
3026 rv
= platform_device_register(&bmc
->pdev
);
3028 dev_err(intf
->si_dev
,
3029 PFX
" Unable to register bmc device: %d\n",
3034 dev_info(intf
->si_dev
,
3035 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3036 bmc
->id
.manufacturer_id
,
3042 * create symlink from system interface device to bmc device
3045 rv
= sysfs_create_link(&intf
->si_dev
->kobj
, &bmc
->pdev
.dev
.kobj
, "bmc");
3047 dev_err(intf
->si_dev
,
3048 PFX
"Unable to create bmc symlink: %d\n", rv
);
3053 intf_num
= intf
->intf_num
;
3054 intf
->my_dev_name
= kasprintf(GFP_KERNEL
, "ipmi%d", intf_num
);
3055 if (!intf
->my_dev_name
) {
3057 dev_err(intf
->si_dev
,
3058 PFX
"Unable to allocate link from BMC: %d\n", rv
);
3062 rv
= sysfs_create_link(&bmc
->pdev
.dev
.kobj
, &intf
->si_dev
->kobj
,
3065 kfree(intf
->my_dev_name
);
3066 intf
->my_dev_name
= NULL
;
3067 dev_err(intf
->si_dev
,
3068 PFX
"Unable to create symlink to bmc: %d\n", rv
);
3069 goto out_free_my_dev_name
;
3072 intf
->bmc_registered
= true;
3075 mutex_unlock(&ipmidriver_mutex
);
3076 mutex_lock(&intf
->bmc_reg_mutex
);
3077 intf
->in_bmc_register
= false;
3081 out_free_my_dev_name
:
3082 kfree(intf
->my_dev_name
);
3083 intf
->my_dev_name
= NULL
;
3086 sysfs_remove_link(&intf
->si_dev
->kobj
, "bmc");
3089 mutex_lock(&bmc
->dyn_mutex
);
3090 list_del(&intf
->bmc_link
);
3091 mutex_unlock(&bmc
->dyn_mutex
);
3092 intf
->bmc
= &intf
->tmp_bmc
;
3093 kref_put(&bmc
->usecount
, cleanup_bmc_device
);
3097 mutex_lock(&bmc
->dyn_mutex
);
3098 list_del(&intf
->bmc_link
);
3099 mutex_unlock(&bmc
->dyn_mutex
);
3100 intf
->bmc
= &intf
->tmp_bmc
;
3101 put_device(&bmc
->pdev
.dev
);
3106 send_guid_cmd(ipmi_smi_t intf
, int chan
)
3108 struct kernel_ipmi_msg msg
;
3109 struct ipmi_system_interface_addr si
;
3111 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3112 si
.channel
= IPMI_BMC_CHANNEL
;
3115 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
3116 msg
.cmd
= IPMI_GET_DEVICE_GUID_CMD
;
3119 return i_ipmi_request(NULL
,
3121 (struct ipmi_addr
*) &si
,
3128 intf
->addrinfo
[0].address
,
3129 intf
->addrinfo
[0].lun
,
3133 static void guid_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3135 struct bmc_device
*bmc
= intf
->bmc
;
3137 if ((msg
->addr
.addr_type
!= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3138 || (msg
->msg
.netfn
!= IPMI_NETFN_APP_RESPONSE
)
3139 || (msg
->msg
.cmd
!= IPMI_GET_DEVICE_GUID_CMD
))
3143 if (msg
->msg
.data
[0] != 0) {
3144 /* Error from getting the GUID, the BMC doesn't have one. */
3145 bmc
->dyn_guid_set
= 0;
3149 if (msg
->msg
.data_len
< 17) {
3150 bmc
->dyn_guid_set
= 0;
3151 dev_warn(intf
->si_dev
,
3152 PFX
"The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
3157 memcpy(bmc
->fetch_guid
.b
, msg
->msg
.data
+ 1, 16);
3159 * Make sure the guid data is available before setting
3163 bmc
->dyn_guid_set
= 1;
3165 wake_up(&intf
->waitq
);
3168 static void __get_guid(ipmi_smi_t intf
)
3171 struct bmc_device
*bmc
= intf
->bmc
;
3173 bmc
->dyn_guid_set
= 2;
3174 intf
->null_user_handler
= guid_handler
;
3175 rv
= send_guid_cmd(intf
, 0);
3177 /* Send failed, no GUID available. */
3178 bmc
->dyn_guid_set
= 0;
3180 wait_event(intf
->waitq
, bmc
->dyn_guid_set
!= 2);
3182 /* dyn_guid_set makes the guid data available. */
3185 intf
->null_user_handler
= NULL
;
3189 send_channel_info_cmd(ipmi_smi_t intf
, int chan
)
3191 struct kernel_ipmi_msg msg
;
3192 unsigned char data
[1];
3193 struct ipmi_system_interface_addr si
;
3195 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3196 si
.channel
= IPMI_BMC_CHANNEL
;
3199 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
3200 msg
.cmd
= IPMI_GET_CHANNEL_INFO_CMD
;
3204 return i_ipmi_request(NULL
,
3206 (struct ipmi_addr
*) &si
,
3213 intf
->addrinfo
[0].address
,
3214 intf
->addrinfo
[0].lun
,
3219 channel_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3223 unsigned int set
= intf
->curr_working_cset
;
3224 struct ipmi_channel
*chans
;
3226 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3227 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
3228 && (msg
->msg
.cmd
== IPMI_GET_CHANNEL_INFO_CMD
)) {
3229 /* It's the one we want */
3230 if (msg
->msg
.data
[0] != 0) {
3231 /* Got an error from the channel, just go on. */
3233 if (msg
->msg
.data
[0] == IPMI_INVALID_COMMAND_ERR
) {
3235 * If the MC does not support this
3236 * command, that is legal. We just
3237 * assume it has one IPMB at channel
3240 intf
->wchannels
[set
].c
[0].medium
3241 = IPMI_CHANNEL_MEDIUM_IPMB
;
3242 intf
->wchannels
[set
].c
[0].protocol
3243 = IPMI_CHANNEL_PROTOCOL_IPMB
;
3245 intf
->channel_list
= intf
->wchannels
+ set
;
3246 intf
->channels_ready
= true;
3247 wake_up(&intf
->waitq
);
3252 if (msg
->msg
.data_len
< 4) {
3253 /* Message not big enough, just go on. */
3256 ch
= intf
->curr_channel
;
3257 chans
= intf
->wchannels
[set
].c
;
3258 chans
[ch
].medium
= msg
->msg
.data
[2] & 0x7f;
3259 chans
[ch
].protocol
= msg
->msg
.data
[3] & 0x1f;
3262 intf
->curr_channel
++;
3263 if (intf
->curr_channel
>= IPMI_MAX_CHANNELS
) {
3264 intf
->channel_list
= intf
->wchannels
+ set
;
3265 intf
->channels_ready
= true;
3266 wake_up(&intf
->waitq
);
3268 intf
->channel_list
= intf
->wchannels
+ set
;
3269 intf
->channels_ready
= true;
3270 rv
= send_channel_info_cmd(intf
, intf
->curr_channel
);
3274 /* Got an error somehow, just give up. */
3275 dev_warn(intf
->si_dev
,
3276 PFX
"Error sending channel information for channel %d: %d\n",
3277 intf
->curr_channel
, rv
);
3279 intf
->channel_list
= intf
->wchannels
+ set
;
3280 intf
->channels_ready
= true;
3281 wake_up(&intf
->waitq
);
3289 * Must be holding intf->bmc_reg_mutex to call this.
3291 static int __scan_channels(ipmi_smi_t intf
, struct ipmi_device_id
*id
)
3295 if (ipmi_version_major(id
) > 1
3296 || (ipmi_version_major(id
) == 1
3297 && ipmi_version_minor(id
) >= 5)) {
3301 * Start scanning the channels to see what is
3304 set
= !intf
->curr_working_cset
;
3305 intf
->curr_working_cset
= set
;
3306 memset(&intf
->wchannels
[set
], 0,
3307 sizeof(struct ipmi_channel_set
));
3309 intf
->null_user_handler
= channel_handler
;
3310 intf
->curr_channel
= 0;
3311 rv
= send_channel_info_cmd(intf
, 0);
3313 dev_warn(intf
->si_dev
,
3314 "Error sending channel information for channel 0, %d\n",
3319 /* Wait for the channel info to be read. */
3320 wait_event(intf
->waitq
, intf
->channels_ready
);
3321 intf
->null_user_handler
= NULL
;
3323 unsigned int set
= intf
->curr_working_cset
;
3325 /* Assume a single IPMB channel at zero. */
3326 intf
->wchannels
[set
].c
[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB
;
3327 intf
->wchannels
[set
].c
[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB
;
3328 intf
->channel_list
= intf
->wchannels
+ set
;
3329 intf
->channels_ready
= true;
3335 static void ipmi_poll(ipmi_smi_t intf
)
3337 if (intf
->handlers
->poll
)
3338 intf
->handlers
->poll(intf
->send_info
);
3339 /* In case something came in */
3340 handle_new_recv_msgs(intf
);
3343 void ipmi_poll_interface(ipmi_user_t user
)
3345 ipmi_poll(user
->intf
);
3347 EXPORT_SYMBOL(ipmi_poll_interface
);
3349 static void redo_bmc_reg(struct work_struct
*work
)
3351 ipmi_smi_t intf
= container_of(work
, struct ipmi_smi
, bmc_reg_work
);
3353 if (!intf
->in_shutdown
)
3354 bmc_get_device_id(intf
, NULL
, NULL
, NULL
, NULL
);
3356 kref_put(&intf
->refcount
, intf_free
);
3359 int ipmi_register_smi(const struct ipmi_smi_handlers
*handlers
,
3361 struct device
*si_dev
,
3362 unsigned char slave_addr
)
3368 struct list_head
*link
;
3369 struct ipmi_device_id id
;
3372 * Make sure the driver is actually initialized, this handles
3373 * problems with initialization order.
3376 rv
= ipmi_init_msghandler();
3380 * The init code doesn't return an error if it was turned
3381 * off, but it won't initialize. Check that.
3387 intf
= kzalloc(sizeof(*intf
), GFP_KERNEL
);
3391 intf
->bmc
= &intf
->tmp_bmc
;
3392 INIT_LIST_HEAD(&intf
->bmc
->intfs
);
3393 mutex_init(&intf
->bmc
->dyn_mutex
);
3394 INIT_LIST_HEAD(&intf
->bmc_link
);
3395 mutex_init(&intf
->bmc_reg_mutex
);
3396 intf
->intf_num
= -1; /* Mark it invalid for now. */
3397 kref_init(&intf
->refcount
);
3398 INIT_WORK(&intf
->bmc_reg_work
, redo_bmc_reg
);
3399 intf
->si_dev
= si_dev
;
3400 for (j
= 0; j
< IPMI_MAX_CHANNELS
; j
++) {
3401 intf
->addrinfo
[j
].address
= IPMI_BMC_SLAVE_ADDR
;
3402 intf
->addrinfo
[j
].lun
= 2;
3404 if (slave_addr
!= 0)
3405 intf
->addrinfo
[0].address
= slave_addr
;
3406 INIT_LIST_HEAD(&intf
->users
);
3407 intf
->handlers
= handlers
;
3408 intf
->send_info
= send_info
;
3409 spin_lock_init(&intf
->seq_lock
);
3410 for (j
= 0; j
< IPMI_IPMB_NUM_SEQ
; j
++) {
3411 intf
->seq_table
[j
].inuse
= 0;
3412 intf
->seq_table
[j
].seqid
= 0;
3415 #ifdef CONFIG_IPMI_PROC_INTERFACE
3416 mutex_init(&intf
->proc_entry_lock
);
3418 spin_lock_init(&intf
->waiting_rcv_msgs_lock
);
3419 INIT_LIST_HEAD(&intf
->waiting_rcv_msgs
);
3420 tasklet_init(&intf
->recv_tasklet
,
3422 (unsigned long) intf
);
3423 atomic_set(&intf
->watchdog_pretimeouts_to_deliver
, 0);
3424 spin_lock_init(&intf
->xmit_msgs_lock
);
3425 INIT_LIST_HEAD(&intf
->xmit_msgs
);
3426 INIT_LIST_HEAD(&intf
->hp_xmit_msgs
);
3427 spin_lock_init(&intf
->events_lock
);
3428 atomic_set(&intf
->event_waiters
, 0);
3429 intf
->ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
3430 INIT_LIST_HEAD(&intf
->waiting_events
);
3431 intf
->waiting_events_count
= 0;
3432 mutex_init(&intf
->cmd_rcvrs_mutex
);
3433 spin_lock_init(&intf
->maintenance_mode_lock
);
3434 INIT_LIST_HEAD(&intf
->cmd_rcvrs
);
3435 init_waitqueue_head(&intf
->waitq
);
3436 for (i
= 0; i
< IPMI_NUM_STATS
; i
++)
3437 atomic_set(&intf
->stats
[i
], 0);
3439 #ifdef CONFIG_IPMI_PROC_INTERFACE
3440 intf
->proc_dir
= NULL
;
3443 mutex_lock(&smi_watchers_mutex
);
3444 mutex_lock(&ipmi_interfaces_mutex
);
3445 /* Look for a hole in the numbers. */
3447 link
= &ipmi_interfaces
;
3448 list_for_each_entry_rcu(tintf
, &ipmi_interfaces
, link
) {
3449 if (tintf
->intf_num
!= i
) {
3450 link
= &tintf
->link
;
3455 /* Add the new interface in numeric order. */
3457 list_add_rcu(&intf
->link
, &ipmi_interfaces
);
3459 list_add_tail_rcu(&intf
->link
, link
);
3461 rv
= handlers
->start_processing(send_info
, intf
);
3465 rv
= __bmc_get_device_id(intf
, NULL
, &id
, NULL
, NULL
, i
);
3467 dev_err(si_dev
, "Unable to get the device id: %d\n", rv
);
3471 mutex_lock(&intf
->bmc_reg_mutex
);
3472 rv
= __scan_channels(intf
, &id
);
3473 mutex_unlock(&intf
->bmc_reg_mutex
);
3477 #ifdef CONFIG_IPMI_PROC_INTERFACE
3478 rv
= add_proc_entries(intf
, i
);
3483 ipmi_bmc_unregister(intf
);
3484 #ifdef CONFIG_IPMI_PROC_INTERFACE
3486 remove_proc_entries(intf
);
3488 intf
->handlers
= NULL
;
3489 list_del_rcu(&intf
->link
);
3490 mutex_unlock(&ipmi_interfaces_mutex
);
3491 mutex_unlock(&smi_watchers_mutex
);
3493 kref_put(&intf
->refcount
, intf_free
);
3496 * Keep memory order straight for RCU readers. Make
3497 * sure everything else is committed to memory before
3498 * setting intf_num to mark the interface valid.
3502 mutex_unlock(&ipmi_interfaces_mutex
);
3503 /* After this point the interface is legal to use. */
3504 call_smi_watchers(i
, intf
->si_dev
);
3505 mutex_unlock(&smi_watchers_mutex
);
3510 EXPORT_SYMBOL(ipmi_register_smi
);
3512 static void deliver_smi_err_response(ipmi_smi_t intf
,
3513 struct ipmi_smi_msg
*msg
,
3516 msg
->rsp
[0] = msg
->data
[0] | 4;
3517 msg
->rsp
[1] = msg
->data
[1];
3520 /* It's an error, so it will never requeue, no need to check return. */
3521 handle_one_recv_msg(intf
, msg
);
3524 static void cleanup_smi_msgs(ipmi_smi_t intf
)
3527 struct seq_table
*ent
;
3528 struct ipmi_smi_msg
*msg
;
3529 struct list_head
*entry
;
3530 struct list_head tmplist
;
3532 /* Clear out our transmit queues and hold the messages. */
3533 INIT_LIST_HEAD(&tmplist
);
3534 list_splice_tail(&intf
->hp_xmit_msgs
, &tmplist
);
3535 list_splice_tail(&intf
->xmit_msgs
, &tmplist
);
3537 /* Current message first, to preserve order */
3538 while (intf
->curr_msg
&& !list_empty(&intf
->waiting_rcv_msgs
)) {
3539 /* Wait for the message to clear out. */
3540 schedule_timeout(1);
3543 /* No need for locks, the interface is down. */
3546 * Return errors for all pending messages in queue and in the
3547 * tables waiting for remote responses.
3549 while (!list_empty(&tmplist
)) {
3550 entry
= tmplist
.next
;
3552 msg
= list_entry(entry
, struct ipmi_smi_msg
, link
);
3553 deliver_smi_err_response(intf
, msg
, IPMI_ERR_UNSPECIFIED
);
3556 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
3557 ent
= &(intf
->seq_table
[i
]);
3560 deliver_err_response(ent
->recv_msg
, IPMI_ERR_UNSPECIFIED
);
3564 int ipmi_unregister_smi(ipmi_smi_t intf
)
3566 struct ipmi_smi_watcher
*w
;
3567 int intf_num
= intf
->intf_num
;
3570 mutex_lock(&smi_watchers_mutex
);
3571 mutex_lock(&ipmi_interfaces_mutex
);
3572 intf
->intf_num
= -1;
3573 intf
->in_shutdown
= true;
3574 list_del_rcu(&intf
->link
);
3575 mutex_unlock(&ipmi_interfaces_mutex
);
3578 cleanup_smi_msgs(intf
);
3580 /* Clean up the effects of users on the lower-level software. */
3581 mutex_lock(&ipmi_interfaces_mutex
);
3583 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
3584 module_put(intf
->handlers
->owner
);
3585 if (intf
->handlers
->dec_usecount
)
3586 intf
->handlers
->dec_usecount(intf
->send_info
);
3589 intf
->handlers
= NULL
;
3590 mutex_unlock(&ipmi_interfaces_mutex
);
3592 #ifdef CONFIG_IPMI_PROC_INTERFACE
3593 remove_proc_entries(intf
);
3595 ipmi_bmc_unregister(intf
);
3598 * Call all the watcher interfaces to tell them that
3599 * an interface is gone.
3601 list_for_each_entry(w
, &smi_watchers
, link
)
3602 w
->smi_gone(intf_num
);
3603 mutex_unlock(&smi_watchers_mutex
);
3605 kref_put(&intf
->refcount
, intf_free
);
3608 EXPORT_SYMBOL(ipmi_unregister_smi
);
3610 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf
,
3611 struct ipmi_smi_msg
*msg
)
3613 struct ipmi_ipmb_addr ipmb_addr
;
3614 struct ipmi_recv_msg
*recv_msg
;
3617 * This is 11, not 10, because the response must contain a
3620 if (msg
->rsp_size
< 11) {
3621 /* Message not big enough, just ignore it. */
3622 ipmi_inc_stat(intf
, invalid_ipmb_responses
);
3626 if (msg
->rsp
[2] != 0) {
3627 /* An error getting the response, just ignore it. */
3631 ipmb_addr
.addr_type
= IPMI_IPMB_ADDR_TYPE
;
3632 ipmb_addr
.slave_addr
= msg
->rsp
[6];
3633 ipmb_addr
.channel
= msg
->rsp
[3] & 0x0f;
3634 ipmb_addr
.lun
= msg
->rsp
[7] & 3;
3637 * It's a response from a remote entity. Look up the sequence
3638 * number and handle the response.
3640 if (intf_find_seq(intf
,
3644 (msg
->rsp
[4] >> 2) & (~1),
3645 (struct ipmi_addr
*) &(ipmb_addr
),
3648 * We were unable to find the sequence number,
3649 * so just nuke the message.
3651 ipmi_inc_stat(intf
, unhandled_ipmb_responses
);
3655 memcpy(recv_msg
->msg_data
,
3659 * The other fields matched, so no need to set them, except
3660 * for netfn, which needs to be the response that was
3661 * returned, not the request value.
3663 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
3664 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3665 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
3666 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3667 ipmi_inc_stat(intf
, handled_ipmb_responses
);
3668 deliver_response(recv_msg
);
3673 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf
,
3674 struct ipmi_smi_msg
*msg
)
3676 struct cmd_rcvr
*rcvr
;
3678 unsigned char netfn
;
3681 ipmi_user_t user
= NULL
;
3682 struct ipmi_ipmb_addr
*ipmb_addr
;
3683 struct ipmi_recv_msg
*recv_msg
;
3685 if (msg
->rsp_size
< 10) {
3686 /* Message not big enough, just ignore it. */
3687 ipmi_inc_stat(intf
, invalid_commands
);
3691 if (msg
->rsp
[2] != 0) {
3692 /* An error getting the response, just ignore it. */
3696 netfn
= msg
->rsp
[4] >> 2;
3698 chan
= msg
->rsp
[3] & 0xf;
3701 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
3704 kref_get(&user
->refcount
);
3710 /* We didn't find a user, deliver an error response. */
3711 ipmi_inc_stat(intf
, unhandled_commands
);
3713 msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
3714 msg
->data
[1] = IPMI_SEND_MSG_CMD
;
3715 msg
->data
[2] = msg
->rsp
[3];
3716 msg
->data
[3] = msg
->rsp
[6];
3717 msg
->data
[4] = ((netfn
+ 1) << 2) | (msg
->rsp
[7] & 0x3);
3718 msg
->data
[5] = ipmb_checksum(&(msg
->data
[3]), 2);
3719 msg
->data
[6] = intf
->addrinfo
[msg
->rsp
[3] & 0xf].address
;
3721 msg
->data
[7] = (msg
->rsp
[7] & 0xfc) | (msg
->rsp
[4] & 0x3);
3722 msg
->data
[8] = msg
->rsp
[8]; /* cmd */
3723 msg
->data
[9] = IPMI_INVALID_CMD_COMPLETION_CODE
;
3724 msg
->data
[10] = ipmb_checksum(&(msg
->data
[6]), 4);
3725 msg
->data_size
= 11;
3730 printk("Invalid command:");
3731 for (m
= 0; m
< msg
->data_size
; m
++)
3732 printk(" %2.2x", msg
->data
[m
]);
3737 if (!intf
->in_shutdown
) {
3738 smi_send(intf
, intf
->handlers
, msg
, 0);
3740 * We used the message, so return the value
3741 * that causes it to not be freed or
3748 /* Deliver the message to the user. */
3749 ipmi_inc_stat(intf
, handled_commands
);
3751 recv_msg
= ipmi_alloc_recv_msg();
3754 * We couldn't allocate memory for the
3755 * message, so requeue it for handling
3759 kref_put(&user
->refcount
, free_user
);
3761 /* Extract the source address from the data. */
3762 ipmb_addr
= (struct ipmi_ipmb_addr
*) &recv_msg
->addr
;
3763 ipmb_addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
3764 ipmb_addr
->slave_addr
= msg
->rsp
[6];
3765 ipmb_addr
->lun
= msg
->rsp
[7] & 3;
3766 ipmb_addr
->channel
= msg
->rsp
[3] & 0xf;
3769 * Extract the rest of the message information
3770 * from the IPMB header.
3772 recv_msg
->user
= user
;
3773 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
3774 recv_msg
->msgid
= msg
->rsp
[7] >> 2;
3775 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
3776 recv_msg
->msg
.cmd
= msg
->rsp
[8];
3777 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3780 * We chop off 10, not 9 bytes because the checksum
3781 * at the end also needs to be removed.
3783 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
3784 memcpy(recv_msg
->msg_data
,
3786 msg
->rsp_size
- 10);
3787 deliver_response(recv_msg
);
3794 static int handle_lan_get_msg_rsp(ipmi_smi_t intf
,
3795 struct ipmi_smi_msg
*msg
)
3797 struct ipmi_lan_addr lan_addr
;
3798 struct ipmi_recv_msg
*recv_msg
;
3802 * This is 13, not 12, because the response must contain a
3805 if (msg
->rsp_size
< 13) {
3806 /* Message not big enough, just ignore it. */
3807 ipmi_inc_stat(intf
, invalid_lan_responses
);
3811 if (msg
->rsp
[2] != 0) {
3812 /* An error getting the response, just ignore it. */
3816 lan_addr
.addr_type
= IPMI_LAN_ADDR_TYPE
;
3817 lan_addr
.session_handle
= msg
->rsp
[4];
3818 lan_addr
.remote_SWID
= msg
->rsp
[8];
3819 lan_addr
.local_SWID
= msg
->rsp
[5];
3820 lan_addr
.channel
= msg
->rsp
[3] & 0x0f;
3821 lan_addr
.privilege
= msg
->rsp
[3] >> 4;
3822 lan_addr
.lun
= msg
->rsp
[9] & 3;
3825 * It's a response from a remote entity. Look up the sequence
3826 * number and handle the response.
3828 if (intf_find_seq(intf
,
3832 (msg
->rsp
[6] >> 2) & (~1),
3833 (struct ipmi_addr
*) &(lan_addr
),
3836 * We were unable to find the sequence number,
3837 * so just nuke the message.
3839 ipmi_inc_stat(intf
, unhandled_lan_responses
);
3843 memcpy(recv_msg
->msg_data
,
3845 msg
->rsp_size
- 11);
3847 * The other fields matched, so no need to set them, except
3848 * for netfn, which needs to be the response that was
3849 * returned, not the request value.
3851 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
3852 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3853 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
3854 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3855 ipmi_inc_stat(intf
, handled_lan_responses
);
3856 deliver_response(recv_msg
);
3861 static int handle_lan_get_msg_cmd(ipmi_smi_t intf
,
3862 struct ipmi_smi_msg
*msg
)
3864 struct cmd_rcvr
*rcvr
;
3866 unsigned char netfn
;
3869 ipmi_user_t user
= NULL
;
3870 struct ipmi_lan_addr
*lan_addr
;
3871 struct ipmi_recv_msg
*recv_msg
;
3873 if (msg
->rsp_size
< 12) {
3874 /* Message not big enough, just ignore it. */
3875 ipmi_inc_stat(intf
, invalid_commands
);
3879 if (msg
->rsp
[2] != 0) {
3880 /* An error getting the response, just ignore it. */
3884 netfn
= msg
->rsp
[6] >> 2;
3886 chan
= msg
->rsp
[3] & 0xf;
3889 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
3892 kref_get(&user
->refcount
);
3898 /* We didn't find a user, just give up. */
3899 ipmi_inc_stat(intf
, unhandled_commands
);
3902 * Don't do anything with these messages, just allow
3907 /* Deliver the message to the user. */
3908 ipmi_inc_stat(intf
, handled_commands
);
3910 recv_msg
= ipmi_alloc_recv_msg();
3913 * We couldn't allocate memory for the
3914 * message, so requeue it for handling later.
3917 kref_put(&user
->refcount
, free_user
);
3919 /* Extract the source address from the data. */
3920 lan_addr
= (struct ipmi_lan_addr
*) &recv_msg
->addr
;
3921 lan_addr
->addr_type
= IPMI_LAN_ADDR_TYPE
;
3922 lan_addr
->session_handle
= msg
->rsp
[4];
3923 lan_addr
->remote_SWID
= msg
->rsp
[8];
3924 lan_addr
->local_SWID
= msg
->rsp
[5];
3925 lan_addr
->lun
= msg
->rsp
[9] & 3;
3926 lan_addr
->channel
= msg
->rsp
[3] & 0xf;
3927 lan_addr
->privilege
= msg
->rsp
[3] >> 4;
3930 * Extract the rest of the message information
3931 * from the IPMB header.
3933 recv_msg
->user
= user
;
3934 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
3935 recv_msg
->msgid
= msg
->rsp
[9] >> 2;
3936 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
3937 recv_msg
->msg
.cmd
= msg
->rsp
[10];
3938 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3941 * We chop off 12, not 11 bytes because the checksum
3942 * at the end also needs to be removed.
3944 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
3945 memcpy(recv_msg
->msg_data
,
3947 msg
->rsp_size
- 12);
3948 deliver_response(recv_msg
);
3956 * This routine will handle "Get Message" command responses with
3957 * channels that use an OEM Medium. The message format belongs to
3958 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3959 * Chapter 22, sections 22.6 and 22.24 for more details.
3961 static int handle_oem_get_msg_cmd(ipmi_smi_t intf
,
3962 struct ipmi_smi_msg
*msg
)
3964 struct cmd_rcvr
*rcvr
;
3966 unsigned char netfn
;
3969 ipmi_user_t user
= NULL
;
3970 struct ipmi_system_interface_addr
*smi_addr
;
3971 struct ipmi_recv_msg
*recv_msg
;
3974 * We expect the OEM SW to perform error checking
3975 * so we just do some basic sanity checks
3977 if (msg
->rsp_size
< 4) {
3978 /* Message not big enough, just ignore it. */
3979 ipmi_inc_stat(intf
, invalid_commands
);
3983 if (msg
->rsp
[2] != 0) {
3984 /* An error getting the response, just ignore it. */
3989 * This is an OEM Message so the OEM needs to know how
3990 * handle the message. We do no interpretation.
3992 netfn
= msg
->rsp
[0] >> 2;
3994 chan
= msg
->rsp
[3] & 0xf;
3997 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
4000 kref_get(&user
->refcount
);
4006 /* We didn't find a user, just give up. */
4007 ipmi_inc_stat(intf
, unhandled_commands
);
4010 * Don't do anything with these messages, just allow
4016 /* Deliver the message to the user. */
4017 ipmi_inc_stat(intf
, handled_commands
);
4019 recv_msg
= ipmi_alloc_recv_msg();
4022 * We couldn't allocate memory for the
4023 * message, so requeue it for handling
4027 kref_put(&user
->refcount
, free_user
);
4030 * OEM Messages are expected to be delivered via
4031 * the system interface to SMS software. We might
4032 * need to visit this again depending on OEM
4035 smi_addr
= ((struct ipmi_system_interface_addr
*)
4037 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
4038 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
4039 smi_addr
->lun
= msg
->rsp
[0] & 3;
4041 recv_msg
->user
= user
;
4042 recv_msg
->user_msg_data
= NULL
;
4043 recv_msg
->recv_type
= IPMI_OEM_RECV_TYPE
;
4044 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
4045 recv_msg
->msg
.cmd
= msg
->rsp
[1];
4046 recv_msg
->msg
.data
= recv_msg
->msg_data
;
4049 * The message starts at byte 4 which follows the
4050 * the Channel Byte in the "GET MESSAGE" command
4052 recv_msg
->msg
.data_len
= msg
->rsp_size
- 4;
4053 memcpy(recv_msg
->msg_data
,
4056 deliver_response(recv_msg
);
4063 static void copy_event_into_recv_msg(struct ipmi_recv_msg
*recv_msg
,
4064 struct ipmi_smi_msg
*msg
)
4066 struct ipmi_system_interface_addr
*smi_addr
;
4068 recv_msg
->msgid
= 0;
4069 smi_addr
= (struct ipmi_system_interface_addr
*) &(recv_msg
->addr
);
4070 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
4071 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
4072 smi_addr
->lun
= msg
->rsp
[0] & 3;
4073 recv_msg
->recv_type
= IPMI_ASYNC_EVENT_RECV_TYPE
;
4074 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
4075 recv_msg
->msg
.cmd
= msg
->rsp
[1];
4076 memcpy(recv_msg
->msg_data
, &(msg
->rsp
[3]), msg
->rsp_size
- 3);
4077 recv_msg
->msg
.data
= recv_msg
->msg_data
;
4078 recv_msg
->msg
.data_len
= msg
->rsp_size
- 3;
4081 static int handle_read_event_rsp(ipmi_smi_t intf
,
4082 struct ipmi_smi_msg
*msg
)
4084 struct ipmi_recv_msg
*recv_msg
, *recv_msg2
;
4085 struct list_head msgs
;
4088 int deliver_count
= 0;
4089 unsigned long flags
;
4091 if (msg
->rsp_size
< 19) {
4092 /* Message is too small to be an IPMB event. */
4093 ipmi_inc_stat(intf
, invalid_events
);
4097 if (msg
->rsp
[2] != 0) {
4098 /* An error getting the event, just ignore it. */
4102 INIT_LIST_HEAD(&msgs
);
4104 spin_lock_irqsave(&intf
->events_lock
, flags
);
4106 ipmi_inc_stat(intf
, events
);
4109 * Allocate and fill in one message for every user that is
4113 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
4114 if (!user
->gets_events
)
4117 recv_msg
= ipmi_alloc_recv_msg();
4120 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
,
4122 list_del(&recv_msg
->link
);
4123 ipmi_free_recv_msg(recv_msg
);
4126 * We couldn't allocate memory for the
4127 * message, so requeue it for handling
4136 copy_event_into_recv_msg(recv_msg
, msg
);
4137 recv_msg
->user
= user
;
4138 kref_get(&user
->refcount
);
4139 list_add_tail(&(recv_msg
->link
), &msgs
);
4143 if (deliver_count
) {
4144 /* Now deliver all the messages. */
4145 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
, link
) {
4146 list_del(&recv_msg
->link
);
4147 deliver_response(recv_msg
);
4149 } else if (intf
->waiting_events_count
< MAX_EVENTS_IN_QUEUE
) {
4151 * No one to receive the message, put it in queue if there's
4152 * not already too many things in the queue.
4154 recv_msg
= ipmi_alloc_recv_msg();
4157 * We couldn't allocate memory for the
4158 * message, so requeue it for handling
4165 copy_event_into_recv_msg(recv_msg
, msg
);
4166 list_add_tail(&(recv_msg
->link
), &(intf
->waiting_events
));
4167 intf
->waiting_events_count
++;
4168 } else if (!intf
->event_msg_printed
) {
4170 * There's too many things in the queue, discard this
4173 dev_warn(intf
->si_dev
,
4174 PFX
"Event queue full, discarding incoming events\n");
4175 intf
->event_msg_printed
= 1;
4179 spin_unlock_irqrestore(&(intf
->events_lock
), flags
);
4184 static int handle_bmc_rsp(ipmi_smi_t intf
,
4185 struct ipmi_smi_msg
*msg
)
4187 struct ipmi_recv_msg
*recv_msg
;
4188 struct ipmi_user
*user
;
4190 recv_msg
= (struct ipmi_recv_msg
*) msg
->user_data
;
4191 if (recv_msg
== NULL
) {
4192 dev_warn(intf
->si_dev
,
4193 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
4197 user
= recv_msg
->user
;
4198 /* Make sure the user still exists. */
4199 if (user
&& !user
->valid
) {
4200 /* The user for the message went away, so give up. */
4201 ipmi_inc_stat(intf
, unhandled_local_responses
);
4202 ipmi_free_recv_msg(recv_msg
);
4204 struct ipmi_system_interface_addr
*smi_addr
;
4206 ipmi_inc_stat(intf
, handled_local_responses
);
4207 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
4208 recv_msg
->msgid
= msg
->msgid
;
4209 smi_addr
= ((struct ipmi_system_interface_addr
*)
4211 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
4212 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
4213 smi_addr
->lun
= msg
->rsp
[0] & 3;
4214 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
4215 recv_msg
->msg
.cmd
= msg
->rsp
[1];
4216 memcpy(recv_msg
->msg_data
,
4219 recv_msg
->msg
.data
= recv_msg
->msg_data
;
4220 recv_msg
->msg
.data_len
= msg
->rsp_size
- 2;
4221 deliver_response(recv_msg
);
4228 * Handle a received message. Return 1 if the message should be requeued,
4229 * 0 if the message should be freed, or -1 if the message should not
4230 * be freed or requeued.
4232 static int handle_one_recv_msg(ipmi_smi_t intf
,
4233 struct ipmi_smi_msg
*msg
)
4241 for (m
= 0; m
< msg
->rsp_size
; m
++)
4242 printk(" %2.2x", msg
->rsp
[m
]);
4245 if (msg
->rsp_size
< 2) {
4246 /* Message is too small to be correct. */
4247 dev_warn(intf
->si_dev
,
4248 PFX
"BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
4249 (msg
->data
[0] >> 2) | 1, msg
->data
[1], msg
->rsp_size
);
4251 /* Generate an error response for the message. */
4252 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
4253 msg
->rsp
[1] = msg
->data
[1];
4254 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
4256 } else if (((msg
->rsp
[0] >> 2) != ((msg
->data
[0] >> 2) | 1))
4257 || (msg
->rsp
[1] != msg
->data
[1])) {
4259 * The NetFN and Command in the response is not even
4260 * marginally correct.
4262 dev_warn(intf
->si_dev
,
4263 PFX
"BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4264 (msg
->data
[0] >> 2) | 1, msg
->data
[1],
4265 msg
->rsp
[0] >> 2, msg
->rsp
[1]);
4267 /* Generate an error response for the message. */
4268 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
4269 msg
->rsp
[1] = msg
->data
[1];
4270 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
4274 if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
4275 && (msg
->rsp
[1] == IPMI_SEND_MSG_CMD
)
4276 && (msg
->user_data
!= NULL
)) {
4278 * It's a response to a response we sent. For this we
4279 * deliver a send message response to the user.
4281 struct ipmi_recv_msg
*recv_msg
= msg
->user_data
;
4284 if (msg
->rsp_size
< 2)
4285 /* Message is too small to be correct. */
4288 chan
= msg
->data
[2] & 0x0f;
4289 if (chan
>= IPMI_MAX_CHANNELS
)
4290 /* Invalid channel number */
4296 /* Make sure the user still exists. */
4297 if (!recv_msg
->user
|| !recv_msg
->user
->valid
)
4300 recv_msg
->recv_type
= IPMI_RESPONSE_RESPONSE_TYPE
;
4301 recv_msg
->msg
.data
= recv_msg
->msg_data
;
4302 recv_msg
->msg
.data_len
= 1;
4303 recv_msg
->msg_data
[0] = msg
->rsp
[2];
4304 deliver_response(recv_msg
);
4305 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
4306 && (msg
->rsp
[1] == IPMI_GET_MSG_CMD
)) {
4307 struct ipmi_channel
*chans
;
4309 /* It's from the receive queue. */
4310 chan
= msg
->rsp
[3] & 0xf;
4311 if (chan
>= IPMI_MAX_CHANNELS
) {
4312 /* Invalid channel number */
4318 * We need to make sure the channels have been initialized.
4319 * The channel_handler routine will set the "curr_channel"
4320 * equal to or greater than IPMI_MAX_CHANNELS when all the
4321 * channels for this interface have been initialized.
4323 if (!intf
->channels_ready
) {
4324 requeue
= 0; /* Throw the message away */
4328 chans
= READ_ONCE(intf
->channel_list
)->c
;
4330 switch (chans
[chan
].medium
) {
4331 case IPMI_CHANNEL_MEDIUM_IPMB
:
4332 if (msg
->rsp
[4] & 0x04) {
4334 * It's a response, so find the
4335 * requesting message and send it up.
4337 requeue
= handle_ipmb_get_msg_rsp(intf
, msg
);
4340 * It's a command to the SMS from some other
4341 * entity. Handle that.
4343 requeue
= handle_ipmb_get_msg_cmd(intf
, msg
);
4347 case IPMI_CHANNEL_MEDIUM_8023LAN
:
4348 case IPMI_CHANNEL_MEDIUM_ASYNC
:
4349 if (msg
->rsp
[6] & 0x04) {
4351 * It's a response, so find the
4352 * requesting message and send it up.
4354 requeue
= handle_lan_get_msg_rsp(intf
, msg
);
4357 * It's a command to the SMS from some other
4358 * entity. Handle that.
4360 requeue
= handle_lan_get_msg_cmd(intf
, msg
);
4365 /* Check for OEM Channels. Clients had better
4366 register for these commands. */
4367 if ((chans
[chan
].medium
>= IPMI_CHANNEL_MEDIUM_OEM_MIN
)
4368 && (chans
[chan
].medium
4369 <= IPMI_CHANNEL_MEDIUM_OEM_MAX
)) {
4370 requeue
= handle_oem_get_msg_cmd(intf
, msg
);
4373 * We don't handle the channel type, so just
4380 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
4381 && (msg
->rsp
[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD
)) {
4382 /* It's an asynchronous event. */
4383 requeue
= handle_read_event_rsp(intf
, msg
);
4385 /* It's a response from the local BMC. */
4386 requeue
= handle_bmc_rsp(intf
, msg
);
4394 * If there are messages in the queue or pretimeouts, handle them.
4396 static void handle_new_recv_msgs(ipmi_smi_t intf
)
4398 struct ipmi_smi_msg
*smi_msg
;
4399 unsigned long flags
= 0;
4401 int run_to_completion
= intf
->run_to_completion
;
4403 /* See if any waiting messages need to be processed. */
4404 if (!run_to_completion
)
4405 spin_lock_irqsave(&intf
->waiting_rcv_msgs_lock
, flags
);
4406 while (!list_empty(&intf
->waiting_rcv_msgs
)) {
4407 smi_msg
= list_entry(intf
->waiting_rcv_msgs
.next
,
4408 struct ipmi_smi_msg
, link
);
4409 list_del(&smi_msg
->link
);
4410 if (!run_to_completion
)
4411 spin_unlock_irqrestore(&intf
->waiting_rcv_msgs_lock
,
4413 rv
= handle_one_recv_msg(intf
, smi_msg
);
4414 if (!run_to_completion
)
4415 spin_lock_irqsave(&intf
->waiting_rcv_msgs_lock
, flags
);
4418 * To preserve message order, quit if we
4419 * can't handle a message. Add the message
4420 * back at the head, this is safe because this
4421 * tasklet is the only thing that pulls the
4424 list_add(&smi_msg
->link
, &intf
->waiting_rcv_msgs
);
4428 /* Message handled */
4429 ipmi_free_smi_msg(smi_msg
);
4430 /* If rv < 0, fatal error, del but don't free. */
4433 if (!run_to_completion
)
4434 spin_unlock_irqrestore(&intf
->waiting_rcv_msgs_lock
, flags
);
4437 * If the pretimout count is non-zero, decrement one from it and
4438 * deliver pretimeouts to all the users.
4440 if (atomic_add_unless(&intf
->watchdog_pretimeouts_to_deliver
, -1, 0)) {
4444 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
4445 if (user
->handler
->ipmi_watchdog_pretimeout
)
4446 user
->handler
->ipmi_watchdog_pretimeout(
4447 user
->handler_data
);
4453 static void smi_recv_tasklet(unsigned long val
)
4455 unsigned long flags
= 0; /* keep us warning-free. */
4456 ipmi_smi_t intf
= (ipmi_smi_t
) val
;
4457 int run_to_completion
= intf
->run_to_completion
;
4458 struct ipmi_smi_msg
*newmsg
= NULL
;
4461 * Start the next message if available.
4463 * Do this here, not in the actual receiver, because we may deadlock
4464 * because the lower layer is allowed to hold locks while calling
4470 if (!run_to_completion
)
4471 spin_lock_irqsave(&intf
->xmit_msgs_lock
, flags
);
4472 if (intf
->curr_msg
== NULL
&& !intf
->in_shutdown
) {
4473 struct list_head
*entry
= NULL
;
4475 /* Pick the high priority queue first. */
4476 if (!list_empty(&intf
->hp_xmit_msgs
))
4477 entry
= intf
->hp_xmit_msgs
.next
;
4478 else if (!list_empty(&intf
->xmit_msgs
))
4479 entry
= intf
->xmit_msgs
.next
;
4483 newmsg
= list_entry(entry
, struct ipmi_smi_msg
, link
);
4484 intf
->curr_msg
= newmsg
;
4487 if (!run_to_completion
)
4488 spin_unlock_irqrestore(&intf
->xmit_msgs_lock
, flags
);
4490 intf
->handlers
->sender(intf
->send_info
, newmsg
);
4494 handle_new_recv_msgs(intf
);
4497 /* Handle a new message from the lower layer. */
4498 void ipmi_smi_msg_received(ipmi_smi_t intf
,
4499 struct ipmi_smi_msg
*msg
)
4501 unsigned long flags
= 0; /* keep us warning-free. */
4502 int run_to_completion
= intf
->run_to_completion
;
4504 if ((msg
->data_size
>= 2)
4505 && (msg
->data
[0] == (IPMI_NETFN_APP_REQUEST
<< 2))
4506 && (msg
->data
[1] == IPMI_SEND_MSG_CMD
)
4507 && (msg
->user_data
== NULL
)) {
4509 if (intf
->in_shutdown
)
4513 * This is the local response to a command send, start
4514 * the timer for these. The user_data will not be
4515 * NULL if this is a response send, and we will let
4516 * response sends just go through.
4520 * Check for errors, if we get certain errors (ones
4521 * that mean basically we can try again later), we
4522 * ignore them and start the timer. Otherwise we
4523 * report the error immediately.
4525 if ((msg
->rsp_size
>= 3) && (msg
->rsp
[2] != 0)
4526 && (msg
->rsp
[2] != IPMI_NODE_BUSY_ERR
)
4527 && (msg
->rsp
[2] != IPMI_LOST_ARBITRATION_ERR
)
4528 && (msg
->rsp
[2] != IPMI_BUS_ERR
)
4529 && (msg
->rsp
[2] != IPMI_NAK_ON_WRITE_ERR
)) {
4530 int ch
= msg
->rsp
[3] & 0xf;
4531 struct ipmi_channel
*chans
;
4533 /* Got an error sending the message, handle it. */
4535 chans
= READ_ONCE(intf
->channel_list
)->c
;
4536 if ((chans
[ch
].medium
== IPMI_CHANNEL_MEDIUM_8023LAN
)
4537 || (chans
[ch
].medium
== IPMI_CHANNEL_MEDIUM_ASYNC
))
4538 ipmi_inc_stat(intf
, sent_lan_command_errs
);
4540 ipmi_inc_stat(intf
, sent_ipmb_command_errs
);
4541 intf_err_seq(intf
, msg
->msgid
, msg
->rsp
[2]);
4543 /* The message was sent, start the timer. */
4544 intf_start_seq_timer(intf
, msg
->msgid
);
4547 ipmi_free_smi_msg(msg
);
4550 * To preserve message order, we keep a queue and deliver from
4553 if (!run_to_completion
)
4554 spin_lock_irqsave(&intf
->waiting_rcv_msgs_lock
, flags
);
4555 list_add_tail(&msg
->link
, &intf
->waiting_rcv_msgs
);
4556 if (!run_to_completion
)
4557 spin_unlock_irqrestore(&intf
->waiting_rcv_msgs_lock
,
4561 if (!run_to_completion
)
4562 spin_lock_irqsave(&intf
->xmit_msgs_lock
, flags
);
4564 * We can get an asynchronous event or receive message in addition
4565 * to commands we send.
4567 if (msg
== intf
->curr_msg
)
4568 intf
->curr_msg
= NULL
;
4569 if (!run_to_completion
)
4570 spin_unlock_irqrestore(&intf
->xmit_msgs_lock
, flags
);
4572 if (run_to_completion
)
4573 smi_recv_tasklet((unsigned long) intf
);
4575 tasklet_schedule(&intf
->recv_tasklet
);
4577 EXPORT_SYMBOL(ipmi_smi_msg_received
);
4579 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf
)
4581 if (intf
->in_shutdown
)
4584 atomic_set(&intf
->watchdog_pretimeouts_to_deliver
, 1);
4585 tasklet_schedule(&intf
->recv_tasklet
);
4587 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout
);
4589 static struct ipmi_smi_msg
*
4590 smi_from_recv_msg(ipmi_smi_t intf
, struct ipmi_recv_msg
*recv_msg
,
4591 unsigned char seq
, long seqid
)
4593 struct ipmi_smi_msg
*smi_msg
= ipmi_alloc_smi_msg();
4596 * If we can't allocate the message, then just return, we
4597 * get 4 retries, so this should be ok.
4601 memcpy(smi_msg
->data
, recv_msg
->msg
.data
, recv_msg
->msg
.data_len
);
4602 smi_msg
->data_size
= recv_msg
->msg
.data_len
;
4603 smi_msg
->msgid
= STORE_SEQ_IN_MSGID(seq
, seqid
);
4609 for (m
= 0; m
< smi_msg
->data_size
; m
++)
4610 printk(" %2.2x", smi_msg
->data
[m
]);
4617 static void check_msg_timeout(ipmi_smi_t intf
, struct seq_table
*ent
,
4618 struct list_head
*timeouts
,
4619 unsigned long timeout_period
,
4620 int slot
, unsigned long *flags
,
4621 unsigned int *waiting_msgs
)
4623 struct ipmi_recv_msg
*msg
;
4624 const struct ipmi_smi_handlers
*handlers
;
4626 if (intf
->in_shutdown
)
4632 if (timeout_period
< ent
->timeout
) {
4633 ent
->timeout
-= timeout_period
;
4638 if (ent
->retries_left
== 0) {
4639 /* The message has used all its retries. */
4641 msg
= ent
->recv_msg
;
4642 list_add_tail(&msg
->link
, timeouts
);
4644 ipmi_inc_stat(intf
, timed_out_ipmb_broadcasts
);
4645 else if (is_lan_addr(&ent
->recv_msg
->addr
))
4646 ipmi_inc_stat(intf
, timed_out_lan_commands
);
4648 ipmi_inc_stat(intf
, timed_out_ipmb_commands
);
4650 struct ipmi_smi_msg
*smi_msg
;
4651 /* More retries, send again. */
4656 * Start with the max timer, set to normal timer after
4657 * the message is sent.
4659 ent
->timeout
= MAX_MSG_TIMEOUT
;
4660 ent
->retries_left
--;
4661 smi_msg
= smi_from_recv_msg(intf
, ent
->recv_msg
, slot
,
4664 if (is_lan_addr(&ent
->recv_msg
->addr
))
4666 dropped_rexmit_lan_commands
);
4669 dropped_rexmit_ipmb_commands
);
4673 spin_unlock_irqrestore(&intf
->seq_lock
, *flags
);
4676 * Send the new message. We send with a zero
4677 * priority. It timed out, I doubt time is that
4678 * critical now, and high priority messages are really
4679 * only for messages to the local MC, which don't get
4682 handlers
= intf
->handlers
;
4684 if (is_lan_addr(&ent
->recv_msg
->addr
))
4686 retransmitted_lan_commands
);
4689 retransmitted_ipmb_commands
);
4691 smi_send(intf
, handlers
, smi_msg
, 0);
4693 ipmi_free_smi_msg(smi_msg
);
4695 spin_lock_irqsave(&intf
->seq_lock
, *flags
);
4699 static unsigned int ipmi_timeout_handler(ipmi_smi_t intf
,
4700 unsigned long timeout_period
)
4702 struct list_head timeouts
;
4703 struct ipmi_recv_msg
*msg
, *msg2
;
4704 unsigned long flags
;
4706 unsigned int waiting_msgs
= 0;
4708 if (!intf
->bmc_registered
) {
4709 kref_get(&intf
->refcount
);
4710 if (!schedule_work(&intf
->bmc_reg_work
)) {
4711 kref_put(&intf
->refcount
, intf_free
);
4717 * Go through the seq table and find any messages that
4718 * have timed out, putting them in the timeouts
4721 INIT_LIST_HEAD(&timeouts
);
4722 spin_lock_irqsave(&intf
->seq_lock
, flags
);
4723 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++)
4724 check_msg_timeout(intf
, &(intf
->seq_table
[i
]),
4725 &timeouts
, timeout_period
, i
,
4726 &flags
, &waiting_msgs
);
4727 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
4729 list_for_each_entry_safe(msg
, msg2
, &timeouts
, link
)
4730 deliver_err_response(msg
, IPMI_TIMEOUT_COMPLETION_CODE
);
4733 * Maintenance mode handling. Check the timeout
4734 * optimistically before we claim the lock. It may
4735 * mean a timeout gets missed occasionally, but that
4736 * only means the timeout gets extended by one period
4737 * in that case. No big deal, and it avoids the lock
4740 if (intf
->auto_maintenance_timeout
> 0) {
4741 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
4742 if (intf
->auto_maintenance_timeout
> 0) {
4743 intf
->auto_maintenance_timeout
4745 if (!intf
->maintenance_mode
4746 && (intf
->auto_maintenance_timeout
<= 0)) {
4747 intf
->maintenance_mode_enable
= false;
4748 maintenance_mode_update(intf
);
4751 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
,
4755 tasklet_schedule(&intf
->recv_tasklet
);
4757 return waiting_msgs
;
4760 static void ipmi_request_event(ipmi_smi_t intf
)
4762 /* No event requests when in maintenance mode. */
4763 if (intf
->maintenance_mode_enable
)
4766 if (!intf
->in_shutdown
)
4767 intf
->handlers
->request_events(intf
->send_info
);
4770 static struct timer_list ipmi_timer
;
4772 static atomic_t stop_operation
;
4774 static void ipmi_timeout(struct timer_list
*unused
)
4779 if (atomic_read(&stop_operation
))
4783 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
4786 if (atomic_read(&intf
->event_waiters
)) {
4787 intf
->ticks_to_req_ev
--;
4788 if (intf
->ticks_to_req_ev
== 0) {
4789 ipmi_request_event(intf
);
4790 intf
->ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
4795 lnt
+= ipmi_timeout_handler(intf
, IPMI_TIMEOUT_TIME
);
4798 if (lnt
!= intf
->last_needs_timer
&&
4799 intf
->handlers
->set_need_watch
)
4800 intf
->handlers
->set_need_watch(intf
->send_info
, lnt
);
4801 intf
->last_needs_timer
= lnt
;
4808 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
4811 static void need_waiter(ipmi_smi_t intf
)
4813 /* Racy, but worst case we start the timer twice. */
4814 if (!timer_pending(&ipmi_timer
))
4815 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
4818 static atomic_t smi_msg_inuse_count
= ATOMIC_INIT(0);
4819 static atomic_t recv_msg_inuse_count
= ATOMIC_INIT(0);
4821 static void free_smi_msg(struct ipmi_smi_msg
*msg
)
4823 atomic_dec(&smi_msg_inuse_count
);
4827 struct ipmi_smi_msg
*ipmi_alloc_smi_msg(void)
4829 struct ipmi_smi_msg
*rv
;
4830 rv
= kmalloc(sizeof(struct ipmi_smi_msg
), GFP_ATOMIC
);
4832 rv
->done
= free_smi_msg
;
4833 rv
->user_data
= NULL
;
4834 atomic_inc(&smi_msg_inuse_count
);
4838 EXPORT_SYMBOL(ipmi_alloc_smi_msg
);
4840 static void free_recv_msg(struct ipmi_recv_msg
*msg
)
4842 atomic_dec(&recv_msg_inuse_count
);
4846 static struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void)
4848 struct ipmi_recv_msg
*rv
;
4850 rv
= kmalloc(sizeof(struct ipmi_recv_msg
), GFP_ATOMIC
);
4853 rv
->done
= free_recv_msg
;
4854 atomic_inc(&recv_msg_inuse_count
);
4859 void ipmi_free_recv_msg(struct ipmi_recv_msg
*msg
)
4862 kref_put(&msg
->user
->refcount
, free_user
);
4865 EXPORT_SYMBOL(ipmi_free_recv_msg
);
4867 static atomic_t panic_done_count
= ATOMIC_INIT(0);
4869 static void dummy_smi_done_handler(struct ipmi_smi_msg
*msg
)
4871 atomic_dec(&panic_done_count
);
4874 static void dummy_recv_done_handler(struct ipmi_recv_msg
*msg
)
4876 atomic_dec(&panic_done_count
);
4880 * Inside a panic, send a message and wait for a response.
4882 static void ipmi_panic_request_and_wait(ipmi_smi_t intf
,
4883 struct ipmi_addr
*addr
,
4884 struct kernel_ipmi_msg
*msg
)
4886 struct ipmi_smi_msg smi_msg
;
4887 struct ipmi_recv_msg recv_msg
;
4890 smi_msg
.done
= dummy_smi_done_handler
;
4891 recv_msg
.done
= dummy_recv_done_handler
;
4892 atomic_add(2, &panic_done_count
);
4893 rv
= i_ipmi_request(NULL
,
4902 intf
->addrinfo
[0].address
,
4903 intf
->addrinfo
[0].lun
,
4904 0, 1); /* Don't retry, and don't wait. */
4906 atomic_sub(2, &panic_done_count
);
4907 else if (intf
->handlers
->flush_messages
)
4908 intf
->handlers
->flush_messages(intf
->send_info
);
4910 while (atomic_read(&panic_done_count
) != 0)
4914 static void event_receiver_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
4916 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
4917 && (msg
->msg
.netfn
== IPMI_NETFN_SENSOR_EVENT_RESPONSE
)
4918 && (msg
->msg
.cmd
== IPMI_GET_EVENT_RECEIVER_CMD
)
4919 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
)) {
4920 /* A get event receiver command, save it. */
4921 intf
->event_receiver
= msg
->msg
.data
[1];
4922 intf
->event_receiver_lun
= msg
->msg
.data
[2] & 0x3;
4926 static void device_id_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
4928 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
4929 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
4930 && (msg
->msg
.cmd
== IPMI_GET_DEVICE_ID_CMD
)
4931 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
)) {
4933 * A get device id command, save if we are an event
4934 * receiver or generator.
4936 intf
->local_sel_device
= (msg
->msg
.data
[6] >> 2) & 1;
4937 intf
->local_event_generator
= (msg
->msg
.data
[6] >> 5) & 1;
4941 static void send_panic_events(char *str
)
4943 struct kernel_ipmi_msg msg
;
4945 unsigned char data
[16];
4946 struct ipmi_system_interface_addr
*si
;
4947 struct ipmi_addr addr
;
4949 if (ipmi_send_panic_event
== IPMI_SEND_PANIC_EVENT_NONE
)
4952 si
= (struct ipmi_system_interface_addr
*) &addr
;
4953 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
4954 si
->channel
= IPMI_BMC_CHANNEL
;
4957 /* Fill in an event telling that we have failed. */
4958 msg
.netfn
= 0x04; /* Sensor or Event. */
4959 msg
.cmd
= 2; /* Platform event command. */
4962 data
[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4963 data
[1] = 0x03; /* This is for IPMI 1.0. */
4964 data
[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4965 data
[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4966 data
[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4969 * Put a few breadcrumbs in. Hopefully later we can add more things
4970 * to make the panic events more useful.
4978 /* For every registered interface, send the event. */
4979 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
4980 if (!intf
->handlers
|| !intf
->handlers
->poll
)
4981 /* Interface is not ready or can't run at panic time. */
4984 /* Send the event announcing the panic. */
4985 ipmi_panic_request_and_wait(intf
, &addr
, &msg
);
4989 * On every interface, dump a bunch of OEM event holding the
4992 if (ipmi_send_panic_event
!= IPMI_SEND_PANIC_EVENT_STRING
|| !str
)
4995 /* For every registered interface, send the event. */
4996 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
4998 struct ipmi_ipmb_addr
*ipmb
;
5001 if (intf
->intf_num
== -1)
5002 /* Interface was not ready yet. */
5006 * intf_num is used as an marker to tell if the
5007 * interface is valid. Thus we need a read barrier to
5008 * make sure data fetched before checking intf_num
5014 * First job here is to figure out where to send the
5015 * OEM events. There's no way in IPMI to send OEM
5016 * events using an event send command, so we have to
5017 * find the SEL to put them in and stick them in
5021 /* Get capabilities from the get device id. */
5022 intf
->local_sel_device
= 0;
5023 intf
->local_event_generator
= 0;
5024 intf
->event_receiver
= 0;
5026 /* Request the device info from the local MC. */
5027 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
5028 msg
.cmd
= IPMI_GET_DEVICE_ID_CMD
;
5031 intf
->null_user_handler
= device_id_fetcher
;
5032 ipmi_panic_request_and_wait(intf
, &addr
, &msg
);
5034 if (intf
->local_event_generator
) {
5035 /* Request the event receiver from the local MC. */
5036 msg
.netfn
= IPMI_NETFN_SENSOR_EVENT_REQUEST
;
5037 msg
.cmd
= IPMI_GET_EVENT_RECEIVER_CMD
;
5040 intf
->null_user_handler
= event_receiver_fetcher
;
5041 ipmi_panic_request_and_wait(intf
, &addr
, &msg
);
5043 intf
->null_user_handler
= NULL
;
5046 * Validate the event receiver. The low bit must not
5047 * be 1 (it must be a valid IPMB address), it cannot
5048 * be zero, and it must not be my address.
5050 if (((intf
->event_receiver
& 1) == 0)
5051 && (intf
->event_receiver
!= 0)
5052 && (intf
->event_receiver
!= intf
->addrinfo
[0].address
)) {
5054 * The event receiver is valid, send an IPMB
5057 ipmb
= (struct ipmi_ipmb_addr
*) &addr
;
5058 ipmb
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
5059 ipmb
->channel
= 0; /* FIXME - is this right? */
5060 ipmb
->lun
= intf
->event_receiver_lun
;
5061 ipmb
->slave_addr
= intf
->event_receiver
;
5062 } else if (intf
->local_sel_device
) {
5064 * The event receiver was not valid (or was
5065 * me), but I am an SEL device, just dump it
5068 si
= (struct ipmi_system_interface_addr
*) &addr
;
5069 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
5070 si
->channel
= IPMI_BMC_CHANNEL
;
5073 continue; /* No where to send the event. */
5075 msg
.netfn
= IPMI_NETFN_STORAGE_REQUEST
; /* Storage. */
5076 msg
.cmd
= IPMI_ADD_SEL_ENTRY_CMD
;
5082 int size
= strlen(p
);
5088 data
[2] = 0xf0; /* OEM event without timestamp. */
5089 data
[3] = intf
->addrinfo
[0].address
;
5090 data
[4] = j
++; /* sequence # */
5092 * Always give 11 bytes, so strncpy will fill
5093 * it with zeroes for me.
5095 strncpy(data
+5, p
, 11);
5098 ipmi_panic_request_and_wait(intf
, &addr
, &msg
);
5103 static int has_panicked
;
5105 static int panic_event(struct notifier_block
*this,
5106 unsigned long event
,
5115 /* For every registered interface, set it to run to completion. */
5116 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
5117 if (!intf
->handlers
)
5118 /* Interface is not ready. */
5122 * If we were interrupted while locking xmit_msgs_lock or
5123 * waiting_rcv_msgs_lock, the corresponding list may be
5124 * corrupted. In this case, drop items on the list for
5127 if (!spin_trylock(&intf
->xmit_msgs_lock
)) {
5128 INIT_LIST_HEAD(&intf
->xmit_msgs
);
5129 INIT_LIST_HEAD(&intf
->hp_xmit_msgs
);
5131 spin_unlock(&intf
->xmit_msgs_lock
);
5133 if (!spin_trylock(&intf
->waiting_rcv_msgs_lock
))
5134 INIT_LIST_HEAD(&intf
->waiting_rcv_msgs
);
5136 spin_unlock(&intf
->waiting_rcv_msgs_lock
);
5138 intf
->run_to_completion
= 1;
5139 if (intf
->handlers
->set_run_to_completion
)
5140 intf
->handlers
->set_run_to_completion(intf
->send_info
,
5144 send_panic_events(ptr
);
5149 static struct notifier_block panic_block
= {
5150 .notifier_call
= panic_event
,
5152 .priority
= 200 /* priority: INT_MAX >= x >= 0 */
5155 static int ipmi_init_msghandler(void)
5162 rv
= driver_register(&ipmidriver
.driver
);
5164 pr_err(PFX
"Could not register IPMI driver\n");
5168 pr_info("ipmi message handler version " IPMI_DRIVER_VERSION
"\n");
5170 #ifdef CONFIG_IPMI_PROC_INTERFACE
5171 proc_ipmi_root
= proc_mkdir("ipmi", NULL
);
5172 if (!proc_ipmi_root
) {
5173 pr_err(PFX
"Unable to create IPMI proc dir");
5174 driver_unregister(&ipmidriver
.driver
);
5178 #endif /* CONFIG_IPMI_PROC_INTERFACE */
5180 timer_setup(&ipmi_timer
, ipmi_timeout
, 0);
5181 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
5183 atomic_notifier_chain_register(&panic_notifier_list
, &panic_block
);
5190 static int __init
ipmi_init_msghandler_mod(void)
5192 ipmi_init_msghandler();
5196 static void __exit
cleanup_ipmi(void)
5203 atomic_notifier_chain_unregister(&panic_notifier_list
, &panic_block
);
5206 * This can't be called if any interfaces exist, so no worry
5207 * about shutting down the interfaces.
5211 * Tell the timer to stop, then wait for it to stop. This
5212 * avoids problems with race conditions removing the timer
5215 atomic_inc(&stop_operation
);
5216 del_timer_sync(&ipmi_timer
);
5218 #ifdef CONFIG_IPMI_PROC_INTERFACE
5219 proc_remove(proc_ipmi_root
);
5220 #endif /* CONFIG_IPMI_PROC_INTERFACE */
5222 driver_unregister(&ipmidriver
.driver
);
5226 /* Check for buffer leaks. */
5227 count
= atomic_read(&smi_msg_inuse_count
);
5229 pr_warn(PFX
"SMI message count %d at exit\n", count
);
5230 count
= atomic_read(&recv_msg_inuse_count
);
5232 pr_warn(PFX
"recv message count %d at exit\n", count
);
5234 module_exit(cleanup_ipmi
);
5236 module_init(ipmi_init_msghandler_mod
);
5237 MODULE_LICENSE("GPL");
5238 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5239 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5241 MODULE_VERSION(IPMI_DRIVER_VERSION
);
5242 MODULE_SOFTDEP("post: ipmi_devintf");