drivers/char/ipmi/ipmi_msghandler.c
1 /*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48
49 #define PFX "IPMI message handler: "
50
51 #define IPMI_DRIVER_VERSION "39.0"
52
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
55
56 static int initialized = 0;
57
58 #ifdef CONFIG_PROC_FS
59 static struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
61
62 #define MAX_EVENTS_IN_QUEUE 25
63
64 /* Don't let a message sit in a queue forever, always time it with at least
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
67
68
69 /*
70 * The main "user" data structure.
71 */
72 struct ipmi_user
73 {
74 struct list_head link;
75
76 /* Set to "0" when the user is destroyed. */
77 int valid;
78
79 struct kref refcount;
80
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
83 void *handler_data;
84
85 /* The interface this user is bound to. */
86 ipmi_smi_t intf;
87
88 /* Does this interface receive IPMI events? */
89 int gets_events;
90 };
91
92 struct cmd_rcvr
93 {
94 struct list_head link;
95
96 ipmi_user_t user;
97 unsigned char netfn;
98 unsigned char cmd;
99
100 /*
101 * This is used to form a linked list during mass deletion.
102 * Since this is in an RCU list, we cannot use the link above
103 * or change any data until the RCU period completes. So we
104 * use this next variable during mass deletion so we can have
105 * a list and don't have to wait and restart the search on
106 * every individual deletion of a command. */
107 struct cmd_rcvr *next;
108 };
109
110 struct seq_table
111 {
112 unsigned int inuse : 1;
113 unsigned int broadcast : 1;
114
115 unsigned long timeout;
116 unsigned long orig_timeout;
117 unsigned int retries_left;
118
119 /* To verify that an incoming send message response matches the
120 message it is a response to, we keep a sequence id and
121 increment it every time we send a message. */
122 long seqid;
123
124 /* This is held so we can properly respond to the message on a
125 timeout, and it is used to hold the temporary data for
126 retransmission, too. */
127 struct ipmi_recv_msg *recv_msg;
128 };
129
130 /* Store the information in a msgid (long) to allow us to find a
131 sequence table entry from the msgid. */
132 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
133
134 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
135 do { \
136 seq = ((msgid >> 26) & 0x3f); \
137 seqid = (msgid & 0x3fffff); \
138 } while (0)
139
140 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
141
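/* For example, with seq = 5 and seqid = 0x1234, STORE_SEQ_IN_MSGID
   gives (5 << 26) | 0x1234 = 0x14001234, and GET_SEQ_FROM_MSGID
   recovers seq = 5 and seqid = 0x1234 from that value.  NEXT_SEQID
   keeps seqid within 22 bits, so the narrower mask used by
   GET_SEQ_FROM_MSGID still round-trips every seqid the driver
   generates. */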
142 struct ipmi_channel
143 {
144 unsigned char medium;
145 unsigned char protocol;
146
147 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
148 but may be changed by the user. */
149 unsigned char address;
150
151 /* My LUN. This should generally stay the SMS LUN, but just in
152 case... */
153 unsigned char lun;
154 };
155
156 #ifdef CONFIG_PROC_FS
157 struct ipmi_proc_entry
158 {
159 char *name;
160 struct ipmi_proc_entry *next;
161 };
162 #endif
163
164 struct bmc_device
165 {
166 struct platform_device *dev;
167 struct ipmi_device_id id;
168 unsigned char guid[16];
169 int guid_set;
170
171 struct kref refcount;
172
173 /* bmc device attributes */
174 struct device_attribute device_id_attr;
175 struct device_attribute provides_dev_sdrs_attr;
176 struct device_attribute revision_attr;
177 struct device_attribute firmware_rev_attr;
178 struct device_attribute version_attr;
179 struct device_attribute add_dev_support_attr;
180 struct device_attribute manufacturer_id_attr;
181 struct device_attribute product_id_attr;
182 struct device_attribute guid_attr;
183 struct device_attribute aux_firmware_rev_attr;
184 };
185
186 #define IPMI_IPMB_NUM_SEQ 64
187 #define IPMI_MAX_CHANNELS 16
188 struct ipmi_smi
189 {
190 /* What interface number are we? */
191 int intf_num;
192
193 struct kref refcount;
194
195 /* The list of upper layers that are using me. seq_lock
196 * protects this. */
197 struct list_head users;
198
199 /* Used for wake ups at startup. */
200 wait_queue_head_t waitq;
201
202 struct bmc_device *bmc;
203 char *my_dev_name;
204
205 /* This is the lower-layer's sender routine. */
206 struct ipmi_smi_handlers *handlers;
207 void *send_info;
208
209 #ifdef CONFIG_PROC_FS
210 /* A list of proc entries for this interface. Additions to and
211 removals from this list are protected by proc_entry_lock,
212 declared below. */
213 spinlock_t proc_entry_lock;
214 struct ipmi_proc_entry *proc_entries;
215 #endif
216
217 /* Driver-model device for the system interface. */
218 struct device *si_dev;
219
220 /* A table of sequence numbers for this interface. We use the
221 sequence numbers for IPMB messages that go out of the
222 interface to match them up with their responses. A routine
223 is called periodically to time the items in this list. */
224 spinlock_t seq_lock;
225 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
226 int curr_seq;
227
228 /* Messages that were delayed for some reason (out of memory,
229 for instance) will go in here to be processed later in a
230 periodic timer interrupt. */
231 spinlock_t waiting_msgs_lock;
232 struct list_head waiting_msgs;
233
234 /* The list of command receivers that are registered for commands
235 on this interface. */
236 struct mutex cmd_rcvrs_mutex;
237 struct list_head cmd_rcvrs;
238
239 /* Events that were queued because no one was there to receive
240 them. */
241 spinlock_t events_lock; /* For dealing with event stuff. */
242 struct list_head waiting_events;
243 unsigned int waiting_events_count; /* How many events in queue? */
244
245 /* The event receiver for my BMC, only really used at panic
246 shutdown as a place to store this. */
247 unsigned char event_receiver;
248 unsigned char event_receiver_lun;
249 unsigned char local_sel_device;
250 unsigned char local_event_generator;
251
252 /* A cheap hack: if this is non-NULL and a message to an
253 interface comes in with a NULL user, call this routine with
254 it. Note that the message will still be freed by the
255 caller. This only works on the system interface. */
256 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
257
258 /* When we are scanning the channels for an SMI, this will
259 tell which channel we are scanning. */
260 int curr_channel;
261
262 /* Channel information */
263 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
264
265 /* Proc FS stuff. */
266 struct proc_dir_entry *proc_dir;
267 char proc_dir_name[10];
268
269 spinlock_t counter_lock; /* For making counters atomic. */
270
271 /* Commands we got that were invalid. */
272 unsigned int sent_invalid_commands;
273
274 /* Commands we sent to the MC. */
275 unsigned int sent_local_commands;
276 /* Responses from the MC that were delivered to a user. */
277 unsigned int handled_local_responses;
278 /* Responses from the MC that were not delivered to a user. */
279 unsigned int unhandled_local_responses;
280
281 /* Commands we sent out to the IPMB bus. */
282 unsigned int sent_ipmb_commands;
283 /* Commands sent on the IPMB that had errors on the SEND CMD */
284 unsigned int sent_ipmb_command_errs;
285 /* Each retransmit increments this count. */
286 unsigned int retransmitted_ipmb_commands;
287 /* When a message times out (runs out of retransmits) this is
288 incremented. */
289 unsigned int timed_out_ipmb_commands;
290
291 /* This is like above, but for broadcasts. Broadcasts are
292 *not* included in the above count (they are expected to
293 time out). */
294 unsigned int timed_out_ipmb_broadcasts;
295
296 /* Responses I have sent to the IPMB bus. */
297 unsigned int sent_ipmb_responses;
298
299 /* The response was delivered to the user. */
300 unsigned int handled_ipmb_responses;
301 /* The response had invalid data in it. */
302 unsigned int invalid_ipmb_responses;
303 /* The response didn't have anyone waiting for it. */
304 unsigned int unhandled_ipmb_responses;
305
306 /* Commands we sent out on the LAN. */
307 unsigned int sent_lan_commands;
308 /* Commands sent on the LAN that had errors on the SEND CMD */
309 unsigned int sent_lan_command_errs;
310 /* Each retransmit increments this count. */
311 unsigned int retransmitted_lan_commands;
312 /* When a message times out (runs out of retransmits) this is
313 incremented. */
314 unsigned int timed_out_lan_commands;
315
316 /* Responses I have sent on the LAN. */
317 unsigned int sent_lan_responses;
318
319 /* The response was delivered to the user. */
320 unsigned int handled_lan_responses;
321 /* The response had invalid data in it. */
322 unsigned int invalid_lan_responses;
323 /* The response didn't have anyone waiting for it. */
324 unsigned int unhandled_lan_responses;
325
326 /* The command was delivered to the user. */
327 unsigned int handled_commands;
328 /* The command had invalid data in it. */
329 unsigned int invalid_commands;
330 /* The command didn't have anyone waiting for it. */
331 unsigned int unhandled_commands;
332
333 /* Invalid data in an event. */
334 unsigned int invalid_events;
335 /* Events that were received with the proper format. */
336 unsigned int events;
337 };
338 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
339
340 /* Used to mark an interface entry that cannot be used but is not a
341 * free entry, either, primarily used at creation and deletion time so
342 * a slot doesn't get reused too quickly. */
343 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
344 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
345 || (i == IPMI_INVALID_INTERFACE_ENTRY))
346
347 /**
348 * The driver model view of the IPMI messaging driver.
349 */
350 static struct device_driver ipmidriver = {
351 .name = "ipmi",
352 .bus = &platform_bus_type
353 };
354 static DEFINE_MUTEX(ipmidriver_mutex);
355
356 #define MAX_IPMI_INTERFACES 4
357 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
358
359 /* Directly protects the ipmi_interfaces data structure. */
360 static DEFINE_SPINLOCK(interfaces_lock);
361
362 /* List of watchers that want to know when smi's are added and
363 deleted. */
364 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
365 static DECLARE_RWSEM(smi_watchers_sem);
366
367
368 static void free_recv_msg_list(struct list_head *q)
369 {
370 struct ipmi_recv_msg *msg, *msg2;
371
372 list_for_each_entry_safe(msg, msg2, q, link) {
373 list_del(&msg->link);
374 ipmi_free_recv_msg(msg);
375 }
376 }
377
378 static void clean_up_interface_data(ipmi_smi_t intf)
379 {
380 int i;
381 struct cmd_rcvr *rcvr, *rcvr2;
382 struct list_head list;
383
384 free_recv_msg_list(&intf->waiting_msgs);
385 free_recv_msg_list(&intf->waiting_events);
386
387 /* Wholesale remove all the entries from the list in the
388 * interface and wait for RCU to know that none are in use. */
389 mutex_lock(&intf->cmd_rcvrs_mutex);
390 list_add_rcu(&list, &intf->cmd_rcvrs);
391 list_del_rcu(&intf->cmd_rcvrs);
392 mutex_unlock(&intf->cmd_rcvrs_mutex);
393 synchronize_rcu();
394
395 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
396 kfree(rcvr);
397
398 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
399 if ((intf->seq_table[i].inuse)
400 && (intf->seq_table[i].recv_msg))
401 {
402 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
403 }
404 }
405 }
406
407 static void intf_free(struct kref *ref)
408 {
409 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
410
411 clean_up_interface_data(intf);
412 kfree(intf);
413 }
414
415 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
416 {
417 int i;
418 unsigned long flags;
419
420 down_write(&smi_watchers_sem);
421 list_add(&(watcher->link), &smi_watchers);
422 up_write(&smi_watchers_sem);
423 spin_lock_irqsave(&interfaces_lock, flags);
424 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
425 ipmi_smi_t intf = ipmi_interfaces[i];
426 if (IPMI_INVALID_INTERFACE(intf))
427 continue;
428 spin_unlock_irqrestore(&interfaces_lock, flags);
429 watcher->new_smi(i, intf->si_dev);
430 spin_lock_irqsave(&interfaces_lock, flags);
431 }
432 spin_unlock_irqrestore(&interfaces_lock, flags);
433 return 0;
434 }
435
436 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
437 {
438 down_write(&smi_watchers_sem);
439 list_del(&(watcher->link));
440 up_write(&smi_watchers_sem);
441 return 0;
442 }
443
444 static void
445 call_smi_watchers(int i, struct device *dev)
446 {
447 struct ipmi_smi_watcher *w;
448
449 down_read(&smi_watchers_sem);
450 list_for_each_entry(w, &smi_watchers, link) {
451 if (try_module_get(w->owner)) {
452 w->new_smi(i, dev);
453 module_put(w->owner);
454 }
455 }
456 up_read(&smi_watchers_sem);
457 }
458
459 static int
460 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
461 {
462 if (addr1->addr_type != addr2->addr_type)
463 return 0;
464
465 if (addr1->channel != addr2->channel)
466 return 0;
467
468 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
469 struct ipmi_system_interface_addr *smi_addr1
470 = (struct ipmi_system_interface_addr *) addr1;
471 struct ipmi_system_interface_addr *smi_addr2
472 = (struct ipmi_system_interface_addr *) addr2;
473 return (smi_addr1->lun == smi_addr2->lun);
474 }
475
476 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
477 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
478 {
479 struct ipmi_ipmb_addr *ipmb_addr1
480 = (struct ipmi_ipmb_addr *) addr1;
481 struct ipmi_ipmb_addr *ipmb_addr2
482 = (struct ipmi_ipmb_addr *) addr2;
483
484 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
485 && (ipmb_addr1->lun == ipmb_addr2->lun));
486 }
487
488 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
489 struct ipmi_lan_addr *lan_addr1
490 = (struct ipmi_lan_addr *) addr1;
491 struct ipmi_lan_addr *lan_addr2
492 = (struct ipmi_lan_addr *) addr2;
493
494 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
495 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
496 && (lan_addr1->session_handle
497 == lan_addr2->session_handle)
498 && (lan_addr1->lun == lan_addr2->lun));
499 }
500
501 return 1;
502 }
503
504 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
505 {
506 if (len < sizeof(struct ipmi_system_interface_addr)) {
507 return -EINVAL;
508 }
509
510 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
511 if (addr->channel != IPMI_BMC_CHANNEL)
512 return -EINVAL;
513 return 0;
514 }
515
516 if ((addr->channel == IPMI_BMC_CHANNEL)
517 || (addr->channel >= IPMI_MAX_CHANNELS)
518 || (addr->channel < 0))
519 return -EINVAL;
520
521 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
522 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
523 {
524 if (len < sizeof(struct ipmi_ipmb_addr)) {
525 return -EINVAL;
526 }
527 return 0;
528 }
529
530 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
531 if (len < sizeof(struct ipmi_lan_addr)) {
532 return -EINVAL;
533 }
534 return 0;
535 }
536
537 return -EINVAL;
538 }
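/* For example, a valid address for the local BMC over the system
   interface could be built as (sketch only):

	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel   = IPMI_BMC_CHANNEL,
		.lun       = 0,
	};

   and passes the checks above when at least
   sizeof(struct ipmi_system_interface_addr) bytes are supplied. */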
539
540 unsigned int ipmi_addr_length(int addr_type)
541 {
542 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
543 return sizeof(struct ipmi_system_interface_addr);
544
545 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
546 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
547 {
548 return sizeof(struct ipmi_ipmb_addr);
549 }
550
551 if (addr_type == IPMI_LAN_ADDR_TYPE)
552 return sizeof(struct ipmi_lan_addr);
553
554 return 0;
555 }
556
557 static void deliver_response(struct ipmi_recv_msg *msg)
558 {
559 if (!msg->user) {
560 ipmi_smi_t intf = msg->user_msg_data;
561 unsigned long flags;
562
563 /* Special handling for NULL users. */
564 if (intf->null_user_handler) {
565 intf->null_user_handler(intf, msg);
566 spin_lock_irqsave(&intf->counter_lock, flags);
567 intf->handled_local_responses++;
568 spin_unlock_irqrestore(&intf->counter_lock, flags);
569 } else {
570 /* No handler, so give up. */
571 spin_lock_irqsave(&intf->counter_lock, flags);
572 intf->unhandled_local_responses++;
573 spin_unlock_irqrestore(&intf->counter_lock, flags);
574 }
575 ipmi_free_recv_msg(msg);
576 } else {
577 ipmi_user_t user = msg->user;
578 user->handler->ipmi_recv_hndl(msg, user->handler_data);
579 }
580 }
581
582 /* Find the next sequence number not being used and add the given
583 message with the given timeout to the sequence table. This must be
584 called with the interface's seq_lock held. */
585 static int intf_next_seq(ipmi_smi_t intf,
586 struct ipmi_recv_msg *recv_msg,
587 unsigned long timeout,
588 int retries,
589 int broadcast,
590 unsigned char *seq,
591 long *seqid)
592 {
593 int rv = 0;
594 unsigned int i;
595
596 for (i = intf->curr_seq;
597 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
598 i = (i+1)%IPMI_IPMB_NUM_SEQ)
599 {
600 if (!intf->seq_table[i].inuse)
601 break;
602 }
603
604 if (!intf->seq_table[i].inuse) {
605 intf->seq_table[i].recv_msg = recv_msg;
606
607 /* Start with the maximum timeout, when the send response
608 comes in we will start the real timer. */
609 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
610 intf->seq_table[i].orig_timeout = timeout;
611 intf->seq_table[i].retries_left = retries;
612 intf->seq_table[i].broadcast = broadcast;
613 intf->seq_table[i].inuse = 1;
614 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
615 *seq = i;
616 *seqid = intf->seq_table[i].seqid;
617 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
618 } else {
619 rv = -EAGAIN;
620 }
621
622 return rv;
623 }
624
625 /* Return the receive message for the given sequence number and
626 release the sequence number so it can be reused. Some other data
627 is passed in to be sure the message matches up correctly (to help
628 guard against messages coming in after their timeout and the
629 sequence number being reused). */
630 static int intf_find_seq(ipmi_smi_t intf,
631 unsigned char seq,
632 short channel,
633 unsigned char cmd,
634 unsigned char netfn,
635 struct ipmi_addr *addr,
636 struct ipmi_recv_msg **recv_msg)
637 {
638 int rv = -ENODEV;
639 unsigned long flags;
640
641 if (seq >= IPMI_IPMB_NUM_SEQ)
642 return -EINVAL;
643
644 spin_lock_irqsave(&(intf->seq_lock), flags);
645 if (intf->seq_table[seq].inuse) {
646 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
647
648 if ((msg->addr.channel == channel)
649 && (msg->msg.cmd == cmd)
650 && (msg->msg.netfn == netfn)
651 && (ipmi_addr_equal(addr, &(msg->addr))))
652 {
653 *recv_msg = msg;
654 intf->seq_table[seq].inuse = 0;
655 rv = 0;
656 }
657 }
658 spin_unlock_irqrestore(&(intf->seq_lock), flags);
659
660 return rv;
661 }
662
663
664 /* Start the timer for a specific sequence table entry. */
665 static int intf_start_seq_timer(ipmi_smi_t intf,
666 long msgid)
667 {
668 int rv = -ENODEV;
669 unsigned long flags;
670 unsigned char seq;
671 unsigned long seqid;
672
673
674 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
675
676 spin_lock_irqsave(&(intf->seq_lock), flags);
677 /* We do this verification because the user can be deleted
678 while a message is outstanding. */
679 if ((intf->seq_table[seq].inuse)
680 && (intf->seq_table[seq].seqid == seqid))
681 {
682 struct seq_table *ent = &(intf->seq_table[seq]);
683 ent->timeout = ent->orig_timeout;
684 rv = 0;
685 }
686 spin_unlock_irqrestore(&(intf->seq_lock), flags);
687
688 return rv;
689 }
690
691 /* Got an error for the send message for a specific sequence number. */
692 static int intf_err_seq(ipmi_smi_t intf,
693 long msgid,
694 unsigned int err)
695 {
696 int rv = -ENODEV;
697 unsigned long flags;
698 unsigned char seq;
699 unsigned long seqid;
700 struct ipmi_recv_msg *msg = NULL;
701
702
703 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
704
705 spin_lock_irqsave(&(intf->seq_lock), flags);
706 /* We do this verification because the user can be deleted
707 while a message is outstanding. */
708 if ((intf->seq_table[seq].inuse)
709 && (intf->seq_table[seq].seqid == seqid))
710 {
711 struct seq_table *ent = &(intf->seq_table[seq]);
712
713 ent->inuse = 0;
714 msg = ent->recv_msg;
715 rv = 0;
716 }
717 spin_unlock_irqrestore(&(intf->seq_lock), flags);
718
719 if (msg) {
720 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
721 msg->msg_data[0] = err;
722 msg->msg.netfn |= 1; /* Convert to a response. */
723 msg->msg.data_len = 1;
724 msg->msg.data = msg->msg_data;
725 deliver_response(msg);
726 }
727
728 return rv;
729 }
730
731
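/*
 * A rough sketch of how an in-kernel client typically uses this
 * interface (error handling omitted; my_hndl and my_recv_handler are
 * placeholders for the client's own handler structure and receive
 * callback):
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *	ipmi_user_t user;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	...
 *	rv = ipmi_request_settime(user, &addr, msgid, &msg,
 *				  NULL, 0, -1, 0);
 *	...
 *	ipmi_destroy_user(user);
 *
 * Passing -1 for retries and 0 for retry_time_ms selects the default
 * retry handling described in i_ipmi_request() below.
 */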
732 int ipmi_create_user(unsigned int if_num,
733 struct ipmi_user_hndl *handler,
734 void *handler_data,
735 ipmi_user_t *user)
736 {
737 unsigned long flags;
738 ipmi_user_t new_user;
739 int rv = 0;
740 ipmi_smi_t intf;
741
742 /* There is no module usecount here, because it's not
743 required. Since this can only be used by and called from
744 other modules, they will implicitly use this module, and
745 thus this can't be removed unless the other modules are
746 removed. */
747
748 if (handler == NULL)
749 return -EINVAL;
750
751 /* Make sure the driver is actually initialized, this handles
752 problems with initialization order. */
753 if (!initialized) {
754 rv = ipmi_init_msghandler();
755 if (rv)
756 return rv;
757
758 /* The init code doesn't return an error if it was turned
759 off, but it won't initialize. Check that. */
760 if (!initialized)
761 return -ENODEV;
762 }
763
764 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
765 if (!new_user)
766 return -ENOMEM;
767
768 spin_lock_irqsave(&interfaces_lock, flags);
769 intf = ipmi_interfaces[if_num];
770 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
771 spin_unlock_irqrestore(&interfaces_lock, flags);
772 rv = -EINVAL;
773 goto out_kfree;
774 }
775
776 /* Note that each existing user holds a refcount to the interface. */
777 kref_get(&intf->refcount);
778 spin_unlock_irqrestore(&interfaces_lock, flags);
779
780 kref_init(&new_user->refcount);
781 new_user->handler = handler;
782 new_user->handler_data = handler_data;
783 new_user->intf = intf;
784 new_user->gets_events = 0;
785
786 if (!try_module_get(intf->handlers->owner)) {
787 rv = -ENODEV;
788 goto out_kref;
789 }
790
791 if (intf->handlers->inc_usecount) {
792 rv = intf->handlers->inc_usecount(intf->send_info);
793 if (rv) {
794 module_put(intf->handlers->owner);
795 goto out_kref;
796 }
797 }
798
799 new_user->valid = 1;
800 spin_lock_irqsave(&intf->seq_lock, flags);
801 list_add_rcu(&new_user->link, &intf->users);
802 spin_unlock_irqrestore(&intf->seq_lock, flags);
803 *user = new_user;
804 return 0;
805
806 out_kref:
807 kref_put(&intf->refcount, intf_free);
808 out_kfree:
809 kfree(new_user);
810 return rv;
811 }
812
813 static void free_user(struct kref *ref)
814 {
815 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
816 kfree(user);
817 }
818
819 int ipmi_destroy_user(ipmi_user_t user)
820 {
821 ipmi_smi_t intf = user->intf;
822 int i;
823 unsigned long flags;
824 struct cmd_rcvr *rcvr;
825 struct cmd_rcvr *rcvrs = NULL;
826
827 user->valid = 0;
828
829 /* Remove the user from the interface's sequence table. */
830 spin_lock_irqsave(&intf->seq_lock, flags);
831 list_del_rcu(&user->link);
832
833 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
834 if (intf->seq_table[i].inuse
835 && (intf->seq_table[i].recv_msg->user == user))
836 {
837 intf->seq_table[i].inuse = 0;
838 }
839 }
840 spin_unlock_irqrestore(&intf->seq_lock, flags);
841
842 /*
843 * Remove the user from the command receiver's table. First
844 * we build a list of everything (not using the standard link,
845 * since other things may be using it till we do
846 * synchronize_rcu()) then free everything in that list.
847 */
848 mutex_lock(&intf->cmd_rcvrs_mutex);
849 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
850 if (rcvr->user == user) {
851 list_del_rcu(&rcvr->link);
852 rcvr->next = rcvrs;
853 rcvrs = rcvr;
854 }
855 }
856 mutex_unlock(&intf->cmd_rcvrs_mutex);
857 synchronize_rcu();
858 while (rcvrs) {
859 rcvr = rcvrs;
860 rcvrs = rcvr->next;
861 kfree(rcvr);
862 }
863
864 module_put(intf->handlers->owner);
865 if (intf->handlers->dec_usecount)
866 intf->handlers->dec_usecount(intf->send_info);
867
868 kref_put(&intf->refcount, intf_free);
869
870 kref_put(&user->refcount, free_user);
871
872 return 0;
873 }
874
875 void ipmi_get_version(ipmi_user_t user,
876 unsigned char *major,
877 unsigned char *minor)
878 {
879 *major = ipmi_version_major(&user->intf->bmc->id);
880 *minor = ipmi_version_minor(&user->intf->bmc->id);
881 }
882
883 int ipmi_set_my_address(ipmi_user_t user,
884 unsigned int channel,
885 unsigned char address)
886 {
887 if (channel >= IPMI_MAX_CHANNELS)
888 return -EINVAL;
889 user->intf->channels[channel].address = address;
890 return 0;
891 }
892
893 int ipmi_get_my_address(ipmi_user_t user,
894 unsigned int channel,
895 unsigned char *address)
896 {
897 if (channel >= IPMI_MAX_CHANNELS)
898 return -EINVAL;
899 *address = user->intf->channels[channel].address;
900 return 0;
901 }
902
903 int ipmi_set_my_LUN(ipmi_user_t user,
904 unsigned int channel,
905 unsigned char LUN)
906 {
907 if (channel >= IPMI_MAX_CHANNELS)
908 return -EINVAL;
909 user->intf->channels[channel].lun = LUN & 0x3;
910 return 0;
911 }
912
913 int ipmi_get_my_LUN(ipmi_user_t user,
914 unsigned int channel,
915 unsigned char *address)
916 {
917 if (channel >= IPMI_MAX_CHANNELS)
918 return -EINVAL;
919 *address = user->intf->channels[channel].lun;
920 return 0;
921 }
922
923 int ipmi_set_gets_events(ipmi_user_t user, int val)
924 {
925 unsigned long flags;
926 ipmi_smi_t intf = user->intf;
927 struct ipmi_recv_msg *msg, *msg2;
928 struct list_head msgs;
929
930 INIT_LIST_HEAD(&msgs);
931
932 spin_lock_irqsave(&intf->events_lock, flags);
933 user->gets_events = val;
934
935 if (val) {
936 /* Deliver any queued events. */
937 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
938 list_move_tail(&msg->link, &msgs);
939 intf->waiting_events_count = 0;
940 }
941
942 /* Hold the events lock while doing this to preserve order. */
943 list_for_each_entry_safe(msg, msg2, &msgs, link) {
944 msg->user = user;
945 kref_get(&user->refcount);
946 deliver_response(msg);
947 }
948
949 spin_unlock_irqrestore(&intf->events_lock, flags);
950
951 return 0;
952 }
953
954 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
955 unsigned char netfn,
956 unsigned char cmd)
957 {
958 struct cmd_rcvr *rcvr;
959
960 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
961 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
962 return rcvr;
963 }
964 return NULL;
965 }
966
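/* For example, a client that handles an OEM request (say NetFn 0x30,
   command 0x01) on this interface would register with:

	rv = ipmi_register_for_cmd(user, 0x30, 0x01);

   Matching incoming requests are then delivered to that user's
   receive handler as IPMI_CMD_RECV_TYPE messages. */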
967 int ipmi_register_for_cmd(ipmi_user_t user,
968 unsigned char netfn,
969 unsigned char cmd)
970 {
971 ipmi_smi_t intf = user->intf;
972 struct cmd_rcvr *rcvr;
973 struct cmd_rcvr *entry;
974 int rv = 0;
975
976
977 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
978 if (!rcvr)
979 return -ENOMEM;
980 rcvr->cmd = cmd;
981 rcvr->netfn = netfn;
982 rcvr->user = user;
983
984 mutex_lock(&intf->cmd_rcvrs_mutex);
985 /* Make sure the command/netfn is not already registered. */
986 entry = find_cmd_rcvr(intf, netfn, cmd);
987 if (entry) {
988 rv = -EBUSY;
989 goto out_unlock;
990 }
991
992 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
993
994 out_unlock:
995 mutex_unlock(&intf->cmd_rcvrs_mutex);
996 if (rv)
997 kfree(rcvr);
998
999 return rv;
1000 }
1001
1002 int ipmi_unregister_for_cmd(ipmi_user_t user,
1003 unsigned char netfn,
1004 unsigned char cmd)
1005 {
1006 ipmi_smi_t intf = user->intf;
1007 struct cmd_rcvr *rcvr;
1008
1009 mutex_lock(&intf->cmd_rcvrs_mutex);
1010 /* Find the receiver registered for this command/netfn. */
1011 rcvr = find_cmd_rcvr(intf, netfn, cmd);
1012 if ((rcvr) && (rcvr->user == user)) {
1013 list_del_rcu(&rcvr->link);
1014 mutex_unlock(&intf->cmd_rcvrs_mutex);
1015 synchronize_rcu();
1016 kfree(rcvr);
1017 return 0;
1018 } else {
1019 mutex_unlock(&intf->cmd_rcvrs_mutex);
1020 return -ENOENT;
1021 }
1022 }
1023
1024 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1025 {
1026 ipmi_smi_t intf = user->intf;
1027 intf->handlers->set_run_to_completion(intf->send_info, val);
1028 }
1029
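/* The IPMB checksum is the two's complement of the 8-bit sum of the
   covered bytes, so adding the covered bytes and the checksum always
   yields 0 modulo 256.  For example, for the two bytes 0x20 and 0x18
   the checksum is -(0x20 + 0x18) = 0xc8, and 0x20 + 0x18 + 0xc8 is
   0x100, i.e. 0 in eight bits. */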
1030 static unsigned char
1031 ipmb_checksum(unsigned char *data, int size)
1032 {
1033 unsigned char csum = 0;
1034
1035 for (; size > 0; size--, data++)
1036 csum += *data;
1037
1038 return -csum;
1039 }
1040
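/* The Send Message request built below is laid out roughly as
   follows in the non-broadcast case (i == 0):

	data[0]   NetFn (App request) << 2
	data[1]   Send Message command
	data[2]   channel
	data[3]   destination slave address
	data[4]   request NetFn << 2 | destination LUN
	data[5]   checksum over data[3..4]
	data[6]   source slave address
	data[7]   IPMB sequence << 2 | source LUN
	data[8]   command
	data[9..] message data, followed by a checksum over data[6]
		  onward

   For a broadcast (i == 1) a zero byte is inserted at data[3] and the
   rest of the IPMB portion shifts down by one. */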
1041 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1042 struct kernel_ipmi_msg *msg,
1043 struct ipmi_ipmb_addr *ipmb_addr,
1044 long msgid,
1045 unsigned char ipmb_seq,
1046 int broadcast,
1047 unsigned char source_address,
1048 unsigned char source_lun)
1049 {
1050 int i = broadcast;
1051
1052 /* Format the IPMB header data. */
1053 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1054 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1055 smi_msg->data[2] = ipmb_addr->channel;
1056 if (broadcast)
1057 smi_msg->data[3] = 0;
1058 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1059 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1060 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1061 smi_msg->data[i+6] = source_address;
1062 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1063 smi_msg->data[i+8] = msg->cmd;
1064
1065 /* Now tack on the data to the message. */
1066 if (msg->data_len > 0)
1067 memcpy(&(smi_msg->data[i+9]), msg->data,
1068 msg->data_len);
1069 smi_msg->data_size = msg->data_len + 9;
1070
1071 /* Now calculate the checksum and tack it on. */
1072 smi_msg->data[i+smi_msg->data_size]
1073 = ipmb_checksum(&(smi_msg->data[i+6]),
1074 smi_msg->data_size-6);
1075
1076 /* Add on the checksum size and the offset from the
1077 broadcast. */
1078 smi_msg->data_size += 1 + i;
1079
1080 smi_msg->msgid = msgid;
1081 }
1082
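/* The LAN variant below is laid out similarly, but carries a session
   handle and software IDs instead of IPMB slave addresses:

	data[0]    NetFn (App request) << 2
	data[1]    Send Message command
	data[2]    channel
	data[3]    session handle
	data[4]    remote SWID
	data[5]    request NetFn << 2 | LUN
	data[6]    checksum over data[4..5]
	data[7]    local SWID
	data[8]    sequence << 2 | source LUN
	data[9]    command
	data[10..] message data, followed by a checksum over data[7]
		   onward */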
1083 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1084 struct kernel_ipmi_msg *msg,
1085 struct ipmi_lan_addr *lan_addr,
1086 long msgid,
1087 unsigned char ipmb_seq,
1088 unsigned char source_lun)
1089 {
1090 /* Format the LAN header data. */
1091 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1092 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1093 smi_msg->data[2] = lan_addr->channel;
1094 smi_msg->data[3] = lan_addr->session_handle;
1095 smi_msg->data[4] = lan_addr->remote_SWID;
1096 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1097 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1098 smi_msg->data[7] = lan_addr->local_SWID;
1099 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1100 smi_msg->data[9] = msg->cmd;
1101
1102 /* Now tack on the data to the message. */
1103 if (msg->data_len > 0)
1104 memcpy(&(smi_msg->data[10]), msg->data,
1105 msg->data_len);
1106 smi_msg->data_size = msg->data_len + 10;
1107
1108 /* Now calculate the checksum and tack it on. */
1109 smi_msg->data[smi_msg->data_size]
1110 = ipmb_checksum(&(smi_msg->data[7]),
1111 smi_msg->data_size-7);
1112
1113 /* Add on the checksum size (there is no broadcast offset
1114 for LAN messages). */
1115 smi_msg->data_size += 1;
1116
1117 smi_msg->msgid = msgid;
1118 }
1119
1120 /* Separate from ipmi_request so that the user does not have to be
1121 supplied in certain circumstances (mainly at panic time). If
1122 messages are supplied, they will be freed, even if an error
1123 occurs. */
1124 static int i_ipmi_request(ipmi_user_t user,
1125 ipmi_smi_t intf,
1126 struct ipmi_addr *addr,
1127 long msgid,
1128 struct kernel_ipmi_msg *msg,
1129 void *user_msg_data,
1130 void *supplied_smi,
1131 struct ipmi_recv_msg *supplied_recv,
1132 int priority,
1133 unsigned char source_address,
1134 unsigned char source_lun,
1135 int retries,
1136 unsigned int retry_time_ms)
1137 {
1138 int rv = 0;
1139 struct ipmi_smi_msg *smi_msg;
1140 struct ipmi_recv_msg *recv_msg;
1141 unsigned long flags;
1142
1143
1144 if (supplied_recv) {
1145 recv_msg = supplied_recv;
1146 } else {
1147 recv_msg = ipmi_alloc_recv_msg();
1148 if (recv_msg == NULL) {
1149 return -ENOMEM;
1150 }
1151 }
1152 recv_msg->user_msg_data = user_msg_data;
1153
1154 if (supplied_smi) {
1155 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1156 } else {
1157 smi_msg = ipmi_alloc_smi_msg();
1158 if (smi_msg == NULL) {
1159 ipmi_free_recv_msg(recv_msg);
1160 return -ENOMEM;
1161 }
1162 }
1163
1164 recv_msg->user = user;
1165 if (user)
1166 kref_get(&user->refcount);
1167 recv_msg->msgid = msgid;
1168 /* Store the message to send in the receive message so timeout
1169 responses can get the proper response data. */
1170 recv_msg->msg = *msg;
1171
1172 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1173 struct ipmi_system_interface_addr *smi_addr;
1174
1175 if (msg->netfn & 1) {
1176 /* Responses are not allowed to the SMI. */
1177 rv = -EINVAL;
1178 goto out_err;
1179 }
1180
1181 smi_addr = (struct ipmi_system_interface_addr *) addr;
1182 if (smi_addr->lun > 3) {
1183 spin_lock_irqsave(&intf->counter_lock, flags);
1184 intf->sent_invalid_commands++;
1185 spin_unlock_irqrestore(&intf->counter_lock, flags);
1186 rv = -EINVAL;
1187 goto out_err;
1188 }
1189
1190 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1191
1192 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1193 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1194 || (msg->cmd == IPMI_GET_MSG_CMD)
1195 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1196 {
1197 /* We don't let the user do these, since we manage
1198 the sequence numbers. */
1199 spin_lock_irqsave(&intf->counter_lock, flags);
1200 intf->sent_invalid_commands++;
1201 spin_unlock_irqrestore(&intf->counter_lock, flags);
1202 rv = -EINVAL;
1203 goto out_err;
1204 }
1205
1206 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1207 spin_lock_irqsave(&intf->counter_lock, flags);
1208 intf->sent_invalid_commands++;
1209 spin_unlock_irqrestore(&intf->counter_lock, flags);
1210 rv = -EMSGSIZE;
1211 goto out_err;
1212 }
1213
1214 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1215 smi_msg->data[1] = msg->cmd;
1216 smi_msg->msgid = msgid;
1217 smi_msg->user_data = recv_msg;
1218 if (msg->data_len > 0)
1219 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1220 smi_msg->data_size = msg->data_len + 2;
1221 spin_lock_irqsave(&intf->counter_lock, flags);
1222 intf->sent_local_commands++;
1223 spin_unlock_irqrestore(&intf->counter_lock, flags);
1224 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1225 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1226 {
1227 struct ipmi_ipmb_addr *ipmb_addr;
1228 unsigned char ipmb_seq;
1229 long seqid;
1230 int broadcast = 0;
1231
1232 if (addr->channel >= IPMI_MAX_CHANNELS) {
1233 spin_lock_irqsave(&intf->counter_lock, flags);
1234 intf->sent_invalid_commands++;
1235 spin_unlock_irqrestore(&intf->counter_lock, flags);
1236 rv = -EINVAL;
1237 goto out_err;
1238 }
1239
1240 if (intf->channels[addr->channel].medium
1241 != IPMI_CHANNEL_MEDIUM_IPMB)
1242 {
1243 spin_lock_irqsave(&intf->counter_lock, flags);
1244 intf->sent_invalid_commands++;
1245 spin_unlock_irqrestore(&intf->counter_lock, flags);
1246 rv = -EINVAL;
1247 goto out_err;
1248 }
1249
1250 if (retries < 0) {
1251 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1252 retries = 0; /* Don't retry broadcasts. */
1253 else
1254 retries = 4;
1255 }
1256 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1257 /* Broadcasts add a zero at the beginning of the
1258 message, but a broadcast is otherwise the same as an
1259 IPMB address. */
1260 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1261 broadcast = 1;
1262 }
1263
1264
1265 /* Default to 1 second retries. */
1266 if (retry_time_ms == 0)
1267 retry_time_ms = 1000;
1268
1269 /* 9 for the header and 1 for the checksum, plus
1270 possibly one for the broadcast. */
1271 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1272 spin_lock_irqsave(&intf->counter_lock, flags);
1273 intf->sent_invalid_commands++;
1274 spin_unlock_irqrestore(&intf->counter_lock, flags);
1275 rv = -EMSGSIZE;
1276 goto out_err;
1277 }
1278
1279 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1280 if (ipmb_addr->lun > 3) {
1281 spin_lock_irqsave(&intf->counter_lock, flags);
1282 intf->sent_invalid_commands++;
1283 spin_unlock_irqrestore(&intf->counter_lock, flags);
1284 rv = -EINVAL;
1285 goto out_err;
1286 }
1287
1288 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1289
1290 if (recv_msg->msg.netfn & 0x1) {
1291 /* It's a response, so use the user's sequence
1292 from msgid. */
1293 spin_lock_irqsave(&intf->counter_lock, flags);
1294 intf->sent_ipmb_responses++;
1295 spin_unlock_irqrestore(&intf->counter_lock, flags);
1296 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1297 msgid, broadcast,
1298 source_address, source_lun);
1299
1300 /* Save the receive message so we can use it
1301 to deliver the response. */
1302 smi_msg->user_data = recv_msg;
1303 } else {
1304 /* It's a command, so get a sequence for it. */
1305
1306 spin_lock_irqsave(&(intf->seq_lock), flags);
1307
1308 spin_lock(&intf->counter_lock);
1309 intf->sent_ipmb_commands++;
1310 spin_unlock(&intf->counter_lock);
1311
1312 /* Create a sequence number with a 1 second
1313 timeout and 4 retries. */
1314 rv = intf_next_seq(intf,
1315 recv_msg,
1316 retry_time_ms,
1317 retries,
1318 broadcast,
1319 &ipmb_seq,
1320 &seqid);
1321 if (rv) {
1322 /* We have used up all the sequence numbers,
1323 probably, so abort. */
1324 spin_unlock_irqrestore(&(intf->seq_lock),
1325 flags);
1326 goto out_err;
1327 }
1328
1329 /* Store the sequence number in the message,
1330 so that when the send message response
1331 comes back we can start the timer. */
1332 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1333 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1334 ipmb_seq, broadcast,
1335 source_address, source_lun);
1336
1337 /* Copy the message into the recv message data, so we
1338 can retransmit it later if necessary. */
1339 memcpy(recv_msg->msg_data, smi_msg->data,
1340 smi_msg->data_size);
1341 recv_msg->msg.data = recv_msg->msg_data;
1342 recv_msg->msg.data_len = smi_msg->data_size;
1343
1344 /* We don't unlock until here, because we need
1345 to copy the completed message into the
1346 recv_msg before we release the lock.
1347 Otherwise, race conditions may bite us. I
1348 know that's pretty paranoid, but I prefer
1349 to be correct. */
1350 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1351 }
1352 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1353 struct ipmi_lan_addr *lan_addr;
1354 unsigned char ipmb_seq;
1355 long seqid;
1356
1357 if (addr->channel >= IPMI_MAX_CHANNELS) {
1358 spin_lock_irqsave(&intf->counter_lock, flags);
1359 intf->sent_invalid_commands++;
1360 spin_unlock_irqrestore(&intf->counter_lock, flags);
1361 rv = -EINVAL;
1362 goto out_err;
1363 }
1364
1365 if ((intf->channels[addr->channel].medium
1366 != IPMI_CHANNEL_MEDIUM_8023LAN)
1367 && (intf->channels[addr->channel].medium
1368 != IPMI_CHANNEL_MEDIUM_ASYNC))
1369 {
1370 spin_lock_irqsave(&intf->counter_lock, flags);
1371 intf->sent_invalid_commands++;
1372 spin_unlock_irqrestore(&intf->counter_lock, flags);
1373 rv = -EINVAL;
1374 goto out_err;
1375 }
1376
1377 retries = 4;
1378
1379 /* Default to 1 second retries. */
1380 if (retry_time_ms == 0)
1381 retry_time_ms = 1000;
1382
1383 /* 11 for the header and 1 for the checksum. */
1384 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1385 spin_lock_irqsave(&intf->counter_lock, flags);
1386 intf->sent_invalid_commands++;
1387 spin_unlock_irqrestore(&intf->counter_lock, flags);
1388 rv = -EMSGSIZE;
1389 goto out_err;
1390 }
1391
1392 lan_addr = (struct ipmi_lan_addr *) addr;
1393 if (lan_addr->lun > 3) {
1394 spin_lock_irqsave(&intf->counter_lock, flags);
1395 intf->sent_invalid_commands++;
1396 spin_unlock_irqrestore(&intf->counter_lock, flags);
1397 rv = -EINVAL;
1398 goto out_err;
1399 }
1400
1401 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1402
1403 if (recv_msg->msg.netfn & 0x1) {
1404 /* It's a response, so use the user's sequence
1405 from msgid. */
1406 spin_lock_irqsave(&intf->counter_lock, flags);
1407 intf->sent_lan_responses++;
1408 spin_unlock_irqrestore(&intf->counter_lock, flags);
1409 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1410 msgid, source_lun);
1411
1412 /* Save the receive message so we can use it
1413 to deliver the response. */
1414 smi_msg->user_data = recv_msg;
1415 } else {
1416 /* It's a command, so get a sequence for it. */
1417
1418 spin_lock_irqsave(&(intf->seq_lock), flags);
1419
1420 spin_lock(&intf->counter_lock);
1421 intf->sent_lan_commands++;
1422 spin_unlock(&intf->counter_lock);
1423
1424 /* Create a sequence number with a 1 second
1425 timeout and 4 retries. */
1426 rv = intf_next_seq(intf,
1427 recv_msg,
1428 retry_time_ms,
1429 retries,
1430 0,
1431 &ipmb_seq,
1432 &seqid);
1433 if (rv) {
1434 /* We have used up all the sequence numbers,
1435 probably, so abort. */
1436 spin_unlock_irqrestore(&(intf->seq_lock),
1437 flags);
1438 goto out_err;
1439 }
1440
1441 /* Store the sequence number in the message,
1442 so that when the send message response
1443 comes back we can start the timer. */
1444 format_lan_msg(smi_msg, msg, lan_addr,
1445 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1446 ipmb_seq, source_lun);
1447
1448 /* Copy the message into the recv message data, so we
1449 can retransmit it later if necessary. */
1450 memcpy(recv_msg->msg_data, smi_msg->data,
1451 smi_msg->data_size);
1452 recv_msg->msg.data = recv_msg->msg_data;
1453 recv_msg->msg.data_len = smi_msg->data_size;
1454
1455 /* We don't unlock until here, because we need
1456 to copy the completed message into the
1457 recv_msg before we release the lock.
1458 Otherwise, race conditions may bite us. I
1459 know that's pretty paranoid, but I prefer
1460 to be correct. */
1461 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1462 }
1463 } else {
1464 /* Unknown address type. */
1465 spin_lock_irqsave(&intf->counter_lock, flags);
1466 intf->sent_invalid_commands++;
1467 spin_unlock_irqrestore(&intf->counter_lock, flags);
1468 rv = -EINVAL;
1469 goto out_err;
1470 }
1471
1472 #ifdef DEBUG_MSGING
1473 {
1474 int m;
1475 for (m = 0; m < smi_msg->data_size; m++)
1476 printk(" %2.2x", smi_msg->data[m]);
1477 printk("\n");
1478 }
1479 #endif
1480 intf->handlers->sender(intf->send_info, smi_msg, priority);
1481
1482 return 0;
1483
1484 out_err:
1485 ipmi_free_smi_msg(smi_msg);
1486 ipmi_free_recv_msg(recv_msg);
1487 return rv;
1488 }
1489
1490 static int check_addr(ipmi_smi_t intf,
1491 struct ipmi_addr *addr,
1492 unsigned char *saddr,
1493 unsigned char *lun)
1494 {
1495 if (addr->channel >= IPMI_MAX_CHANNELS)
1496 return -EINVAL;
1497 *lun = intf->channels[addr->channel].lun;
1498 *saddr = intf->channels[addr->channel].address;
1499 return 0;
1500 }
1501
1502 int ipmi_request_settime(ipmi_user_t user,
1503 struct ipmi_addr *addr,
1504 long msgid,
1505 struct kernel_ipmi_msg *msg,
1506 void *user_msg_data,
1507 int priority,
1508 int retries,
1509 unsigned int retry_time_ms)
1510 {
1511 unsigned char saddr, lun;
1512 int rv;
1513
1514 if (!user)
1515 return -EINVAL;
1516 rv = check_addr(user->intf, addr, &saddr, &lun);
1517 if (rv)
1518 return rv;
1519 return i_ipmi_request(user,
1520 user->intf,
1521 addr,
1522 msgid,
1523 msg,
1524 user_msg_data,
1525 NULL, NULL,
1526 priority,
1527 saddr,
1528 lun,
1529 retries,
1530 retry_time_ms);
1531 }
1532
1533 int ipmi_request_supply_msgs(ipmi_user_t user,
1534 struct ipmi_addr *addr,
1535 long msgid,
1536 struct kernel_ipmi_msg *msg,
1537 void *user_msg_data,
1538 void *supplied_smi,
1539 struct ipmi_recv_msg *supplied_recv,
1540 int priority)
1541 {
1542 unsigned char saddr, lun;
1543 int rv;
1544
1545 if (!user)
1546 return -EINVAL;
1547 rv = check_addr(user->intf, addr, &saddr, &lun);
1548 if (rv)
1549 return rv;
1550 return i_ipmi_request(user,
1551 user->intf,
1552 addr,
1553 msgid,
1554 msg,
1555 user_msg_data,
1556 supplied_smi,
1557 supplied_recv,
1558 priority,
1559 saddr,
1560 lun,
1561 -1, 0);
1562 }
1563
1564 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1565 int count, int *eof, void *data)
1566 {
1567 char *out = (char *) page;
1568 ipmi_smi_t intf = data;
1569 int i;
1570 int rv = 0;
1571
1572 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1573 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1574 out[rv-1] = '\n'; /* Replace the final space with a newline */
1575 out[rv] = '\0';
1576 rv++;
1577 return rv;
1578 }
1579
1580 static int version_file_read_proc(char *page, char **start, off_t off,
1581 int count, int *eof, void *data)
1582 {
1583 char *out = (char *) page;
1584 ipmi_smi_t intf = data;
1585
1586 return sprintf(out, "%d.%d\n",
1587 ipmi_version_major(&intf->bmc->id),
1588 ipmi_version_minor(&intf->bmc->id));
1589 }
1590
1591 static int stat_file_read_proc(char *page, char **start, off_t off,
1592 int count, int *eof, void *data)
1593 {
1594 char *out = (char *) page;
1595 ipmi_smi_t intf = data;
1596
1597 out += sprintf(out, "sent_invalid_commands: %d\n",
1598 intf->sent_invalid_commands);
1599 out += sprintf(out, "sent_local_commands: %d\n",
1600 intf->sent_local_commands);
1601 out += sprintf(out, "handled_local_responses: %d\n",
1602 intf->handled_local_responses);
1603 out += sprintf(out, "unhandled_local_responses: %d\n",
1604 intf->unhandled_local_responses);
1605 out += sprintf(out, "sent_ipmb_commands: %d\n",
1606 intf->sent_ipmb_commands);
1607 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1608 intf->sent_ipmb_command_errs);
1609 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1610 intf->retransmitted_ipmb_commands);
1611 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1612 intf->timed_out_ipmb_commands);
1613 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1614 intf->timed_out_ipmb_broadcasts);
1615 out += sprintf(out, "sent_ipmb_responses: %d\n",
1616 intf->sent_ipmb_responses);
1617 out += sprintf(out, "handled_ipmb_responses: %d\n",
1618 intf->handled_ipmb_responses);
1619 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1620 intf->invalid_ipmb_responses);
1621 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1622 intf->unhandled_ipmb_responses);
1623 out += sprintf(out, "sent_lan_commands: %d\n",
1624 intf->sent_lan_commands);
1625 out += sprintf(out, "sent_lan_command_errs: %d\n",
1626 intf->sent_lan_command_errs);
1627 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1628 intf->retransmitted_lan_commands);
1629 out += sprintf(out, "timed_out_lan_commands: %d\n",
1630 intf->timed_out_lan_commands);
1631 out += sprintf(out, "sent_lan_responses: %d\n",
1632 intf->sent_lan_responses);
1633 out += sprintf(out, "handled_lan_responses: %d\n",
1634 intf->handled_lan_responses);
1635 out += sprintf(out, "invalid_lan_responses: %d\n",
1636 intf->invalid_lan_responses);
1637 out += sprintf(out, "unhandled_lan_responses: %d\n",
1638 intf->unhandled_lan_responses);
1639 out += sprintf(out, "handled_commands: %d\n",
1640 intf->handled_commands);
1641 out += sprintf(out, "invalid_commands: %d\n",
1642 intf->invalid_commands);
1643 out += sprintf(out, "unhandled_commands: %d\n",
1644 intf->unhandled_commands);
1645 out += sprintf(out, "invalid_events: %d\n",
1646 intf->invalid_events);
1647 out += sprintf(out, "events: %d\n",
1648 intf->events);
1649
1650 return (out - ((char *) page));
1651 }
1652
1653 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1654 read_proc_t *read_proc, write_proc_t *write_proc,
1655 void *data, struct module *owner)
1656 {
1657 int rv = 0;
1658 #ifdef CONFIG_PROC_FS
1659 struct proc_dir_entry *file;
1660 struct ipmi_proc_entry *entry;
1661
1662 /* Create a list element. */
1663 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1664 if (!entry)
1665 return -ENOMEM;
1666 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1667 if (!entry->name) {
1668 kfree(entry);
1669 return -ENOMEM;
1670 }
1671 strcpy(entry->name, name);
1672
1673 file = create_proc_entry(name, 0, smi->proc_dir);
1674 if (!file) {
1675 kfree(entry->name);
1676 kfree(entry);
1677 rv = -ENOMEM;
1678 } else {
1679 file->nlink = 1;
1680 file->data = data;
1681 file->read_proc = read_proc;
1682 file->write_proc = write_proc;
1683 file->owner = owner;
1684
1685 spin_lock(&smi->proc_entry_lock);
1686 /* Stick it on the list. */
1687 entry->next = smi->proc_entries;
1688 smi->proc_entries = entry;
1689 spin_unlock(&smi->proc_entry_lock);
1690 }
1691 #endif /* CONFIG_PROC_FS */
1692
1693 return rv;
1694 }
1695
1696 static int add_proc_entries(ipmi_smi_t smi, int num)
1697 {
1698 int rv = 0;
1699
1700 #ifdef CONFIG_PROC_FS
1701 sprintf(smi->proc_dir_name, "%d", num);
1702 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1703 if (!smi->proc_dir)
1704 rv = -ENOMEM;
1705 else {
1706 smi->proc_dir->owner = THIS_MODULE;
1707 }
1708
1709 if (rv == 0)
1710 rv = ipmi_smi_add_proc_entry(smi, "stats",
1711 stat_file_read_proc, NULL,
1712 smi, THIS_MODULE);
1713
1714 if (rv == 0)
1715 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1716 ipmb_file_read_proc, NULL,
1717 smi, THIS_MODULE);
1718
1719 if (rv == 0)
1720 rv = ipmi_smi_add_proc_entry(smi, "version",
1721 version_file_read_proc, NULL,
1722 smi, THIS_MODULE);
1723 #endif /* CONFIG_PROC_FS */
1724
1725 return rv;
1726 }
1727
1728 static void remove_proc_entries(ipmi_smi_t smi)
1729 {
1730 #ifdef CONFIG_PROC_FS
1731 struct ipmi_proc_entry *entry;
1732
1733 spin_lock(&smi->proc_entry_lock);
1734 while (smi->proc_entries) {
1735 entry = smi->proc_entries;
1736 smi->proc_entries = entry->next;
1737
1738 remove_proc_entry(entry->name, smi->proc_dir);
1739 kfree(entry->name);
1740 kfree(entry);
1741 }
1742 spin_unlock(&smi->proc_entry_lock);
1743 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1744 #endif /* CONFIG_PROC_FS */
1745 }
1746
1747 static int __find_bmc_guid(struct device *dev, void *data)
1748 {
1749 unsigned char *id = data;
1750 struct bmc_device *bmc = dev_get_drvdata(dev);
1751 return memcmp(bmc->guid, id, 16) == 0;
1752 }
1753
1754 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1755 unsigned char *guid)
1756 {
1757 struct device *dev;
1758
1759 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1760 if (dev)
1761 return dev_get_drvdata(dev);
1762 else
1763 return NULL;
1764 }
1765
1766 struct prod_dev_id {
1767 unsigned int product_id;
1768 unsigned char device_id;
1769 };
1770
1771 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1772 {
1773 struct prod_dev_id *id = data;
1774 struct bmc_device *bmc = dev_get_drvdata(dev);
1775
1776 return (bmc->id.product_id == id->product_id
1777 && bmc->id.device_id == id->device_id);
1779 }
1780
1781 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1782 struct device_driver *drv,
1783 unsigned char product_id, unsigned char device_id)
1784 {
1785 struct prod_dev_id id = {
1786 .product_id = product_id,
1787 .device_id = device_id,
1788 };
1789 struct device *dev;
1790
1791 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1792 if (dev)
1793 return dev_get_drvdata(dev);
1794 else
1795 return NULL;
1796 }
1797
1798 static ssize_t device_id_show(struct device *dev,
1799 struct device_attribute *attr,
1800 char *buf)
1801 {
1802 struct bmc_device *bmc = dev_get_drvdata(dev);
1803
1804 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1805 }
1806
1807 static ssize_t provides_dev_sdrs_show(struct device *dev,
1808 struct device_attribute *attr,
1809 char *buf)
1810 {
1811 struct bmc_device *bmc = dev_get_drvdata(dev);
1812
1813 return snprintf(buf, 10, "%u\n",
1814 (bmc->id.device_revision & 0x80) >> 7);
1815 }
1816
1817 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1818 char *buf)
1819 {
1820 struct bmc_device *bmc = dev_get_drvdata(dev);
1821
1822 return snprintf(buf, 20, "%u\n",
1823 bmc->id.device_revision & 0x0F);
1824 }
1825
1826 static ssize_t firmware_rev_show(struct device *dev,
1827 struct device_attribute *attr,
1828 char *buf)
1829 {
1830 struct bmc_device *bmc = dev_get_drvdata(dev);
1831
1832 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1833 bmc->id.firmware_revision_2);
1834 }
1835
1836 static ssize_t ipmi_version_show(struct device *dev,
1837 struct device_attribute *attr,
1838 char *buf)
1839 {
1840 struct bmc_device *bmc = dev_get_drvdata(dev);
1841
1842 return snprintf(buf, 20, "%u.%u\n",
1843 ipmi_version_major(&bmc->id),
1844 ipmi_version_minor(&bmc->id));
1845 }
1846
1847 static ssize_t add_dev_support_show(struct device *dev,
1848 struct device_attribute *attr,
1849 char *buf)
1850 {
1851 struct bmc_device *bmc = dev_get_drvdata(dev);
1852
1853 return snprintf(buf, 10, "0x%02x\n",
1854 bmc->id.additional_device_support);
1855 }
1856
1857 static ssize_t manufacturer_id_show(struct device *dev,
1858 struct device_attribute *attr,
1859 char *buf)
1860 {
1861 struct bmc_device *bmc = dev_get_drvdata(dev);
1862
1863 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1864 }
1865
1866 static ssize_t product_id_show(struct device *dev,
1867 struct device_attribute *attr,
1868 char *buf)
1869 {
1870 struct bmc_device *bmc = dev_get_drvdata(dev);
1871
1872 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1873 }
1874
1875 static ssize_t aux_firmware_rev_show(struct device *dev,
1876 struct device_attribute *attr,
1877 char *buf)
1878 {
1879 struct bmc_device *bmc = dev_get_drvdata(dev);
1880
1881 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1882 bmc->id.aux_firmware_revision[3],
1883 bmc->id.aux_firmware_revision[2],
1884 bmc->id.aux_firmware_revision[1],
1885 bmc->id.aux_firmware_revision[0]);
1886 }
1887
1888 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1889 char *buf)
1890 {
1891 struct bmc_device *bmc = dev_get_drvdata(dev);
1892
1893 return snprintf(buf, 100, "%Lx%Lx\n",
1894 (long long) bmc->guid[0],
1895 (long long) bmc->guid[8]);
1896 }
1897
1898 static void
1899 cleanup_bmc_device(struct kref *ref)
1900 {
1901 struct bmc_device *bmc;
1902
1903 bmc = container_of(ref, struct bmc_device, refcount);
1904
1905 device_remove_file(&bmc->dev->dev,
1906 &bmc->device_id_attr);
1907 device_remove_file(&bmc->dev->dev,
1908 &bmc->provides_dev_sdrs_attr);
1909 device_remove_file(&bmc->dev->dev,
1910 &bmc->revision_attr);
1911 device_remove_file(&bmc->dev->dev,
1912 &bmc->firmware_rev_attr);
1913 device_remove_file(&bmc->dev->dev,
1914 &bmc->version_attr);
1915 device_remove_file(&bmc->dev->dev,
1916 &bmc->add_dev_support_attr);
1917 device_remove_file(&bmc->dev->dev,
1918 &bmc->manufacturer_id_attr);
1919 device_remove_file(&bmc->dev->dev,
1920 &bmc->product_id_attr);
1921 if (bmc->id.aux_firmware_revision_set)
1922 device_remove_file(&bmc->dev->dev,
1923 &bmc->aux_firmware_rev_attr);
1924 if (bmc->guid_set)
1925 device_remove_file(&bmc->dev->dev,
1926 &bmc->guid_attr);
1927 platform_device_unregister(bmc->dev);
1928 kfree(bmc);
1929 }
1930
1931 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1932 {
1933 struct bmc_device *bmc = intf->bmc;
1934
1935 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1936 if (intf->my_dev_name) {
1937 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1938 kfree(intf->my_dev_name);
1939 intf->my_dev_name = NULL;
1940 }
1941
1942 mutex_lock(&ipmidriver_mutex);
1943 kref_put(&bmc->refcount, cleanup_bmc_device);
1944 mutex_unlock(&ipmidriver_mutex);
1945 }
1946
1947 static int ipmi_bmc_register(ipmi_smi_t intf)
1948 {
1949 int rv;
1950 struct bmc_device *bmc = intf->bmc;
1951 struct bmc_device *old_bmc;
1952 int size;
1953 char dummy[1];
1954
1955 mutex_lock(&ipmidriver_mutex);
1956
1957 /*
1958          * Try to find if there is a bmc_device struct
1959 * representing the interfaced BMC already
1960 */
1961 if (bmc->guid_set)
1962 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1963 else
1964 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
1965 bmc->id.product_id,
1966 bmc->id.device_id);
1967
1968 /*
1969          * If there is already a bmc_device, free the new one,
1970 * otherwise register the new BMC device
1971 */
1972 if (old_bmc) {
1973 kfree(bmc);
1974 intf->bmc = old_bmc;
1975 bmc = old_bmc;
1976
1977 kref_get(&bmc->refcount);
1978 mutex_unlock(&ipmidriver_mutex);
1979
1980 printk(KERN_INFO
1981 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
1982 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
1983 bmc->id.manufacturer_id,
1984 bmc->id.product_id,
1985 bmc->id.device_id);
1986 } else {
1987 bmc->dev = platform_device_alloc("ipmi_bmc",
1988 bmc->id.device_id);
1989 if (!bmc->dev) {
1990 printk(KERN_ERR
1991 "ipmi_msghandler:"
1992 " Unable to allocate platform device\n");
1993 return -ENOMEM;
1994 }
1995 bmc->dev->dev.driver = &ipmidriver;
1996 dev_set_drvdata(&bmc->dev->dev, bmc);
1997 kref_init(&bmc->refcount);
1998
1999 rv = platform_device_register(bmc->dev);
2000 mutex_unlock(&ipmidriver_mutex);
2001 if (rv) {
2002 printk(KERN_ERR
2003 "ipmi_msghandler:"
2004 " Unable to register bmc device: %d\n",
2005 rv);
2006 /* Don't go to out_err, you can only do that if
2007 the device is registered already. */
2008 return rv;
2009 }
2010
2011 bmc->device_id_attr.attr.name = "device_id";
2012 bmc->device_id_attr.attr.owner = THIS_MODULE;
2013 bmc->device_id_attr.attr.mode = S_IRUGO;
2014 bmc->device_id_attr.show = device_id_show;
2015
2016 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2017 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2018 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2019 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2020
2021
2022 bmc->revision_attr.attr.name = "revision";
2023 bmc->revision_attr.attr.owner = THIS_MODULE;
2024 bmc->revision_attr.attr.mode = S_IRUGO;
2025 bmc->revision_attr.show = revision_show;
2026
2027 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2028 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2029 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2030 bmc->firmware_rev_attr.show = firmware_rev_show;
2031
2032 bmc->version_attr.attr.name = "ipmi_version";
2033 bmc->version_attr.attr.owner = THIS_MODULE;
2034 bmc->version_attr.attr.mode = S_IRUGO;
2035 bmc->version_attr.show = ipmi_version_show;
2036
2037 bmc->add_dev_support_attr.attr.name
2038 = "additional_device_support";
2039 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2040 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2041 bmc->add_dev_support_attr.show = add_dev_support_show;
2042
2043 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2044 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2045 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2046 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2047
2048 bmc->product_id_attr.attr.name = "product_id";
2049 bmc->product_id_attr.attr.owner = THIS_MODULE;
2050 bmc->product_id_attr.attr.mode = S_IRUGO;
2051 bmc->product_id_attr.show = product_id_show;
2052
2053 bmc->guid_attr.attr.name = "guid";
2054 bmc->guid_attr.attr.owner = THIS_MODULE;
2055 bmc->guid_attr.attr.mode = S_IRUGO;
2056 bmc->guid_attr.show = guid_show;
2057
2058 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2059 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2060 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2061 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2062
2063 device_create_file(&bmc->dev->dev,
2064 &bmc->device_id_attr);
2065 device_create_file(&bmc->dev->dev,
2066 &bmc->provides_dev_sdrs_attr);
2067 device_create_file(&bmc->dev->dev,
2068 &bmc->revision_attr);
2069 device_create_file(&bmc->dev->dev,
2070 &bmc->firmware_rev_attr);
2071 device_create_file(&bmc->dev->dev,
2072 &bmc->version_attr);
2073 device_create_file(&bmc->dev->dev,
2074 &bmc->add_dev_support_attr);
2075 device_create_file(&bmc->dev->dev,
2076 &bmc->manufacturer_id_attr);
2077 device_create_file(&bmc->dev->dev,
2078 &bmc->product_id_attr);
2079 if (bmc->id.aux_firmware_revision_set)
2080 device_create_file(&bmc->dev->dev,
2081 &bmc->aux_firmware_rev_attr);
2082 if (bmc->guid_set)
2083 device_create_file(&bmc->dev->dev,
2084 &bmc->guid_attr);
2085
2086 printk(KERN_INFO
2087 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2088 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2089 bmc->id.manufacturer_id,
2090 bmc->id.product_id,
2091 bmc->id.device_id);
2092 }
2093
2094 /*
2095 * create symlink from system interface device to bmc device
2096 * and back.
2097 */
2098 rv = sysfs_create_link(&intf->si_dev->kobj,
2099 &bmc->dev->dev.kobj, "bmc");
2100 if (rv) {
2101 printk(KERN_ERR
2102 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2103 rv);
2104 goto out_err;
2105 }
2106
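        /*
         * Note: snprintf() with a zero size writes nothing but still
         * returns the number of characters that would have been written,
         * so this call is just a sizing pass for the "ipmiN" name
         * allocated right below.
         */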
2107 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2108 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2109 if (!intf->my_dev_name) {
2110 rv = -ENOMEM;
2111 printk(KERN_ERR
2112 "ipmi_msghandler: allocate link from BMC: %d\n",
2113 rv);
2114 goto out_err;
2115 }
2116 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2117
2118 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2119 intf->my_dev_name);
2120 if (rv) {
2121 kfree(intf->my_dev_name);
2122 intf->my_dev_name = NULL;
2123 printk(KERN_ERR
2124 "ipmi_msghandler:"
2125 " Unable to create symlink to bmc: %d\n",
2126 rv);
2127 goto out_err;
2128 }
2129
2130 return 0;
2131
2132 out_err:
2133 ipmi_bmc_unregister(intf);
2134 return rv;
2135 }
2136
2137 static int
2138 send_guid_cmd(ipmi_smi_t intf, int chan)
2139 {
2140 struct kernel_ipmi_msg msg;
2141 struct ipmi_system_interface_addr si;
2142
2143 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2144 si.channel = IPMI_BMC_CHANNEL;
2145 si.lun = 0;
2146
2147 msg.netfn = IPMI_NETFN_APP_REQUEST;
2148 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2149 msg.data = NULL;
2150 msg.data_len = 0;
2151 return i_ipmi_request(NULL,
2152 intf,
2153 (struct ipmi_addr *) &si,
2154 0,
2155 &msg,
2156 intf,
2157 NULL,
2158 NULL,
2159 0,
2160 intf->channels[0].address,
2161 intf->channels[0].lun,
2162 -1, 0);
2163 }
2164
2165 static void
2166 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2167 {
2168 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2169 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2170 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2171 /* Not for me */
2172 return;
2173
2174 if (msg->msg.data[0] != 0) {
2175 /* Error from getting the GUID, the BMC doesn't have one. */
2176 intf->bmc->guid_set = 0;
2177 goto out;
2178 }
2179
2180 if (msg->msg.data_len < 17) {
2181 intf->bmc->guid_set = 0;
2182 printk(KERN_WARNING PFX
2183 "guid_handler: The GUID response from the BMC was too"
2184 " short, it was %d but should have been 17. Assuming"
2185 " GUID is not available.\n",
2186 msg->msg.data_len);
2187 goto out;
2188 }
2189
2190 memcpy(intf->bmc->guid, msg->msg.data, 16);
2191 intf->bmc->guid_set = 1;
2192 out:
2193 wake_up(&intf->waitq);
2194 }
2195
2196 static void
2197 get_guid(ipmi_smi_t intf)
2198 {
2199 int rv;
2200
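        /*
         * guid_set doubles as a tri-state flag here: 2 means "request
         * outstanding"; guid_handler() sets it to 1 (GUID received) or 0
         * (no GUID), and the wait_event() below blocks until it leaves 2.
         */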
2201 intf->bmc->guid_set = 0x2;
2202 intf->null_user_handler = guid_handler;
2203 rv = send_guid_cmd(intf, 0);
2204 if (rv)
2205 /* Send failed, no GUID available. */
2206 intf->bmc->guid_set = 0;
2207 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2208 intf->null_user_handler = NULL;
2209 }
2210
2211 static int
2212 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2213 {
2214 struct kernel_ipmi_msg msg;
2215 unsigned char data[1];
2216 struct ipmi_system_interface_addr si;
2217
2218 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2219 si.channel = IPMI_BMC_CHANNEL;
2220 si.lun = 0;
2221
2222 msg.netfn = IPMI_NETFN_APP_REQUEST;
2223 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2224 msg.data = data;
2225 msg.data_len = 1;
2226 data[0] = chan;
2227 return i_ipmi_request(NULL,
2228 intf,
2229 (struct ipmi_addr *) &si,
2230 0,
2231 &msg,
2232 intf,
2233 NULL,
2234 NULL,
2235 0,
2236 intf->channels[0].address,
2237 intf->channels[0].lun,
2238 -1, 0);
2239 }
2240
2241 static void
2242 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2243 {
2244 int rv = 0;
2245 int chan;
2246
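        /*
         * Channel scanning is iterative: each Get Channel Info response
         * for intf->curr_channel triggers the request for the next
         * channel, until IPMI_MAX_CHANNELS is reached and anyone waiting
         * in ipmi_register_smi() is woken up.
         */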
2247 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2248 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2249 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2250 {
2251 /* It's the one we want */
2252 if (msg->msg.data[0] != 0) {
2253 /* Got an error from the channel, just go on. */
2254
2255 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2256 /* If the MC does not support this
2257 command, that is legal. We just
2258 assume it has one IPMB at channel
2259 zero. */
2260 intf->channels[0].medium
2261 = IPMI_CHANNEL_MEDIUM_IPMB;
2262 intf->channels[0].protocol
2263 = IPMI_CHANNEL_PROTOCOL_IPMB;
2264 rv = -ENOSYS;
2265
2266 intf->curr_channel = IPMI_MAX_CHANNELS;
2267 wake_up(&intf->waitq);
2268 goto out;
2269 }
2270 goto next_channel;
2271 }
2272 if (msg->msg.data_len < 4) {
2273 /* Message not big enough, just go on. */
2274 goto next_channel;
2275 }
2276 chan = intf->curr_channel;
2277 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2278 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2279
2280 next_channel:
2281 intf->curr_channel++;
2282 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2283 wake_up(&intf->waitq);
2284 else
2285 rv = send_channel_info_cmd(intf, intf->curr_channel);
2286
2287 if (rv) {
2288 /* Got an error somehow, just give up. */
2289 intf->curr_channel = IPMI_MAX_CHANNELS;
2290 wake_up(&intf->waitq);
2291
2292 printk(KERN_WARNING PFX
2293 "Error sending channel information: %d\n",
2294 rv);
2295 }
2296 }
2297 out:
2298 return;
2299 }
2300
2301 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2302 void *send_info,
2303 struct ipmi_device_id *device_id,
2304 struct device *si_dev,
2305 unsigned char slave_addr)
2306 {
2307 int i, j;
2308 int rv;
2309 ipmi_smi_t intf;
2310 unsigned long flags;
2311 int version_major;
2312 int version_minor;
2313
2314 version_major = ipmi_version_major(device_id);
2315 version_minor = ipmi_version_minor(device_id);
2316
2317 /* Make sure the driver is actually initialized, this handles
2318 problems with initialization order. */
2319 if (!initialized) {
2320 rv = ipmi_init_msghandler();
2321 if (rv)
2322 return rv;
2323 /* The init code doesn't return an error if it was turned
2324 off, but it won't initialize. Check that. */
2325 if (!initialized)
2326 return -ENODEV;
2327 }
2328
2329 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2330 if (!intf)
2331 return -ENOMEM;
2332 memset(intf, 0, sizeof(*intf));
2333 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2334 if (!intf->bmc) {
2335 kfree(intf);
2336 return -ENOMEM;
2337 }
2338 intf->intf_num = -1;
2339 kref_init(&intf->refcount);
2340 intf->bmc->id = *device_id;
2341 intf->si_dev = si_dev;
2342 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2343 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2344 intf->channels[j].lun = 2;
2345 }
2346 if (slave_addr != 0)
2347 intf->channels[0].address = slave_addr;
2348 INIT_LIST_HEAD(&intf->users);
2349 intf->handlers = handlers;
2350 intf->send_info = send_info;
2351 spin_lock_init(&intf->seq_lock);
2352 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2353 intf->seq_table[j].inuse = 0;
2354 intf->seq_table[j].seqid = 0;
2355 }
2356 intf->curr_seq = 0;
2357 #ifdef CONFIG_PROC_FS
2358 spin_lock_init(&intf->proc_entry_lock);
2359 #endif
2360 spin_lock_init(&intf->waiting_msgs_lock);
2361 INIT_LIST_HEAD(&intf->waiting_msgs);
2362 spin_lock_init(&intf->events_lock);
2363 INIT_LIST_HEAD(&intf->waiting_events);
2364 intf->waiting_events_count = 0;
2365 mutex_init(&intf->cmd_rcvrs_mutex);
2366 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2367 init_waitqueue_head(&intf->waitq);
2368
2369 spin_lock_init(&intf->counter_lock);
2370 intf->proc_dir = NULL;
2371
2372 rv = -ENOMEM;
2373 spin_lock_irqsave(&interfaces_lock, flags);
2374 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2375 if (ipmi_interfaces[i] == NULL) {
2376 intf->intf_num = i;
2377 /* Reserve the entry till we are done. */
2378 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2379 rv = 0;
2380 break;
2381 }
2382 }
2383 spin_unlock_irqrestore(&interfaces_lock, flags);
2384 if (rv)
2385 goto out;
2386
2387 rv = handlers->start_processing(send_info, intf);
2388 if (rv)
2389 goto out;
2390
2391 get_guid(intf);
2392
2393 if ((version_major > 1)
2394 || ((version_major == 1) && (version_minor >= 5)))
2395 {
2396 /* Start scanning the channels to see what is
2397 available. */
2398 intf->null_user_handler = channel_handler;
2399 intf->curr_channel = 0;
2400 rv = send_channel_info_cmd(intf, 0);
2401 if (rv)
2402 goto out;
2403
2404 /* Wait for the channel info to be read. */
2405 wait_event(intf->waitq,
2406 intf->curr_channel >= IPMI_MAX_CHANNELS);
2407 intf->null_user_handler = NULL;
2408 } else {
2409 /* Assume a single IPMB channel at zero. */
2410 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2411 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2412 }
2413
2414 if (rv == 0)
2415 rv = add_proc_entries(intf, i);
2416
2417 rv = ipmi_bmc_register(intf);
2418
2419 out:
2420 if (rv) {
2421 if (intf->proc_dir)
2422 remove_proc_entries(intf);
2423 kref_put(&intf->refcount, intf_free);
2424 if (i < MAX_IPMI_INTERFACES) {
2425 spin_lock_irqsave(&interfaces_lock, flags);
2426 ipmi_interfaces[i] = NULL;
2427 spin_unlock_irqrestore(&interfaces_lock, flags);
2428 }
2429 } else {
2430 spin_lock_irqsave(&interfaces_lock, flags);
2431 ipmi_interfaces[i] = intf;
2432 spin_unlock_irqrestore(&interfaces_lock, flags);
2433 call_smi_watchers(i, intf->si_dev);
2434 }
2435
2436 return rv;
2437 }
2438
2439 int ipmi_unregister_smi(ipmi_smi_t intf)
2440 {
2441 int i;
2442 struct ipmi_smi_watcher *w;
2443 unsigned long flags;
2444
2445 ipmi_bmc_unregister(intf);
2446
2447 spin_lock_irqsave(&interfaces_lock, flags);
2448 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2449 if (ipmi_interfaces[i] == intf) {
2450 /* Set the interface number reserved until we
2451 * are done. */
2452 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2453 intf->intf_num = -1;
2454 break;
2455 }
2456 }
2457 spin_unlock_irqrestore(&interfaces_lock,flags);
2458
2459 if (i == MAX_IPMI_INTERFACES)
2460 return -ENODEV;
2461
2462 remove_proc_entries(intf);
2463
2464 /* Call all the watcher interfaces to tell them that
2465 an interface is gone. */
2466 down_read(&smi_watchers_sem);
2467 list_for_each_entry(w, &smi_watchers, link)
2468 w->smi_gone(i);
2469 up_read(&smi_watchers_sem);
2470
2471 /* Allow the entry to be reused now. */
2472 spin_lock_irqsave(&interfaces_lock, flags);
2473 ipmi_interfaces[i] = NULL;
2474 spin_unlock_irqrestore(&interfaces_lock,flags);
2475
2476 kref_put(&intf->refcount, intf_free);
2477 return 0;
2478 }
2479
2480 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2481 struct ipmi_smi_msg *msg)
2482 {
2483 struct ipmi_ipmb_addr ipmb_addr;
2484 struct ipmi_recv_msg *recv_msg;
2485 unsigned long flags;
2486
2487
2488 /* This is 11, not 10, because the response must contain a
2489 * completion code. */
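        /*
         * Rough index map for the Get Message response, derived from the
         * uses below: rsp[2] completion code, rsp[3] channel, rsp[4]
         * netfn/LUN, rsp[6] responder slave address, rsp[7] rqSeq/LUN,
         * rsp[8] command, rsp[9..] data, with a trailing checksum byte.
         */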
2490 if (msg->rsp_size < 11) {
2491 /* Message not big enough, just ignore it. */
2492 spin_lock_irqsave(&intf->counter_lock, flags);
2493 intf->invalid_ipmb_responses++;
2494 spin_unlock_irqrestore(&intf->counter_lock, flags);
2495 return 0;
2496 }
2497
2498 if (msg->rsp[2] != 0) {
2499 /* An error getting the response, just ignore it. */
2500 return 0;
2501 }
2502
2503 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2504 ipmb_addr.slave_addr = msg->rsp[6];
2505 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2506 ipmb_addr.lun = msg->rsp[7] & 3;
2507
2508 /* It's a response from a remote entity. Look up the sequence
2509 number and handle the response. */
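        /*
         * The sequence number lives in the top six bits of rsp[7]; the
         * netfn (rsp[4] >> 2) has its low "response" bit masked off so it
         * matches the netfn stored when the request was sent.
         */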
2510 if (intf_find_seq(intf,
2511 msg->rsp[7] >> 2,
2512 msg->rsp[3] & 0x0f,
2513 msg->rsp[8],
2514 (msg->rsp[4] >> 2) & (~1),
2515 (struct ipmi_addr *) &(ipmb_addr),
2516 &recv_msg))
2517 {
2518 /* We were unable to find the sequence number,
2519 so just nuke the message. */
2520 spin_lock_irqsave(&intf->counter_lock, flags);
2521 intf->unhandled_ipmb_responses++;
2522 spin_unlock_irqrestore(&intf->counter_lock, flags);
2523 return 0;
2524 }
2525
2526 memcpy(recv_msg->msg_data,
2527 &(msg->rsp[9]),
2528 msg->rsp_size - 9);
2529         /* The other fields matched, so no need to set them, except
2530 for netfn, which needs to be the response that was
2531 returned, not the request value. */
2532 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2533 recv_msg->msg.data = recv_msg->msg_data;
2534 recv_msg->msg.data_len = msg->rsp_size - 10;
2535 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2536 spin_lock_irqsave(&intf->counter_lock, flags);
2537 intf->handled_ipmb_responses++;
2538 spin_unlock_irqrestore(&intf->counter_lock, flags);
2539 deliver_response(recv_msg);
2540
2541 return 0;
2542 }
2543
2544 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2545 struct ipmi_smi_msg *msg)
2546 {
2547 struct cmd_rcvr *rcvr;
2548 int rv = 0;
2549 unsigned char netfn;
2550 unsigned char cmd;
2551 ipmi_user_t user = NULL;
2552 struct ipmi_ipmb_addr *ipmb_addr;
2553 struct ipmi_recv_msg *recv_msg;
2554 unsigned long flags;
2555
2556 if (msg->rsp_size < 10) {
2557 /* Message not big enough, just ignore it. */
2558 spin_lock_irqsave(&intf->counter_lock, flags);
2559 intf->invalid_commands++;
2560 spin_unlock_irqrestore(&intf->counter_lock, flags);
2561 return 0;
2562 }
2563
2564 if (msg->rsp[2] != 0) {
2565 /* An error getting the response, just ignore it. */
2566 return 0;
2567 }
2568
2569 netfn = msg->rsp[4] >> 2;
2570 cmd = msg->rsp[8];
2571
2572 rcu_read_lock();
2573 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2574 if (rcvr) {
2575 user = rcvr->user;
2576 kref_get(&user->refcount);
2577 } else
2578 user = NULL;
2579 rcu_read_unlock();
2580
2581 if (user == NULL) {
2582 /* We didn't find a user, deliver an error response. */
2583 spin_lock_irqsave(&intf->counter_lock, flags);
2584 intf->unhandled_commands++;
2585 spin_unlock_irqrestore(&intf->counter_lock, flags);
2586
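                /*
                 * No registered handler: build a Send Message request that
                 * bounces an "invalid command" completion code back to the
                 * originator.  data[3..5] form the IPMB header (requester
                 * address, response netfn/LUN, checksum) and data[6..10]
                 * the body (our address, rqSeq/LUN, cmd, completion code,
                 * checksum), as filled in below.
                 */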
2587 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2588 msg->data[1] = IPMI_SEND_MSG_CMD;
2589 msg->data[2] = msg->rsp[3];
2590 msg->data[3] = msg->rsp[6];
2591 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2592 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2593 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2594 /* rqseq/lun */
2595 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2596 msg->data[8] = msg->rsp[8]; /* cmd */
2597 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2598 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2599 msg->data_size = 11;
2600
2601 #ifdef DEBUG_MSGING
2602 {
2603 int m;
2604 printk("Invalid command:");
2605 for (m = 0; m < msg->data_size; m++)
2606 printk(" %2.2x", msg->data[m]);
2607 printk("\n");
2608 }
2609 #endif
2610 intf->handlers->sender(intf->send_info, msg, 0);
2611
2612 rv = -1; /* We used the message, so return the value that
2613 causes it to not be freed or queued. */
2614 } else {
2615 /* Deliver the message to the user. */
2616 spin_lock_irqsave(&intf->counter_lock, flags);
2617 intf->handled_commands++;
2618 spin_unlock_irqrestore(&intf->counter_lock, flags);
2619
2620 recv_msg = ipmi_alloc_recv_msg();
2621 if (!recv_msg) {
2622 /* We couldn't allocate memory for the
2623 message, so requeue it for handling
2624 later. */
2625 rv = 1;
2626 kref_put(&user->refcount, free_user);
2627 } else {
2628 /* Extract the source address from the data. */
2629 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2630 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2631 ipmb_addr->slave_addr = msg->rsp[6];
2632 ipmb_addr->lun = msg->rsp[7] & 3;
2633 ipmb_addr->channel = msg->rsp[3] & 0xf;
2634
2635 /* Extract the rest of the message information
2636 from the IPMB header.*/
2637 recv_msg->user = user;
2638 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2639 recv_msg->msgid = msg->rsp[7] >> 2;
2640 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2641 recv_msg->msg.cmd = msg->rsp[8];
2642 recv_msg->msg.data = recv_msg->msg_data;
2643
2644 /* We chop off 10, not 9 bytes because the checksum
2645 at the end also needs to be removed. */
2646 recv_msg->msg.data_len = msg->rsp_size - 10;
2647 memcpy(recv_msg->msg_data,
2648 &(msg->rsp[9]),
2649 msg->rsp_size - 10);
2650 deliver_response(recv_msg);
2651 }
2652 }
2653
2654 return rv;
2655 }
2656
2657 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2658 struct ipmi_smi_msg *msg)
2659 {
2660 struct ipmi_lan_addr lan_addr;
2661 struct ipmi_recv_msg *recv_msg;
2662 unsigned long flags;
2663
2664
2665 /* This is 13, not 12, because the response must contain a
2666 * completion code. */
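        /*
         * Index map for the LAN-bridged response as used below: rsp[2]
         * completion code, rsp[3] channel/privilege, rsp[4] session
         * handle, rsp[5] local SWID, rsp[6] netfn, rsp[8] remote SWID,
         * rsp[9] rqSeq/LUN, rsp[10] command, rsp[11..] data.
         */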
2667 if (msg->rsp_size < 13) {
2668 /* Message not big enough, just ignore it. */
2669 spin_lock_irqsave(&intf->counter_lock, flags);
2670 intf->invalid_lan_responses++;
2671 spin_unlock_irqrestore(&intf->counter_lock, flags);
2672 return 0;
2673 }
2674
2675 if (msg->rsp[2] != 0) {
2676 /* An error getting the response, just ignore it. */
2677 return 0;
2678 }
2679
2680 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2681 lan_addr.session_handle = msg->rsp[4];
2682 lan_addr.remote_SWID = msg->rsp[8];
2683 lan_addr.local_SWID = msg->rsp[5];
2684 lan_addr.channel = msg->rsp[3] & 0x0f;
2685 lan_addr.privilege = msg->rsp[3] >> 4;
2686 lan_addr.lun = msg->rsp[9] & 3;
2687
2688 /* It's a response from a remote entity. Look up the sequence
2689 number and handle the response. */
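        /*
         * As in the IPMB case, the sequence number comes from the top
         * bits of the rqSeq/LUN byte (rsp[9] here) and the response bit
         * of the netfn is masked off before the sequence-table lookup.
         */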
2690 if (intf_find_seq(intf,
2691 msg->rsp[9] >> 2,
2692 msg->rsp[3] & 0x0f,
2693 msg->rsp[10],
2694 (msg->rsp[6] >> 2) & (~1),
2695 (struct ipmi_addr *) &(lan_addr),
2696 &recv_msg))
2697 {
2698 /* We were unable to find the sequence number,
2699 so just nuke the message. */
2700 spin_lock_irqsave(&intf->counter_lock, flags);
2701 intf->unhandled_lan_responses++;
2702 spin_unlock_irqrestore(&intf->counter_lock, flags);
2703 return 0;
2704 }
2705
2706 memcpy(recv_msg->msg_data,
2707 &(msg->rsp[11]),
2708 msg->rsp_size - 11);
2709 /* The other fields matched, so no need to set them, except
2710 for netfn, which needs to be the response that was
2711 returned, not the request value. */
2712 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2713 recv_msg->msg.data = recv_msg->msg_data;
2714 recv_msg->msg.data_len = msg->rsp_size - 12;
2715 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2716 spin_lock_irqsave(&intf->counter_lock, flags);
2717 intf->handled_lan_responses++;
2718 spin_unlock_irqrestore(&intf->counter_lock, flags);
2719 deliver_response(recv_msg);
2720
2721 return 0;
2722 }
2723
2724 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2725 struct ipmi_smi_msg *msg)
2726 {
2727 struct cmd_rcvr *rcvr;
2728 int rv = 0;
2729 unsigned char netfn;
2730 unsigned char cmd;
2731 ipmi_user_t user = NULL;
2732 struct ipmi_lan_addr *lan_addr;
2733 struct ipmi_recv_msg *recv_msg;
2734 unsigned long flags;
2735
2736 if (msg->rsp_size < 12) {
2737 /* Message not big enough, just ignore it. */
2738 spin_lock_irqsave(&intf->counter_lock, flags);
2739 intf->invalid_commands++;
2740 spin_unlock_irqrestore(&intf->counter_lock, flags);
2741 return 0;
2742 }
2743
2744 if (msg->rsp[2] != 0) {
2745 /* An error getting the response, just ignore it. */
2746 return 0;
2747 }
2748
2749 netfn = msg->rsp[6] >> 2;
2750 cmd = msg->rsp[10];
2751
2752 rcu_read_lock();
2753 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2754 if (rcvr) {
2755 user = rcvr->user;
2756 kref_get(&user->refcount);
2757 } else
2758 user = NULL;
2759 rcu_read_unlock();
2760
2761 if (user == NULL) {
2762 /* We didn't find a user, just give up. */
2763 spin_lock_irqsave(&intf->counter_lock, flags);
2764 intf->unhandled_commands++;
2765 spin_unlock_irqrestore(&intf->counter_lock, flags);
2766
2767 rv = 0; /* Don't do anything with these messages, just
2768 allow them to be freed. */
2769 } else {
2770 /* Deliver the message to the user. */
2771 spin_lock_irqsave(&intf->counter_lock, flags);
2772 intf->handled_commands++;
2773 spin_unlock_irqrestore(&intf->counter_lock, flags);
2774
2775 recv_msg = ipmi_alloc_recv_msg();
2776 if (!recv_msg) {
2777 /* We couldn't allocate memory for the
2778 message, so requeue it for handling
2779 later. */
2780 rv = 1;
2781 kref_put(&user->refcount, free_user);
2782 } else {
2783 /* Extract the source address from the data. */
2784 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2785 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2786 lan_addr->session_handle = msg->rsp[4];
2787 lan_addr->remote_SWID = msg->rsp[8];
2788 lan_addr->local_SWID = msg->rsp[5];
2789 lan_addr->lun = msg->rsp[9] & 3;
2790 lan_addr->channel = msg->rsp[3] & 0xf;
2791 lan_addr->privilege = msg->rsp[3] >> 4;
2792
2793 /* Extract the rest of the message information
2794 from the IPMB header.*/
2795 recv_msg->user = user;
2796 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2797 recv_msg->msgid = msg->rsp[9] >> 2;
2798 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2799 recv_msg->msg.cmd = msg->rsp[10];
2800 recv_msg->msg.data = recv_msg->msg_data;
2801
2802 /* We chop off 12, not 11 bytes because the checksum
2803 at the end also needs to be removed. */
2804 recv_msg->msg.data_len = msg->rsp_size - 12;
2805 memcpy(recv_msg->msg_data,
2806 &(msg->rsp[11]),
2807 msg->rsp_size - 12);
2808 deliver_response(recv_msg);
2809 }
2810 }
2811
2812 return rv;
2813 }
2814
2815 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2816 struct ipmi_smi_msg *msg)
2817 {
2818 struct ipmi_system_interface_addr *smi_addr;
2819
2820 recv_msg->msgid = 0;
2821 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2822 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2823 smi_addr->channel = IPMI_BMC_CHANNEL;
2824 smi_addr->lun = msg->rsp[0] & 3;
2825 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2826 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2827 recv_msg->msg.cmd = msg->rsp[1];
2828 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2829 recv_msg->msg.data = recv_msg->msg_data;
2830 recv_msg->msg.data_len = msg->rsp_size - 3;
2831 }
2832
2833 static int handle_read_event_rsp(ipmi_smi_t intf,
2834 struct ipmi_smi_msg *msg)
2835 {
2836 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2837 struct list_head msgs;
2838 ipmi_user_t user;
2839 int rv = 0;
2840 int deliver_count = 0;
2841 unsigned long flags;
2842
2843 if (msg->rsp_size < 19) {
2844 /* Message is too small to be an IPMB event. */
2845 spin_lock_irqsave(&intf->counter_lock, flags);
2846 intf->invalid_events++;
2847 spin_unlock_irqrestore(&intf->counter_lock, flags);
2848 return 0;
2849 }
2850
2851 if (msg->rsp[2] != 0) {
2852 /* An error getting the event, just ignore it. */
2853 return 0;
2854 }
2855
2856 INIT_LIST_HEAD(&msgs);
2857
2858 spin_lock_irqsave(&intf->events_lock, flags);
2859
2860 spin_lock(&intf->counter_lock);
2861 intf->events++;
2862 spin_unlock(&intf->counter_lock);
2863
2864 /* Allocate and fill in one message for every user that is getting
2865 events. */
2866 rcu_read_lock();
2867 list_for_each_entry_rcu(user, &intf->users, link) {
2868 if (!user->gets_events)
2869 continue;
2870
2871 recv_msg = ipmi_alloc_recv_msg();
2872 if (!recv_msg) {
2873 rcu_read_unlock();
2874 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2875 link) {
2876 list_del(&recv_msg->link);
2877 ipmi_free_recv_msg(recv_msg);
2878 }
2879 /* We couldn't allocate memory for the
2880 message, so requeue it for handling
2881 later. */
2882 rv = 1;
2883 goto out;
2884 }
2885
2886 deliver_count++;
2887
2888 copy_event_into_recv_msg(recv_msg, msg);
2889 recv_msg->user = user;
2890 kref_get(&user->refcount);
2891 list_add_tail(&(recv_msg->link), &msgs);
2892 }
2893 rcu_read_unlock();
2894
2895 if (deliver_count) {
2896 /* Now deliver all the messages. */
2897 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2898 list_del(&recv_msg->link);
2899 deliver_response(recv_msg);
2900 }
2901 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2902                 /* No one to receive the message, put it in the queue if there
2903                    aren't already too many things in the queue. */
2904 recv_msg = ipmi_alloc_recv_msg();
2905 if (!recv_msg) {
2906 /* We couldn't allocate memory for the
2907 message, so requeue it for handling
2908 later. */
2909 rv = 1;
2910 goto out;
2911 }
2912
2913 copy_event_into_recv_msg(recv_msg, msg);
2914 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2915 intf->waiting_events_count++;
2916 } else {
2917                 /* There are too many things in the queue, discard this
2918 message. */
2919 printk(KERN_WARNING PFX "Event queue full, discarding an"
2920 " incoming event\n");
2921 }
2922
2923 out:
2924 spin_unlock_irqrestore(&(intf->events_lock), flags);
2925
2926 return rv;
2927 }
2928
2929 static int handle_bmc_rsp(ipmi_smi_t intf,
2930 struct ipmi_smi_msg *msg)
2931 {
2932 struct ipmi_recv_msg *recv_msg;
2933 unsigned long flags;
2934 struct ipmi_user *user;
2935
2936 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2937 if (recv_msg == NULL)
2938 {
2939 printk(KERN_WARNING"IPMI message received with no owner. This\n"
2940 "could be because of a malformed message, or\n"
2941 "because of a hardware error. Contact your\n"
2942 "hardware vender for assistance\n");
2943 return 0;
2944 }
2945
2946 user = recv_msg->user;
2947 /* Make sure the user still exists. */
2948 if (user && !user->valid) {
2949 /* The user for the message went away, so give up. */
2950 spin_lock_irqsave(&intf->counter_lock, flags);
2951 intf->unhandled_local_responses++;
2952 spin_unlock_irqrestore(&intf->counter_lock, flags);
2953 ipmi_free_recv_msg(recv_msg);
2954 } else {
2955 struct ipmi_system_interface_addr *smi_addr;
2956
2957 spin_lock_irqsave(&intf->counter_lock, flags);
2958 intf->handled_local_responses++;
2959 spin_unlock_irqrestore(&intf->counter_lock, flags);
2960 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2961 recv_msg->msgid = msg->msgid;
2962 smi_addr = ((struct ipmi_system_interface_addr *)
2963 &(recv_msg->addr));
2964 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2965 smi_addr->channel = IPMI_BMC_CHANNEL;
2966 smi_addr->lun = msg->rsp[0] & 3;
2967 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2968 recv_msg->msg.cmd = msg->rsp[1];
2969 memcpy(recv_msg->msg_data,
2970 &(msg->rsp[2]),
2971 msg->rsp_size - 2);
2972 recv_msg->msg.data = recv_msg->msg_data;
2973 recv_msg->msg.data_len = msg->rsp_size - 2;
2974 deliver_response(recv_msg);
2975 }
2976
2977 return 0;
2978 }
2979
2980 /* Handle a new message. Return 1 if the message should be requeued,
2981 0 if the message should be freed, or -1 if the message should not
2982 be freed or requeued. */
2983 static int handle_new_recv_msg(ipmi_smi_t intf,
2984 struct ipmi_smi_msg *msg)
2985 {
2986 int requeue;
2987 int chan;
2988
2989 #ifdef DEBUG_MSGING
2990 int m;
2991 printk("Recv:");
2992 for (m = 0; m < msg->rsp_size; m++)
2993 printk(" %2.2x", msg->rsp[m]);
2994 printk("\n");
2995 #endif
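        /*
         * Sanity-check the response against the request it should match:
         * it must be at least two bytes and its netfn/cmd must line up.
         * msg->data[0] >> 2 is the request netfn; OR-ing in 1 gives the
         * expected response netfn, since IPMI responses use the request
         * netfn with the low bit set.
         */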
2996 if (msg->rsp_size < 2) {
2997 /* Message is too small to be correct. */
2998 printk(KERN_WARNING PFX "BMC returned to small a message"
2999 " for netfn %x cmd %x, got %d bytes\n",
3000 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3001
3002 /* Generate an error response for the message. */
3003 msg->rsp[0] = msg->data[0] | (1 << 2);
3004 msg->rsp[1] = msg->data[1];
3005 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3006 msg->rsp_size = 3;
3007 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3008 || (msg->rsp[1] != msg->data[1])) /* Command */
3009 {
3010 /* The response is not even marginally correct. */
3011 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3012 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3013 (msg->data[0] >> 2) | 1, msg->data[1],
3014 msg->rsp[0] >> 2, msg->rsp[1]);
3015
3016 /* Generate an error response for the message. */
3017 msg->rsp[0] = msg->data[0] | (1 << 2);
3018 msg->rsp[1] = msg->data[1];
3019 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3020 msg->rsp_size = 3;
3021 }
3022
3023 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3024 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3025 && (msg->user_data != NULL))
3026 {
3027 /* It's a response to a response we sent. For this we
3028 deliver a send message response to the user. */
3029 struct ipmi_recv_msg *recv_msg = msg->user_data;
3030
3031 requeue = 0;
3032 if (msg->rsp_size < 2)
3033 /* Message is too small to be correct. */
3034 goto out;
3035
3036 chan = msg->data[2] & 0x0f;
3037 if (chan >= IPMI_MAX_CHANNELS)
3038 /* Invalid channel number */
3039 goto out;
3040
3041 if (!recv_msg)
3042 goto out;
3043
3044 /* Make sure the user still exists. */
3045 if (!recv_msg->user || !recv_msg->user->valid)
3046 goto out;
3047
3048 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3049 recv_msg->msg.data = recv_msg->msg_data;
3050 recv_msg->msg.data_len = 1;
3051 recv_msg->msg_data[0] = msg->rsp[2];
3052 deliver_response(recv_msg);
3053 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3054 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3055 {
3056 /* It's from the receive queue. */
3057 chan = msg->rsp[3] & 0xf;
3058 if (chan >= IPMI_MAX_CHANNELS) {
3059 /* Invalid channel number */
3060 requeue = 0;
3061 goto out;
3062 }
3063
3064 switch (intf->channels[chan].medium) {
3065 case IPMI_CHANNEL_MEDIUM_IPMB:
3066 if (msg->rsp[4] & 0x04) {
3067 /* It's a response, so find the
3068 requesting message and send it up. */
3069 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3070 } else {
3071 /* It's a command to the SMS from some other
3072 entity. Handle that. */
3073 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3074 }
3075 break;
3076
3077 case IPMI_CHANNEL_MEDIUM_8023LAN:
3078 case IPMI_CHANNEL_MEDIUM_ASYNC:
3079 if (msg->rsp[6] & 0x04) {
3080 /* It's a response, so find the
3081 requesting message and send it up. */
3082 requeue = handle_lan_get_msg_rsp(intf, msg);
3083 } else {
3084 /* It's a command to the SMS from some other
3085 entity. Handle that. */
3086 requeue = handle_lan_get_msg_cmd(intf, msg);
3087 }
3088 break;
3089
3090 default:
3091 /* We don't handle the channel type, so just
3092 * free the message. */
3093 requeue = 0;
3094 }
3095
3096 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3097 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3098 {
3099                 /* It's an asynchronous event. */
3100 requeue = handle_read_event_rsp(intf, msg);
3101 } else {
3102 /* It's a response from the local BMC. */
3103 requeue = handle_bmc_rsp(intf, msg);
3104 }
3105
3106 out:
3107 return requeue;
3108 }
3109
3110 /* Handle a new message from the lower layer. */
3111 void ipmi_smi_msg_received(ipmi_smi_t intf,
3112 struct ipmi_smi_msg *msg)
3113 {
3114 unsigned long flags;
3115 int rv;
3116
3117
3118 if ((msg->data_size >= 2)
3119 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3120 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3121 && (msg->user_data == NULL))
3122 {
3123 /* This is the local response to a command send, start
3124 the timer for these. The user_data will not be
3125 NULL if this is a response send, and we will let
3126 response sends just go through. */
3127
3128 /* Check for errors, if we get certain errors (ones
3129 that mean basically we can try again later), we
3130 ignore them and start the timer. Otherwise we
3131 report the error immediately. */
3132 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3133 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3134 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3135 {
3136 int chan = msg->rsp[3] & 0xf;
3137
3138 /* Got an error sending the message, handle it. */
3139 spin_lock_irqsave(&intf->counter_lock, flags);
3140 if (chan >= IPMI_MAX_CHANNELS)
3141 ; /* This shouldn't happen */
3142 else if ((intf->channels[chan].medium
3143 == IPMI_CHANNEL_MEDIUM_8023LAN)
3144 || (intf->channels[chan].medium
3145 == IPMI_CHANNEL_MEDIUM_ASYNC))
3146 intf->sent_lan_command_errs++;
3147 else
3148 intf->sent_ipmb_command_errs++;
3149 spin_unlock_irqrestore(&intf->counter_lock, flags);
3150 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3151 } else {
3152 /* The message was sent, start the timer. */
3153 intf_start_seq_timer(intf, msg->msgid);
3154 }
3155
3156 ipmi_free_smi_msg(msg);
3157 goto out;
3158 }
3159
3160 /* To preserve message order, if the list is not empty, we
3161 tack this message onto the end of the list. */
3162 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3163 if (!list_empty(&intf->waiting_msgs)) {
3164 list_add_tail(&msg->link, &intf->waiting_msgs);
3165 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3166 goto out;
3167 }
3168 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3169
3170 rv = handle_new_recv_msg(intf, msg);
3171 if (rv > 0) {
3172 /* Could not handle the message now, just add it to a
3173 list to handle later. */
3174 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3175 list_add_tail(&msg->link, &intf->waiting_msgs);
3176 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3177 } else if (rv == 0) {
3178 ipmi_free_smi_msg(msg);
3179 }
3180
3181 out:
3182 return;
3183 }
3184
3185 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3186 {
3187 ipmi_user_t user;
3188
3189 rcu_read_lock();
3190 list_for_each_entry_rcu(user, &intf->users, link) {
3191 if (!user->handler->ipmi_watchdog_pretimeout)
3192 continue;
3193
3194 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3195 }
3196 rcu_read_unlock();
3197 }
3198
3199 static void
3200 handle_msg_timeout(struct ipmi_recv_msg *msg)
3201 {
3202 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3203 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3204 msg->msg.netfn |= 1; /* Convert to a response. */
3205 msg->msg.data_len = 1;
3206 msg->msg.data = msg->msg_data;
3207 deliver_response(msg);
3208 }
3209
3210 static struct ipmi_smi_msg *
3211 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3212 unsigned char seq, long seqid)
3213 {
3214 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3215 if (!smi_msg)
3216 /* If we can't allocate the message, then just return, we
3217 get 4 retries, so this should be ok. */
3218 return NULL;
3219
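        /*
         * Rebuild the SMI message from the saved receive message and
         * re-encode the sequence slot and seqid into msgid, so the retry
         * is tracked under the same sequence table entry as the original.
         */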
3220 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3221 smi_msg->data_size = recv_msg->msg.data_len;
3222 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3223
3224 #ifdef DEBUG_MSGING
3225 {
3226 int m;
3227 printk("Resend: ");
3228 for (m = 0; m < smi_msg->data_size; m++)
3229 printk(" %2.2x", smi_msg->data[m]);
3230 printk("\n");
3231 }
3232 #endif
3233 return smi_msg;
3234 }
3235
3236 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3237 struct list_head *timeouts, long timeout_period,
3238 int slot, unsigned long *flags)
3239 {
3240 struct ipmi_recv_msg *msg;
3241
3242 if (!ent->inuse)
3243 return;
3244
3245 ent->timeout -= timeout_period;
3246 if (ent->timeout > 0)
3247 return;
3248
3249 if (ent->retries_left == 0) {
3250 /* The message has used all its retries. */
3251 ent->inuse = 0;
3252 msg = ent->recv_msg;
3253 list_add_tail(&msg->link, timeouts);
3254 spin_lock(&intf->counter_lock);
3255 if (ent->broadcast)
3256 intf->timed_out_ipmb_broadcasts++;
3257 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3258 intf->timed_out_lan_commands++;
3259 else
3260 intf->timed_out_ipmb_commands++;
3261 spin_unlock(&intf->counter_lock);
3262 } else {
3263 struct ipmi_smi_msg *smi_msg;
3264 /* More retries, send again. */
3265
3266 /* Start with the max timer, set to normal
3267 timer after the message is sent. */
3268 ent->timeout = MAX_MSG_TIMEOUT;
3269 ent->retries_left--;
3270 spin_lock(&intf->counter_lock);
3271 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3272 intf->retransmitted_lan_commands++;
3273 else
3274 intf->retransmitted_ipmb_commands++;
3275 spin_unlock(&intf->counter_lock);
3276
3277 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3278 ent->seqid);
3279 if (!smi_msg)
3280 return;
3281
3282 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3283 /* Send the new message. We send with a zero
3284 * priority. It timed out, I doubt time is
3285 * that critical now, and high priority
3286 * messages are really only for messages to the
3287 * local MC, which don't get resent. */
3288 intf->handlers->sender(intf->send_info,
3289 smi_msg, 0);
3290 spin_lock_irqsave(&intf->seq_lock, *flags);
3291 }
3292 }
3293
3294 static void ipmi_timeout_handler(long timeout_period)
3295 {
3296 ipmi_smi_t intf;
3297 struct list_head timeouts;
3298 struct ipmi_recv_msg *msg, *msg2;
3299 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3300 unsigned long flags;
3301 int i, j;
3302
3303 INIT_LIST_HEAD(&timeouts);
3304
3305 spin_lock(&interfaces_lock);
3306 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3307 intf = ipmi_interfaces[i];
3308 if (IPMI_INVALID_INTERFACE(intf))
3309 continue;
3310 kref_get(&intf->refcount);
3311 spin_unlock(&interfaces_lock);
3312
3313 /* See if any waiting messages need to be processed. */
3314 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3315 list_for_each_entry_safe(smi_msg, smi_msg2,
3316 &intf->waiting_msgs, link) {
3317 if (!handle_new_recv_msg(intf, smi_msg)) {
3318 list_del(&smi_msg->link);
3319 ipmi_free_smi_msg(smi_msg);
3320 } else {
3321 /* To preserve message order, quit if we
3322 can't handle a message. */
3323 break;
3324 }
3325 }
3326 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3327
3328 /* Go through the seq table and find any messages that
3329 have timed out, putting them in the timeouts
3330 list. */
3331 spin_lock_irqsave(&intf->seq_lock, flags);
3332 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3333 check_msg_timeout(intf, &(intf->seq_table[j]),
3334 &timeouts, timeout_period, j,
3335 &flags);
3336 spin_unlock_irqrestore(&intf->seq_lock, flags);
3337
3338 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3339 handle_msg_timeout(msg);
3340
3341 kref_put(&intf->refcount, intf_free);
3342 spin_lock(&interfaces_lock);
3343 }
3344 spin_unlock(&interfaces_lock);
3345 }
3346
3347 static void ipmi_request_event(void)
3348 {
3349 ipmi_smi_t intf;
3350 int i;
3351
3352 spin_lock(&interfaces_lock);
3353 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3354 intf = ipmi_interfaces[i];
3355 if (IPMI_INVALID_INTERFACE(intf))
3356 continue;
3357
3358 intf->handlers->request_events(intf->send_info);
3359 }
3360 spin_unlock(&interfaces_lock);
3361 }
3362
3363 static struct timer_list ipmi_timer;
3364
3365 /* Call every ~100 ms. */
3366 #define IPMI_TIMEOUT_TIME 100
3367
3368 /* How many jiffies does it take to get to the timeout time. */
3369 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3370
3371 /* Request events from the queue every second (this is the number of
3372 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3373 future, IPMI will add a way to know immediately if an event is in
3374 the queue and this silliness can go away. */
3375 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
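/* With IPMI_TIMEOUT_TIME at 100 ms this works out to 1000/100 = 10
   timer ticks per event request (roughly once a second), and
   IPMI_TIMEOUT_JIFFIES to HZ/10 jiffies per tick. */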
3376
3377 static atomic_t stop_operation;
3378 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3379
3380 static void ipmi_timeout(unsigned long data)
3381 {
3382 if (atomic_read(&stop_operation))
3383 return;
3384
3385 ticks_to_req_ev--;
3386 if (ticks_to_req_ev == 0) {
3387 ipmi_request_event();
3388 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3389 }
3390
3391 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3392
3393 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3394 }
3395
3396
3397 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3398 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3399
3400 /* FIXME - convert these to slabs. */
3401 static void free_smi_msg(struct ipmi_smi_msg *msg)
3402 {
3403 atomic_dec(&smi_msg_inuse_count);
3404 kfree(msg);
3405 }
3406
3407 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3408 {
3409 struct ipmi_smi_msg *rv;
3410 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3411 if (rv) {
3412 rv->done = free_smi_msg;
3413 rv->user_data = NULL;
3414 atomic_inc(&smi_msg_inuse_count);
3415 }
3416 return rv;
3417 }
3418
3419 static void free_recv_msg(struct ipmi_recv_msg *msg)
3420 {
3421 atomic_dec(&recv_msg_inuse_count);
3422 kfree(msg);
3423 }
3424
3425 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3426 {
3427 struct ipmi_recv_msg *rv;
3428
3429 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3430 if (rv) {
3431 rv->done = free_recv_msg;
3432 atomic_inc(&recv_msg_inuse_count);
3433 }
3434 return rv;
3435 }
3436
3437 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3438 {
3439 if (msg->user)
3440 kref_put(&msg->user->refcount, free_user);
3441 msg->done(msg);
3442 }
3443
3444 #ifdef CONFIG_IPMI_PANIC_EVENT
3445
3446 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3447 {
3448 }
3449
3450 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3451 {
3452 }
3453
3454 #ifdef CONFIG_IPMI_PANIC_STRING
3455 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3456 {
3457 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3458 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3459 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3460 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3461 {
3462 /* A get event receiver command, save it. */
3463 intf->event_receiver = msg->msg.data[1];
3464 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3465 }
3466 }
3467
3468 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3469 {
3470 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3471 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3472 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3473 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3474 {
3475 /* A get device id command, save if we are an event
3476 receiver or generator. */
3477 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3478 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3479 }
3480 }
3481 #endif
3482
3483 static void send_panic_events(char *str)
3484 {
3485 struct kernel_ipmi_msg msg;
3486 ipmi_smi_t intf;
3487 unsigned char data[16];
3488 int i;
3489 struct ipmi_system_interface_addr *si;
3490 struct ipmi_addr addr;
3491 struct ipmi_smi_msg smi_msg;
3492 struct ipmi_recv_msg recv_msg;
3493
3494 si = (struct ipmi_system_interface_addr *) &addr;
3495 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3496 si->channel = IPMI_BMC_CHANNEL;
3497 si->lun = 0;
3498
3499 /* Fill in an event telling that we have failed. */
3500 msg.netfn = 0x04; /* Sensor or Event. */
3501 msg.cmd = 2; /* Platform event command. */
3502 msg.data = data;
3503 msg.data_len = 8;
3504 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3505 data[1] = 0x03; /* This is for IPMI 1.0. */
3506 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3507 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3508 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3509
3510 /* Put a few breadcrumbs in. Hopefully later we can add more things
3511 to make the panic events more useful. */
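        /*
         * Only str[0..2] fit here: the 8-byte platform event has just
         * three spare bytes (data[3], data[6], data[7]).  When
         * CONFIG_IPMI_PANIC_STRING is set, the full string is sent below
         * as a series of OEM SEL records.
         */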
3512 if (str) {
3513 data[3] = str[0];
3514 data[6] = str[1];
3515 data[7] = str[2];
3516 }
3517
3518 smi_msg.done = dummy_smi_done_handler;
3519 recv_msg.done = dummy_recv_done_handler;
3520
3521 /* For every registered interface, send the event. */
3522 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3523 intf = ipmi_interfaces[i];
3524 if (IPMI_INVALID_INTERFACE(intf))
3525 continue;
3526
3527 /* Send the event announcing the panic. */
3528 intf->handlers->set_run_to_completion(intf->send_info, 1);
3529 i_ipmi_request(NULL,
3530 intf,
3531 &addr,
3532 0,
3533 &msg,
3534 intf,
3535 &smi_msg,
3536 &recv_msg,
3537 0,
3538 intf->channels[0].address,
3539 intf->channels[0].lun,
3540 0, 1); /* Don't retry, and don't wait. */
3541 }
3542
3543 #ifdef CONFIG_IPMI_PANIC_STRING
3544         /* On every interface, dump a bunch of OEM events holding the
3545 string. */
3546 if (!str)
3547 return;
3548
3549 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3550 char *p = str;
3551 struct ipmi_ipmb_addr *ipmb;
3552 int j;
3553
3554 intf = ipmi_interfaces[i];
3555 if (IPMI_INVALID_INTERFACE(intf))
3556 continue;
3557
3558 /* First job here is to figure out where to send the
3559 OEM events. There's no way in IPMI to send OEM
3560 events using an event send command, so we have to
3561 find the SEL to put them in and stick them in
3562 there. */
3563
3564 /* Get capabilities from the get device id. */
3565 intf->local_sel_device = 0;
3566 intf->local_event_generator = 0;
3567 intf->event_receiver = 0;
3568
3569 /* Request the device info from the local MC. */
3570 msg.netfn = IPMI_NETFN_APP_REQUEST;
3571 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3572 msg.data = NULL;
3573 msg.data_len = 0;
3574 intf->null_user_handler = device_id_fetcher;
3575 i_ipmi_request(NULL,
3576 intf,
3577 &addr,
3578 0,
3579 &msg,
3580 intf,
3581 &smi_msg,
3582 &recv_msg,
3583 0,
3584 intf->channels[0].address,
3585 intf->channels[0].lun,
3586 0, 1); /* Don't retry, and don't wait. */
3587
3588 if (intf->local_event_generator) {
3589 /* Request the event receiver from the local MC. */
3590 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3591 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3592 msg.data = NULL;
3593 msg.data_len = 0;
3594 intf->null_user_handler = event_receiver_fetcher;
3595 i_ipmi_request(NULL,
3596 intf,
3597 &addr,
3598 0,
3599 &msg,
3600 intf,
3601 &smi_msg,
3602 &recv_msg,
3603 0,
3604 intf->channels[0].address,
3605 intf->channels[0].lun,
3606 0, 1); /* no retry, and no wait. */
3607 }
3608 intf->null_user_handler = NULL;
3609
3610 /* Validate the event receiver. The low bit must not
3611 be 1 (it must be a valid IPMB address), it cannot
3612 be zero, and it must not be my address. */
3613 if (((intf->event_receiver & 1) == 0)
3614 && (intf->event_receiver != 0)
3615 && (intf->event_receiver != intf->channels[0].address))
3616 {
3617 /* The event receiver is valid, send an IPMB
3618 message. */
3619 ipmb = (struct ipmi_ipmb_addr *) &addr;
3620 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3621 ipmb->channel = 0; /* FIXME - is this right? */
3622 ipmb->lun = intf->event_receiver_lun;
3623 ipmb->slave_addr = intf->event_receiver;
3624 } else if (intf->local_sel_device) {
3625 /* The event receiver was not valid (or was
3626 me), but I am an SEL device, just dump it
3627 in my SEL. */
3628 si = (struct ipmi_system_interface_addr *) &addr;
3629 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3630 si->channel = IPMI_BMC_CHANNEL;
3631 si->lun = 0;
3632 } else
3633                         continue; /* Nowhere to send the event. */
3634
3635
3636 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3637 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3638 msg.data = data;
3639 msg.data_len = 16;
3640
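                /*
                 * Each 16-byte SEL record built below (type 0xf0, OEM
                 * without timestamp) carries up to 11 characters of the
                 * panic string in bytes 5..15, with a sequence number in
                 * byte 4 so the pieces can be reassembled.
                 */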
3641 j = 0;
3642 while (*p) {
3643 int size = strlen(p);
3644
3645 if (size > 11)
3646 size = 11;
3647 data[0] = 0;
3648 data[1] = 0;
3649 data[2] = 0xf0; /* OEM event without timestamp. */
3650 data[3] = intf->channels[0].address;
3651 data[4] = j++; /* sequence # */
3652 /* Always give 11 bytes, so strncpy will fill
3653 it with zeroes for me. */
3654 strncpy(data+5, p, 11);
3655 p += size;
3656
3657 i_ipmi_request(NULL,
3658 intf,
3659 &addr,
3660 0,
3661 &msg,
3662 intf,
3663 &smi_msg,
3664 &recv_msg,
3665 0,
3666 intf->channels[0].address,
3667 intf->channels[0].lun,
3668 0, 1); /* no retry, and no wait. */
3669 }
3670 }
3671 #endif /* CONFIG_IPMI_PANIC_STRING */
3672 }
3673 #endif /* CONFIG_IPMI_PANIC_EVENT */
3674
3675 static int has_panicked = 0;
3676
3677 static int panic_event(struct notifier_block *this,
3678 unsigned long event,
3679 void *ptr)
3680 {
3681 int i;
3682 ipmi_smi_t intf;
3683
3684 if (has_panicked)
3685 return NOTIFY_DONE;
3686 has_panicked = 1;
3687
3688 /* For every registered interface, set it to run to completion. */
3689 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3690 intf = ipmi_interfaces[i];
3691 if (IPMI_INVALID_INTERFACE(intf))
3692 continue;
3693
3694 intf->handlers->set_run_to_completion(intf->send_info, 1);
3695 }
3696
3697 #ifdef CONFIG_IPMI_PANIC_EVENT
3698 send_panic_events(ptr);
3699 #endif
3700
3701 return NOTIFY_DONE;
3702 }
3703
3704 static struct notifier_block panic_block = {
3705 .notifier_call = panic_event,
3706 .next = NULL,
3707 .priority = 200 /* priority: INT_MAX >= x >= 0 */
3708 };
3709
3710 static int ipmi_init_msghandler(void)
3711 {
3712 int i;
3713 int rv;
3714
3715 if (initialized)
3716 return 0;
3717
3718 rv = driver_register(&ipmidriver);
3719 if (rv) {
3720 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3721 return rv;
3722 }
3723
3724 printk(KERN_INFO "ipmi message handler version "
3725 IPMI_DRIVER_VERSION "\n");
3726
3727 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3728 ipmi_interfaces[i] = NULL;
3729
3730 #ifdef CONFIG_PROC_FS
3731 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3732 if (!proc_ipmi_root) {
3733 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3734 return -ENOMEM;
3735 }
3736
3737 proc_ipmi_root->owner = THIS_MODULE;
3738 #endif /* CONFIG_PROC_FS */
3739
3740 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3741 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3742
3743 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
3744
3745 initialized = 1;
3746
3747 return 0;
3748 }
3749
3750 static __init int ipmi_init_msghandler_mod(void)
3751 {
3752 ipmi_init_msghandler();
3753 return 0;
3754 }
3755
3756 static __exit void cleanup_ipmi(void)
3757 {
3758 int count;
3759
3760 if (!initialized)
3761 return;
3762
3763 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3764
3765 /* This can't be called if any interfaces exist, so no worry about
3766 shutting down the interfaces. */
3767
3768 /* Tell the timer to stop, then wait for it to stop. This avoids
3769 problems with race conditions removing the timer here. */
3770 atomic_inc(&stop_operation);
3771 del_timer_sync(&ipmi_timer);
3772
3773 #ifdef CONFIG_PROC_FS
3774 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3775 #endif /* CONFIG_PROC_FS */
3776
3777 driver_unregister(&ipmidriver);
3778
3779 initialized = 0;
3780
3781 /* Check for buffer leaks. */
3782 count = atomic_read(&smi_msg_inuse_count);
3783 if (count != 0)
3784 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3785 count);
3786 count = atomic_read(&recv_msg_inuse_count);
3787 if (count != 0)
3788 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3789 count);
3790 }
3791 module_exit(cleanup_ipmi);
3792
3793 module_init(ipmi_init_msghandler_mod);
3794 MODULE_LICENSE("GPL");
3795 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3796 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3797 MODULE_VERSION(IPMI_DRIVER_VERSION);
3798
3799 EXPORT_SYMBOL(ipmi_create_user);
3800 EXPORT_SYMBOL(ipmi_destroy_user);
3801 EXPORT_SYMBOL(ipmi_get_version);
3802 EXPORT_SYMBOL(ipmi_request_settime);
3803 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3804 EXPORT_SYMBOL(ipmi_register_smi);
3805 EXPORT_SYMBOL(ipmi_unregister_smi);
3806 EXPORT_SYMBOL(ipmi_register_for_cmd);
3807 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3808 EXPORT_SYMBOL(ipmi_smi_msg_received);
3809 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3810 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3811 EXPORT_SYMBOL(ipmi_addr_length);
3812 EXPORT_SYMBOL(ipmi_validate_addr);
3813 EXPORT_SYMBOL(ipmi_set_gets_events);
3814 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3815 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3816 EXPORT_SYMBOL(ipmi_set_my_address);
3817 EXPORT_SYMBOL(ipmi_get_my_address);
3818 EXPORT_SYMBOL(ipmi_set_my_LUN);
3819 EXPORT_SYMBOL(ipmi_get_my_LUN);
3820 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3821 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3822 EXPORT_SYMBOL(ipmi_free_recv_msg);