/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>

#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(ipmi_smi_t intf);
static void need_waiter(ipmi_smi_t intf);
static int handle_one_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg);

static int initialized;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 16);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
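
/*
 * Illustrative usage (a sketch, not part of the driver): the parameter can
 * be set at load time or changed later through sysfs, for example:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * The sysfs path above assumes this file is built as the ipmi_msghandler
 * module.
 */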


#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

#define MAX_EVENTS_IN_QUEUE 25

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT 60000

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME 1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/* Set to false when the user is destroyed. */
	bool valid;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;
};

struct cmd_rcvr {
	struct list_head link;

	ipmi_user_t user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
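
/*
 * Illustrative round trip for the macros above (a sketch only, not used by
 * the driver):
 *
 *	unsigned char seq;
 *	unsigned long seqid;
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x123);
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *
 * Now seq == 5 and seqid == 0x123; NEXT_SEQID() wraps the 26-bit counter
 * back to zero after 0x3ffffff.
 */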

struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;

	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry {
	char *name;
	struct ipmi_proc_entry *next;
};
#endif

struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs;
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* protects id & dyn* fields */
	u8 guid[16];
	u8 fetch_guid[16];
	int dyn_guid_set;
	struct kref usecount;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, u8 *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent out on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ 64
#define IPMI_MAX_CHANNELS 16
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock
	 * protects this.
	 */
	struct list_head users;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;

	/*
	 * This is the lower-layer's sender routine.  Note that you
	 * must either be holding the ipmi_interfaces_mutex or be in
	 * a non-preemptible region to use this.  You must fetch the
	 * value into a local variable and make sure it is not NULL.
	 */
	const struct ipmi_smi_handlers *handlers;
	void *send_info;

#ifdef CONFIG_PROC_FS
	/* A list of proc entries for this interface. */
	struct mutex proc_entry_lock;
	struct ipmi_proc_entry *proc_entries;
#endif

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;
	int last_needs_timer;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * The only user outside of initialization and panic handling is
	 * the dynamic device id fetching, so no mutex is currently
	 * required on this.  If more users come along, some sort of
	 * mutex will be required.
	 */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char proc_dir_name[10];

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(ipmi_smi_t intf);

/**
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex protects adding/removing BMCs on the ipmidriver's device
 * list.  This way we can pull items out of the driver's list and reuse
 * them.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

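/*
 * Example of how the stat macros expand (a sketch only): given an interface
 * "intf", ipmi_inc_stat(intf, sent_local_commands) atomically bumps
 * intf->stats[IPMI_STAT_sent_local_commands], and
 * ipmi_get_stat(intf, sent_local_commands) reads it back as an unsigned int.
 */
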
static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(ipmi_smi_t intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
					&& (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int intf_num;
	ipmi_smi_t intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t intf;
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == -1)
			continue;
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&(watcher->link));
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Must be called with smi_watchers_mutex held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static void deliver_response(struct ipmi_recv_msg *msg)
{
	if (!msg->user) {
		ipmi_smi_t intf = msg->user_msg_data;

		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
			ipmi_inc_stat(intf, handled_local_responses);
		} else {
			/* No handler, so give up. */
			ipmi_inc_stat(intf, unhandled_local_responses);
		}
		ipmi_free_recv_msg(msg);
	} else if (!oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in that
		 * case.
		 */

		ipmi_user_t user = msg->user;
		user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
}

static void
deliver_err_response(struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_response(msg);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(ipmi_smi_t intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &(msg->addr)))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(ipmi_smi_t intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &(intf->seq_table[seq]);
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(ipmi_smi_t intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &(intf->seq_table[seq]);

		ent->inuse = 0;
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	if (msg)
		deliver_err_response(msg, err);

	return rv;
}


int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     ipmi_user_t *user)
{
	unsigned long flags;
	ipmi_user_t new_user;
	int rv = 0;
	ipmi_smi_t intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;

		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize.  Check that.
		 */
		if (!initialized)
			return -ENODEV;
	}

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	/*
	 * Hold the lock so intf->handlers is guaranteed to be good
	 * until now.
	 */
	mutex_unlock(&ipmi_interfaces_mutex);

	new_user->valid = true;
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout) {
		/* User wants pretimeouts, so make sure to watch for them. */
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	}
	*user = new_user;
	return 0;

out_kref:
	kref_put(&intf->refcount, intf_free);
out_kfree:
	mutex_unlock(&ipmi_interfaces_mutex);
	kfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
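
/*
 * Illustrative caller sketch (a hypothetical client module, not part of
 * this file): a kernel user registers a receive handler and then owns the
 * returned ipmi_user_t until ipmi_destroy_user() is called.
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		// inspect msg->recv_type and msg->msg, then free it
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	ipmi_user_t user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 */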

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = 0;
	ipmi_smi_t intf;
	const struct ipmi_smi_handlers *handlers;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;

found:
	handlers = intf->handlers;
	rv = -ENOSYS;
	if (handlers->get_smi_info)
		rv = handlers->get_smi_info(intf->send_info, data);
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
	kfree(user);
}

int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	user->valid = false;

	if (user->handler->ipmi_watchdog_pretimeout)
		atomic_dec(&intf->event_waiters);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(ipmi_user_t user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (rv)
		return rv;

	*major = ipmi_version_major(&id);
	*minor = ipmi_version_minor(&id);

	return 0;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(ipmi_user_t user,
			unsigned int channel,
			unsigned char address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	user->intf->channels[channel].address = address;
	return 0;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(ipmi_user_t user,
			unsigned int channel,
			unsigned char *address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	*address = user->intf->channels[channel].address;
	return 0;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(ipmi_user_t user,
		    unsigned int channel,
		    unsigned char LUN)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	user->intf->channels[channel].lun = LUN & 0x3;
	return 0;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(ipmi_user_t user,
		    unsigned int channel,
		    unsigned char *address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	*address = user->intf->channels[channel].lun;
	return 0;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(ipmi_user_t user)
{
	int mode;
	unsigned long flags;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(ipmi_smi_t intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
{
	int rv = 0;
	unsigned long flags;
	ipmi_smi_t intf = user->intf;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(ipmi_user_t user, bool val)
{
	unsigned long flags;
	ipmi_smi_t intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			printk(KERN_WARNING PFX "Event queue no longer"
			       " full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
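
/*
 * Typical call pattern (a sketch): a user that wants asynchronous events
 * enables delivery right after ipmi_create_user(), e.g.
 * ipmi_set_gets_events(user, true); queued events are then pushed through
 * the user's ipmi_recv_hndl as IPMI_ASYNC_EVENT_RECV_TYPE messages.
 */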

static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(ipmi_user_t user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;


	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr)
		return -ENOMEM;
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	if (atomic_inc_return(&intf->event_waiters) == 1)
		need_waiter(intf);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
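
/*
 * Illustrative registration sketch (values are only an example): to receive
 * chassis-control commands on every channel, a user could call
 *
 *	ipmi_register_for_cmd(user, 0x00 /+ chassis netfn +/,
 *			      0x02 /+ chassis control cmd +/, IPMI_CHAN_ALL);
 *
 * Matching incoming commands are then delivered to that user as
 * IPMI_CMD_RECV_TYPE messages.
 */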

int ipmi_unregister_for_cmd(ipmi_user_t user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		atomic_dec(&intf->event_waiters);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
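
/*
 * Worked example (arithmetic only): for the two bytes 0x20 0x18 the running
 * sum is 0x38, so ipmb_checksum() returns 0xc8; appending that byte makes
 * the sum of the whole field zero modulo 256, which is how IPMB checksums
 * are verified.
 */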

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
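
/*
 * Resulting SEND_MSG payload in the non-broadcast case (a reference sketch
 * derived from the code above):
 *
 *	data[0] APP netfn/LUN		data[5] checksum of bytes 3..4
 *	data[1] SEND_MSG command	data[6] source (requester) address
 *	data[2] channel			data[7] (ipmb_seq << 2) | source LUN
 *	data[3] target slave address	data[8] command
 *	data[4] netfn / target LUN	data[9..] payload, then trailing checksum
 *
 * A broadcast inserts an extra zero byte at data[3] and offsets the rest of
 * the message by one.
 */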
1562
1563static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1564 struct kernel_ipmi_msg *msg,
1565 struct ipmi_lan_addr *lan_addr,
1566 long msgid,
1567 unsigned char ipmb_seq,
1568 unsigned char source_lun)
1569{
1570 /* Format the IPMB header data. */
1571 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1572 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1573 smi_msg->data[2] = lan_addr->channel;
1574 smi_msg->data[3] = lan_addr->session_handle;
1575 smi_msg->data[4] = lan_addr->remote_SWID;
1576 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1577 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1578 smi_msg->data[7] = lan_addr->local_SWID;
1579 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1580 smi_msg->data[9] = msg->cmd;
1581
1582 /* Now tack on the data to the message. */
1583 if (msg->data_len > 0)
1584 memcpy(&(smi_msg->data[10]), msg->data,
1585 msg->data_len);
1586 smi_msg->data_size = msg->data_len + 10;
1587
1588 /* Now calculate the checksum and tack it on. */
1589 smi_msg->data[smi_msg->data_size]
1590 = ipmb_checksum(&(smi_msg->data[7]),
1591 smi_msg->data_size-7);
1592
c70d7499
CM
1593 /*
1594 * Add on the checksum size and the offset from the
1595 * broadcast.
1596 */
1da177e4
LT
1597 smi_msg->data_size += 1;
1598
1599 smi_msg->msgid = msgid;
1600}
1601
191cc414
AB
1602static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
1603 struct ipmi_smi_msg *smi_msg,
1604 int priority)
7f4a1c84 1605{
7ea0ed2b
CM
1606 if (intf->curr_msg) {
1607 if (priority > 0)
1608 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1609 else
1610 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1611 smi_msg = NULL;
1612 } else {
1613 intf->curr_msg = smi_msg;
1614 }
191cc414
AB
1615
1616 return smi_msg;
1617}
1618
1619
81d02b7f 1620static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers,
191cc414
AB
1621 struct ipmi_smi_msg *smi_msg, int priority)
1622{
1623 int run_to_completion = intf->run_to_completion;
1624
1625 if (run_to_completion) {
1626 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1627 } else {
1628 unsigned long flags;
1629
1630 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1631 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
7ea0ed2b 1632 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
191cc414 1633 }
7ea0ed2b
CM
1634
1635 if (smi_msg)
99ab32f3 1636 handlers->sender(intf->send_info, smi_msg);
7f4a1c84
CM
1637}
1638
c70d7499
CM
1639/*
1640 * Separate from ipmi_request so that the user does not have to be
1641 * supplied in certain circumstances (mainly at panic time). If
1642 * messages are supplied, they will be freed, even if an error
1643 * occurs.
1644 */
393d2cc3
CM
1645static int i_ipmi_request(ipmi_user_t user,
1646 ipmi_smi_t intf,
1647 struct ipmi_addr *addr,
1648 long msgid,
1649 struct kernel_ipmi_msg *msg,
1650 void *user_msg_data,
1651 void *supplied_smi,
1652 struct ipmi_recv_msg *supplied_recv,
1653 int priority,
1654 unsigned char source_address,
1655 unsigned char source_lun,
1656 int retries,
1657 unsigned int retry_time_ms)
1da177e4 1658{
b2c03941
CM
1659 int rv = 0;
1660 struct ipmi_smi_msg *smi_msg;
1661 struct ipmi_recv_msg *recv_msg;
1662 unsigned long flags;
1da177e4
LT
1663
1664
c70d7499 1665 if (supplied_recv)
1da177e4 1666 recv_msg = supplied_recv;
c70d7499 1667 else {
1da177e4 1668 recv_msg = ipmi_alloc_recv_msg();
c70d7499 1669 if (recv_msg == NULL)
1da177e4 1670 return -ENOMEM;
1da177e4
LT
1671 }
1672 recv_msg->user_msg_data = user_msg_data;
1673
c70d7499 1674 if (supplied_smi)
1da177e4 1675 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
c70d7499 1676 else {
1da177e4
LT
1677 smi_msg = ipmi_alloc_smi_msg();
1678 if (smi_msg == NULL) {
1679 ipmi_free_recv_msg(recv_msg);
1680 return -ENOMEM;
1681 }
1682 }
1683
b2c03941 1684 rcu_read_lock();
7ea0ed2b 1685 if (intf->in_shutdown) {
b2c03941
CM
1686 rv = -ENODEV;
1687 goto out_err;
1688 }
1689
1da177e4 1690 recv_msg->user = user;
393d2cc3
CM
1691 if (user)
1692 kref_get(&user->refcount);
1da177e4 1693 recv_msg->msgid = msgid;
c70d7499
CM
1694 /*
1695 * Store the message to send in the receive message so timeout
1696 * responses can get the proper response data.
1697 */
1da177e4
LT
1698 recv_msg->msg = *msg;
1699
1700 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1701 struct ipmi_system_interface_addr *smi_addr;
1702
1703 if (msg->netfn & 1) {
1704 /* Responses are not allowed to the SMI. */
1705 rv = -EINVAL;
1706 goto out_err;
1707 }
1708
1709 smi_addr = (struct ipmi_system_interface_addr *) addr;
1710 if (smi_addr->lun > 3) {
b2655f26 1711 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1712 rv = -EINVAL;
1713 goto out_err;
1714 }
1715
1716 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1717
1718 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1719 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1720 || (msg->cmd == IPMI_GET_MSG_CMD)
c70d7499
CM
1721 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1722 /*
1723 * We don't let the user do these, since we manage
1724 * the sequence numbers.
1725 */
b2655f26 1726 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1727 rv = -EINVAL;
1728 goto out_err;
1729 }
1730
b9675136
CM
1731 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1732 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1733 || (msg->cmd == IPMI_WARM_RESET_CMD)))
c70d7499 1734 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
b9675136
CM
1735 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1736 intf->auto_maintenance_timeout
1737 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1738 if (!intf->maintenance_mode
c70d7499 1739 && !intf->maintenance_mode_enable) {
7aefac26 1740 intf->maintenance_mode_enable = true;
b9675136
CM
1741 maintenance_mode_update(intf);
1742 }
1743 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1744 flags);
1745 }
1746
1da177e4 1747 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
b2655f26 1748 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1749 rv = -EMSGSIZE;
1750 goto out_err;
1751 }
1752
1753 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1754 smi_msg->data[1] = msg->cmd;
1755 smi_msg->msgid = msgid;
1756 smi_msg->user_data = recv_msg;
1757 if (msg->data_len > 0)
1758 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1759 smi_msg->data_size = msg->data_len + 2;
b2655f26 1760 ipmi_inc_stat(intf, sent_local_commands);
25176ed6 1761 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
1da177e4
LT
1762 struct ipmi_ipmb_addr *ipmb_addr;
1763 unsigned char ipmb_seq;
1764 long seqid;
1765 int broadcast = 0;
1766
9c101fd4 1767 if (addr->channel >= IPMI_MAX_CHANNELS) {
b2655f26 1768 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1769 rv = -EINVAL;
1770 goto out_err;
1771 }
1772
1773 if (intf->channels[addr->channel].medium
c70d7499 1774 != IPMI_CHANNEL_MEDIUM_IPMB) {
b2655f26 1775 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1776 rv = -EINVAL;
1777 goto out_err;
1778 }
1779
1780 if (retries < 0) {
1781 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1782 retries = 0; /* Don't retry broadcasts. */
1783 else
1784 retries = 4;
1785 }
1786 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
c70d7499
CM
1787 /*
1788 * Broadcasts add a zero at the beginning of the
1789 * message, but otherwise is the same as an IPMB
1790 * address.
1791 */
1da177e4
LT
1792 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1793 broadcast = 1;
1794 }
1795
1796
1797 /* Default to 1 second retries. */
1798 if (retry_time_ms == 0)
1799 retry_time_ms = 1000;
1800
c70d7499
CM
1801 /*
1802 * 9 for the header and 1 for the checksum, plus
1803 * possibly one for the broadcast.
1804 */
1da177e4 1805 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
b2655f26 1806 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1807 rv = -EMSGSIZE;
1808 goto out_err;
1809 }
1810
1811 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1812 if (ipmb_addr->lun > 3) {
b2655f26 1813 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1814 rv = -EINVAL;
1815 goto out_err;
1816 }
1817
1818 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1819
1820 if (recv_msg->msg.netfn & 0x1) {
c70d7499
CM
1821 /*
1822 * It's a response, so use the user's sequence
1823 * from msgid.
1824 */
b2655f26 1825 ipmi_inc_stat(intf, sent_ipmb_responses);
1da177e4
LT
1826 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1827 msgid, broadcast,
1828 source_address, source_lun);
1829
c70d7499
CM
1830 /*
1831 * Save the receive message so we can use it
1832 * to deliver the response.
1833 */
1da177e4
LT
1834 smi_msg->user_data = recv_msg;
1835 } else {
1836 /* It's a command, so get a sequence for it. */
1837
1838 spin_lock_irqsave(&(intf->seq_lock), flags);
1839
c70d7499
CM
1840 /*
1841 * Create a sequence number with a 1 second
1842 * timeout and 4 retries.
1843 */
1da177e4
LT
1844 rv = intf_next_seq(intf,
1845 recv_msg,
1846 retry_time_ms,
1847 retries,
1848 broadcast,
1849 &ipmb_seq,
1850 &seqid);
1851 if (rv) {
c70d7499
CM
1852 /*
1853 * We have probably used up all the sequence
1854 * numbers, so abort.
1855 */
1da177e4
LT
1856 spin_unlock_irqrestore(&(intf->seq_lock),
1857 flags);
1858 goto out_err;
1859 }
1860
25176ed6
CM
1861 ipmi_inc_stat(intf, sent_ipmb_commands);
1862
c70d7499
CM
1863 /*
1864 * Store the sequence number in the message,
1865 * so that when the send message response
1866 * comes back we can start the timer.
1867 */
1da177e4
LT
1868 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1869 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1870 ipmb_seq, broadcast,
1871 source_address, source_lun);
1872
c70d7499
CM
1873 /*
1874 * Copy the message into the recv message data, so we
1875 * can retransmit it later if necessary.
1876 */
1da177e4
LT
1877 memcpy(recv_msg->msg_data, smi_msg->data,
1878 smi_msg->data_size);
1879 recv_msg->msg.data = recv_msg->msg_data;
1880 recv_msg->msg.data_len = smi_msg->data_size;
1881
c70d7499
CM
1882 /*
1883 * We don't unlock until here, because we need
1884 * to copy the completed message into the
1885 * recv_msg before we release the lock.
1886 * Otherwise, race conditions may bite us. I
1887 * know that's pretty paranoid, but I prefer
1888 * to be correct.
1889 */
1da177e4
LT
1890 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1891 }
25176ed6 1892 } else if (is_lan_addr(addr)) {
1da177e4
LT
1893 struct ipmi_lan_addr *lan_addr;
1894 unsigned char ipmb_seq;
1895 long seqid;
1896
12fc1d7b 1897 if (addr->channel >= IPMI_MAX_CHANNELS) {
b2655f26 1898 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1899 rv = -EINVAL;
1900 goto out_err;
1901 }
1902
1903 if ((intf->channels[addr->channel].medium
c70d7499 1904 != IPMI_CHANNEL_MEDIUM_8023LAN)
1da177e4 1905 && (intf->channels[addr->channel].medium
c70d7499 1906 != IPMI_CHANNEL_MEDIUM_ASYNC)) {
b2655f26 1907 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1908 rv = -EINVAL;
1909 goto out_err;
1910 }
1911
1912 retries = 4;
1913
1914 /* Default to 1 second retries. */
1915 if (retry_time_ms == 0)
1916 retry_time_ms = 1000;
1917
1918 /* 11 for the header and 1 for the checksum. */
1919 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
b2655f26 1920 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1921 rv = -EMSGSIZE;
1922 goto out_err;
1923 }
1924
1925 lan_addr = (struct ipmi_lan_addr *) addr;
1926 if (lan_addr->lun > 3) {
b2655f26 1927 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
1928 rv = -EINVAL;
1929 goto out_err;
1930 }
1931
1932 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1933
1934 if (recv_msg->msg.netfn & 0x1) {
c70d7499
CM
1935 /*
1936 * It's a response, so use the user's sequence
1937 * from msgid.
1938 */
b2655f26 1939 ipmi_inc_stat(intf, sent_lan_responses);
1da177e4
LT
1940 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1941 msgid, source_lun);
1942
c70d7499
CM
1943 /*
1944 * Save the receive message so we can use it
1945 * to deliver the response.
1946 */
1da177e4
LT
1947 smi_msg->user_data = recv_msg;
1948 } else {
1949 /* It's a command, so get a sequence for it. */
1950
1951 spin_lock_irqsave(&(intf->seq_lock), flags);
1952
c70d7499
CM
1953 /*
1954 * Create a sequence number with a 1 second
1955 * timeout and 4 retries.
1956 */
1da177e4
LT
1957 rv = intf_next_seq(intf,
1958 recv_msg,
1959 retry_time_ms,
1960 retries,
1961 0,
1962 &ipmb_seq,
1963 &seqid);
1964 if (rv) {
c70d7499
CM
1965 /*
1966 * We have probably used up all the sequence
1967 * numbers, so abort.
1968 */
1da177e4
LT
1969 spin_unlock_irqrestore(&(intf->seq_lock),
1970 flags);
1971 goto out_err;
1972 }
1973
25176ed6
CM
1974 ipmi_inc_stat(intf, sent_lan_commands);
1975
c70d7499
CM
1976 /*
1977 * Store the sequence number in the message,
1978 * so that when the send message response
1979 * comes back we can start the timer.
1980 */
1da177e4
LT
1981 format_lan_msg(smi_msg, msg, lan_addr,
1982 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1983 ipmb_seq, source_lun);
1984
c70d7499
CM
1985 /*
1986 * Copy the message into the recv message data, so we
1987 * can retransmit it later if necessary.
1988 */
1da177e4
LT
1989 memcpy(recv_msg->msg_data, smi_msg->data,
1990 smi_msg->data_size);
1991 recv_msg->msg.data = recv_msg->msg_data;
1992 recv_msg->msg.data_len = smi_msg->data_size;
1993
c70d7499
CM
1994 /*
1995 * We don't unlock until here, because we need
1996 * to copy the completed message into the
1997 * recv_msg before we release the lock.
1998 * Otherwise, race conditions may bite us. I
1999 * know that's pretty paranoid, but I prefer
2000 * to be correct.
2001 */
1da177e4
LT
2002 spin_unlock_irqrestore(&(intf->seq_lock), flags);
2003 }
2004 } else {
2005 /* Unknown address type. */
b2655f26 2006 ipmi_inc_stat(intf, sent_invalid_commands);
1da177e4
LT
2007 rv = -EINVAL;
2008 goto out_err;
2009 }
2010
2011#ifdef DEBUG_MSGING
2012 {
2013 int m;
e8b33617 2014 for (m = 0; m < smi_msg->data_size; m++)
2015 printk(" %2.2x", smi_msg->data[m]);
2016 printk("\n");
2017 }
2018#endif
b2c03941 2019
7ea0ed2b 2020 smi_send(intf, intf->handlers, smi_msg, priority);
b2c03941 2021 rcu_read_unlock();
1da177e4
LT
2022
2023 return 0;
2024
2025 out_err:
b2c03941 2026 rcu_read_unlock();
1da177e4
LT
2027 ipmi_free_smi_msg(smi_msg);
2028 ipmi_free_recv_msg(recv_msg);
2029 return rv;
2030}
2031
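/*
 * Summary of the send path above: system-interface messages are
 * handed straight to the SMI with the netfn/LUN in data[0] and the
 * command in data[1].  IPMB and LAN commands get a sequence number
 * from intf_next_seq(), which is packed into the SMI msgid via
 * STORE_SEQ_IN_MSGID() so that the send-message response can start
 * the retransmit timer and the eventual reply (or a timeout) can be
 * matched back to the original request.  IPMB and LAN responses
 * instead reuse the caller's msgid as the sequence number.
 */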
c14979b9
CM
2032static int check_addr(ipmi_smi_t intf,
2033 struct ipmi_addr *addr,
2034 unsigned char *saddr,
2035 unsigned char *lun)
2036{
2037 if (addr->channel >= IPMI_MAX_CHANNELS)
2038 return -EINVAL;
2039 *lun = intf->channels[addr->channel].lun;
2040 *saddr = intf->channels[addr->channel].address;
2041 return 0;
2042}
2043
1da177e4
LT
2044int ipmi_request_settime(ipmi_user_t user,
2045 struct ipmi_addr *addr,
2046 long msgid,
2047 struct kernel_ipmi_msg *msg,
2048 void *user_msg_data,
2049 int priority,
2050 int retries,
2051 unsigned int retry_time_ms)
2052{
f0ba9390 2053 unsigned char saddr = 0, lun = 0;
c14979b9
CM
2054 int rv;
2055
8a3628d5 2056 if (!user)
56a55ec6 2057 return -EINVAL;
c14979b9
CM
2058 rv = check_addr(user->intf, addr, &saddr, &lun);
2059 if (rv)
2060 return rv;
1da177e4
LT
2061 return i_ipmi_request(user,
2062 user->intf,
2063 addr,
2064 msgid,
2065 msg,
2066 user_msg_data,
2067 NULL, NULL,
2068 priority,
2069 saddr,
2070 lun,
2071 retries,
2072 retry_time_ms);
2073}
c70d7499 2074EXPORT_SYMBOL(ipmi_request_settime);
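/*
 * Illustrative sketch only (not part of this file): a kernel client
 * that already holds an ipmi_user_t (normally obtained from
 * ipmi_create_user()) could send a Get Device ID request to the local
 * BMC much like send_get_device_id_cmd() below does internally.  The
 * "user", "msgid" and "user_data" values are placeholders chosen by
 * the caller:
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, msgid,
 *				  &msg, user_data, 0, -1, 0);
 *
 * Passing retries = -1 and retry_time_ms = 0 selects the defaults
 * (retries only matter for IPMB/LAN destinations); the response is
 * delivered later to the user's receive handler as an ipmi_recv_msg.
 */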
1da177e4
LT
2075
2076int ipmi_request_supply_msgs(ipmi_user_t user,
2077 struct ipmi_addr *addr,
2078 long msgid,
2079 struct kernel_ipmi_msg *msg,
2080 void *user_msg_data,
2081 void *supplied_smi,
2082 struct ipmi_recv_msg *supplied_recv,
2083 int priority)
2084{
9ebca93b 2085 unsigned char saddr = 0, lun = 0;
c14979b9
CM
2086 int rv;
2087
8a3628d5 2088 if (!user)
56a55ec6 2089 return -EINVAL;
c14979b9
CM
2090 rv = check_addr(user->intf, addr, &saddr, &lun);
2091 if (rv)
2092 return rv;
1da177e4
LT
2093 return i_ipmi_request(user,
2094 user->intf,
2095 addr,
2096 msgid,
2097 msg,
2098 user_msg_data,
2099 supplied_smi,
2100 supplied_recv,
2101 priority,
2102 saddr,
2103 lun,
2104 -1, 0);
2105}
c70d7499 2106EXPORT_SYMBOL(ipmi_request_supply_msgs);
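/*
 * ipmi_request_supply_msgs() is the same operation as
 * ipmi_request_settime(), except that the caller supplies a
 * preallocated SMI message and receive message (supplied_smi /
 * supplied_recv) and accepts the default retry settings (-1, 0), so
 * the request path does not need to allocate memory itself.
 */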
1da177e4 2107
aa9c9ab2
JK
2108static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2109{
2110 int rv;
2111
2112 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2113 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2114 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2115 pr_warn(PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2116 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2117 return;
2118 }
2119
2120 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2121 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2122 if (rv) {
2123 pr_warn(PFX "device id demangle failed: %d\n", rv);
2124 intf->bmc->dyn_id_set = 0;
2125 } else {
2126 /*
2127 * Make sure the id data is available before setting
2128 * dyn_id_set.
2129 */
2130 smp_wmb();
2131 intf->bmc->dyn_id_set = 1;
2132 }
2133
2134 wake_up(&intf->waitq);
2135}
2136
2137static int
2138send_get_device_id_cmd(ipmi_smi_t intf)
2139{
2140 struct ipmi_system_interface_addr si;
2141 struct kernel_ipmi_msg msg;
2142
2143 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2144 si.channel = IPMI_BMC_CHANNEL;
2145 si.lun = 0;
2146
2147 msg.netfn = IPMI_NETFN_APP_REQUEST;
2148 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2149 msg.data = NULL;
2150 msg.data_len = 0;
2151
2152 return i_ipmi_request(NULL,
2153 intf,
2154 (struct ipmi_addr *) &si,
2155 0,
2156 &msg,
2157 intf,
2158 NULL,
2159 NULL,
2160 0,
2161 intf->channels[0].address,
2162 intf->channels[0].lun,
2163 -1, 0);
2164}
2165
2166static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc)
2167{
2168 int rv;
2169
2170 bmc->dyn_id_set = 2;
2171
2172 intf->null_user_handler = bmc_device_id_handler;
2173
2174 rv = send_get_device_id_cmd(intf);
2175 if (rv)
2176 return rv;
2177
2178 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2179
2180 if (!bmc->dyn_id_set)
2181 rv = -EIO; /* Something went wrong in the fetch. */
2182
2183 /* dyn_id_set makes the id data available. */
2184 smp_rmb();
2185
2186 intf->null_user_handler = NULL;
2187
2188 return rv;
2189}
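/*
 * Note on the handshake above: dyn_id_set is a small state machine.
 * 2 means "fetch in progress", 1 means bmc->fetch_id is valid
 * (bmc_device_id_handler() sets it after an smp_wmb() so the id data
 * is visible first), and 0 means the fetch failed.  The smp_rmb()
 * here pairs with that smp_wmb().  The GUID fetch in __get_guid() and
 * guid_handler() further down uses the same pattern with
 * dyn_guid_set.
 */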
2190
2191/*
2192 * Fetch the device id for the bmc/interface. You must pass in either
2193 * bmc or intf; this code will get the other one. If the data has
2194 * been recently fetched, this will just use the cached data. Otherwise
2195 * it will run a new fetch.
2196 *
2197 * Except for the first time this is called (in ipmi_register_smi()),
2198 * this will always return good data.
2199 */
511d57dc 2200static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
2201 struct ipmi_device_id *id,
2202 bool *guid_set, u8 *guid)
511d57dc 2203{
aa9c9ab2 2204 int rv = 0;
28f26ac7 2205 int prev_dyn_id_set, prev_guid_set;
aa9c9ab2
JK
2206
2207 if (!intf) {
2208 mutex_lock(&bmc->dyn_mutex);
2209retry_bmc_lock:
2210 if (list_empty(&bmc->intfs)) {
2211 mutex_unlock(&bmc->dyn_mutex);
2212 return -ENOENT;
2213 }
2214 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2215 bmc_link);
2216 kref_get(&intf->refcount);
2217 mutex_unlock(&bmc->dyn_mutex);
2218 mutex_lock(&intf->bmc_reg_mutex);
2219 mutex_lock(&bmc->dyn_mutex);
2220 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2221 bmc_link)) {
2222 mutex_unlock(&intf->bmc_reg_mutex);
2223 kref_put(&intf->refcount, intf_free);
2224 goto retry_bmc_lock;
2225 }
2226 } else {
2227 mutex_lock(&intf->bmc_reg_mutex);
511d57dc 2228 bmc = intf->bmc;
aa9c9ab2
JK
2229 mutex_lock(&bmc->dyn_mutex);
2230 kref_get(&intf->refcount);
2231 }
511d57dc 2232
aa9c9ab2
JK
2233 /* If we have a valid and current ID, just return that. */
2234 if (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))
2235 goto out;
2236
28f26ac7
CM
2237 prev_guid_set = bmc->dyn_guid_set;
2238 __get_guid(intf);
2239
2240 if (bmc->dyn_guid_set)
2241 memcpy(bmc->guid, bmc->fetch_guid, 16);
2242 else if (prev_guid_set)
2243 /*
2244 * The GUID used to be valid and this fetch failed, so
2245 * just use the cached value.
2246 */
2247 bmc->dyn_guid_set = prev_guid_set;
aa9c9ab2 2248
28f26ac7 2249 prev_dyn_id_set = bmc->dyn_id_set;
aa9c9ab2
JK
2250 rv = __get_device_id(intf, bmc);
2251 if (rv)
2252 goto out;
2253
2254 memcpy(&bmc->id, &bmc->fetch_id, sizeof(bmc->id));
2255
2256 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2257
2258out:
2259 if (rv && prev_dyn_id_set) {
2260 rv = 0; /* Ignore failures if we have previous data. */
2261 bmc->dyn_id_set = prev_dyn_id_set;
2262 }
2263
2264 if (id)
2265 *id = bmc->id;
2266
39d3fb45 2267 if (guid_set)
28f26ac7 2268 *guid_set = bmc->dyn_guid_set;
39d3fb45 2269
28f26ac7 2270 if (guid && bmc->dyn_guid_set)
2271 memcpy(guid, bmc->guid, 16);
2272
aa9c9ab2
JK
2273 mutex_unlock(&bmc->dyn_mutex);
2274 mutex_unlock(&intf->bmc_reg_mutex);
2275
2276 kref_put(&intf->refcount, intf_free);
2277 return rv;
511d57dc
CM
2278}
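/*
 * Locking note for bmc_get_device_id(): intf->bmc_reg_mutex must be
 * taken before bmc->dyn_mutex.  When only a bmc is passed in, the
 * code above therefore drops dyn_mutex, takes the first interface's
 * bmc_reg_mutex, re-takes dyn_mutex, and then re-checks that the
 * interface is still the first entry on bmc->intfs, retrying the
 * whole sequence if the list changed in the meantime.
 */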
2279
1aa16eea 2280#ifdef CONFIG_PROC_FS
07412736 2281static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1da177e4 2282{
07412736 2283 ipmi_smi_t intf = m->private;
c14979b9 2284 int i;
1da177e4 2285
07412736
AD
2286 seq_printf(m, "%x", intf->channels[0].address);
2287 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
2288 seq_printf(m, " %x", intf->channels[i].address);
d6c5dc18
JP
2289 seq_putc(m, '\n');
2290
5e33cd0c 2291 return 0;
1da177e4
LT
2292}
2293
07412736 2294static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
1da177e4 2295{
d9dda78b 2296 return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
07412736 2297}
1da177e4 2298
07412736
AD
2299static const struct file_operations smi_ipmb_proc_ops = {
2300 .open = smi_ipmb_proc_open,
2301 .read = seq_read,
2302 .llseek = seq_lseek,
2303 .release = single_release,
2304};
2305
2306static int smi_version_proc_show(struct seq_file *m, void *v)
2307{
2308 ipmi_smi_t intf = m->private;
511d57dc
CM
2309 struct ipmi_device_id id;
2310 int rv;
2311
39d3fb45 2312 rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL);
511d57dc
CM
2313 if (rv)
2314 return rv;
07412736 2315
d6c5dc18 2316 seq_printf(m, "%u.%u\n",
2317 ipmi_version_major(&id),
2318 ipmi_version_minor(&id));
d6c5dc18 2319
5e33cd0c 2320 return 0;
1da177e4
LT
2321}
2322
07412736 2323static int smi_version_proc_open(struct inode *inode, struct file *file)
1da177e4 2324{
d9dda78b 2325 return single_open(file, smi_version_proc_show, PDE_DATA(inode));
07412736
AD
2326}
2327
2328static const struct file_operations smi_version_proc_ops = {
2329 .open = smi_version_proc_open,
2330 .read = seq_read,
2331 .llseek = seq_lseek,
2332 .release = single_release,
2333};
1da177e4 2334
07412736
AD
2335static int smi_stats_proc_show(struct seq_file *m, void *v)
2336{
2337 ipmi_smi_t intf = m->private;
2338
2339 seq_printf(m, "sent_invalid_commands: %u\n",
b2655f26 2340 ipmi_get_stat(intf, sent_invalid_commands));
07412736 2341 seq_printf(m, "sent_local_commands: %u\n",
b2655f26 2342 ipmi_get_stat(intf, sent_local_commands));
07412736 2343 seq_printf(m, "handled_local_responses: %u\n",
b2655f26 2344 ipmi_get_stat(intf, handled_local_responses));
07412736 2345 seq_printf(m, "unhandled_local_responses: %u\n",
b2655f26 2346 ipmi_get_stat(intf, unhandled_local_responses));
07412736 2347 seq_printf(m, "sent_ipmb_commands: %u\n",
b2655f26 2348 ipmi_get_stat(intf, sent_ipmb_commands));
07412736 2349 seq_printf(m, "sent_ipmb_command_errs: %u\n",
b2655f26 2350 ipmi_get_stat(intf, sent_ipmb_command_errs));
07412736 2351 seq_printf(m, "retransmitted_ipmb_commands: %u\n",
b2655f26 2352 ipmi_get_stat(intf, retransmitted_ipmb_commands));
07412736 2353 seq_printf(m, "timed_out_ipmb_commands: %u\n",
b2655f26 2354 ipmi_get_stat(intf, timed_out_ipmb_commands));
07412736 2355 seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
b2655f26 2356 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
07412736 2357 seq_printf(m, "sent_ipmb_responses: %u\n",
b2655f26 2358 ipmi_get_stat(intf, sent_ipmb_responses));
07412736 2359 seq_printf(m, "handled_ipmb_responses: %u\n",
b2655f26 2360 ipmi_get_stat(intf, handled_ipmb_responses));
07412736 2361 seq_printf(m, "invalid_ipmb_responses: %u\n",
b2655f26 2362 ipmi_get_stat(intf, invalid_ipmb_responses));
07412736 2363 seq_printf(m, "unhandled_ipmb_responses: %u\n",
b2655f26 2364 ipmi_get_stat(intf, unhandled_ipmb_responses));
07412736 2365 seq_printf(m, "sent_lan_commands: %u\n",
b2655f26 2366 ipmi_get_stat(intf, sent_lan_commands));
07412736 2367 seq_printf(m, "sent_lan_command_errs: %u\n",
b2655f26 2368 ipmi_get_stat(intf, sent_lan_command_errs));
07412736 2369 seq_printf(m, "retransmitted_lan_commands: %u\n",
b2655f26 2370 ipmi_get_stat(intf, retransmitted_lan_commands));
07412736 2371 seq_printf(m, "timed_out_lan_commands: %u\n",
b2655f26 2372 ipmi_get_stat(intf, timed_out_lan_commands));
07412736 2373 seq_printf(m, "sent_lan_responses: %u\n",
b2655f26 2374 ipmi_get_stat(intf, sent_lan_responses));
07412736 2375 seq_printf(m, "handled_lan_responses: %u\n",
b2655f26 2376 ipmi_get_stat(intf, handled_lan_responses));
07412736 2377 seq_printf(m, "invalid_lan_responses: %u\n",
b2655f26 2378 ipmi_get_stat(intf, invalid_lan_responses));
07412736 2379 seq_printf(m, "unhandled_lan_responses: %u\n",
b2655f26 2380 ipmi_get_stat(intf, unhandled_lan_responses));
07412736 2381 seq_printf(m, "handled_commands: %u\n",
b2655f26 2382 ipmi_get_stat(intf, handled_commands));
07412736 2383 seq_printf(m, "invalid_commands: %u\n",
b2655f26 2384 ipmi_get_stat(intf, invalid_commands));
07412736 2385 seq_printf(m, "unhandled_commands: %u\n",
b2655f26 2386 ipmi_get_stat(intf, unhandled_commands));
07412736 2387 seq_printf(m, "invalid_events: %u\n",
b2655f26 2388 ipmi_get_stat(intf, invalid_events));
07412736 2389 seq_printf(m, "events: %u\n",
b2655f26 2390 ipmi_get_stat(intf, events));
07412736 2391 seq_printf(m, "failed rexmit LAN msgs: %u\n",
25176ed6 2392 ipmi_get_stat(intf, dropped_rexmit_lan_commands));
07412736 2393 seq_printf(m, "failed rexmit IPMB msgs: %u\n",
25176ed6 2394 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
07412736
AD
2395 return 0;
2396}
1da177e4 2397
07412736
AD
2398static int smi_stats_proc_open(struct inode *inode, struct file *file)
2399{
d9dda78b 2400 return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
1da177e4 2401}
07412736
AD
2402
2403static const struct file_operations smi_stats_proc_ops = {
2404 .open = smi_stats_proc_open,
2405 .read = seq_read,
2406 .llseek = seq_lseek,
2407 .release = single_release,
2408};
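/*
 * The three read-only files defined above are created per interface
 * by add_proc_entries() below, normally as /proc/ipmi/<n>/stats,
 * /proc/ipmi/<n>/ipmb and /proc/ipmi/<n>/version (the parent
 * directory comes from proc_ipmi_root, so the exact path depends on
 * where that was created).  Illustrative use, with made-up output:
 *
 *	cat /proc/ipmi/0/version
 *	2.0
 *
 * The value is whatever bmc_get_device_id() reports for the BMC.
 */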
1aa16eea 2409#endif /* CONFIG_PROC_FS */
1da177e4
LT
2410
2411int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
07412736 2412 const struct file_operations *proc_ops,
99b76233 2413 void *data)
1da177e4 2414{
1da177e4 2415 int rv = 0;
3b625943
CM
2416#ifdef CONFIG_PROC_FS
2417 struct proc_dir_entry *file;
1da177e4
LT
2418 struct ipmi_proc_entry *entry;
2419
2420 /* Create a list element. */
2421 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2422 if (!entry)
2423 return -ENOMEM;
1b6b698f 2424 entry->name = kstrdup(name, GFP_KERNEL);
1da177e4
LT
2425 if (!entry->name) {
2426 kfree(entry);
2427 return -ENOMEM;
2428 }
1da177e4 2429
07412736 2430 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
1da177e4
LT
2431 if (!file) {
2432 kfree(entry->name);
2433 kfree(entry);
2434 rv = -ENOMEM;
2435 } else {
ac019151 2436 mutex_lock(&smi->proc_entry_lock);
1da177e4
LT
2437 /* Stick it on the list. */
2438 entry->next = smi->proc_entries;
2439 smi->proc_entries = entry;
ac019151 2440 mutex_unlock(&smi->proc_entry_lock);
1da177e4 2441 }
3b625943 2442#endif /* CONFIG_PROC_FS */
1da177e4
LT
2443
2444 return rv;
2445}
c70d7499 2446EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
1da177e4
LT
2447
2448static int add_proc_entries(ipmi_smi_t smi, int num)
2449{
2450 int rv = 0;
2451
3b625943 2452#ifdef CONFIG_PROC_FS
1da177e4
LT
2453 sprintf(smi->proc_dir_name, "%d", num);
2454 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
2455 if (!smi->proc_dir)
2456 rv = -ENOMEM;
1da177e4
LT
2457
2458 if (rv == 0)
2459 rv = ipmi_smi_add_proc_entry(smi, "stats",
07412736 2460 &smi_stats_proc_ops,
99b76233 2461 smi);
1da177e4
LT
2462
2463 if (rv == 0)
2464 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
07412736 2465 &smi_ipmb_proc_ops,
99b76233 2466 smi);
1da177e4
LT
2467
2468 if (rv == 0)
2469 rv = ipmi_smi_add_proc_entry(smi, "version",
07412736 2470 &smi_version_proc_ops,
99b76233 2471 smi);
3b625943 2472#endif /* CONFIG_PROC_FS */
1da177e4
LT
2473
2474 return rv;
2475}
2476
2477static void remove_proc_entries(ipmi_smi_t smi)
2478{
3b625943 2479#ifdef CONFIG_PROC_FS
1da177e4
LT
2480 struct ipmi_proc_entry *entry;
2481
ac019151 2482 mutex_lock(&smi->proc_entry_lock);
1da177e4
LT
2483 while (smi->proc_entries) {
2484 entry = smi->proc_entries;
2485 smi->proc_entries = entry->next;
2486
2487 remove_proc_entry(entry->name, smi->proc_dir);
2488 kfree(entry->name);
2489 kfree(entry);
2490 }
ac019151 2491 mutex_unlock(&smi->proc_entry_lock);
1da177e4 2492 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
3b625943 2493#endif /* CONFIG_PROC_FS */
1da177e4
LT
2494}
2495
50c812b2
CM
2496static ssize_t device_id_show(struct device *dev,
2497 struct device_attribute *attr,
2498 char *buf)
2499{
16639eb0 2500 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2501 struct ipmi_device_id id;
2502 int rv;
2503
39d3fb45 2504 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2505 if (rv)
2506 return rv;
50c812b2 2507
511d57dc 2508 return snprintf(buf, 10, "%u\n", id.device_id);
50c812b2 2509}
9c633317 2510static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
50c812b2 2511
16639eb0
CM
2512static ssize_t provides_device_sdrs_show(struct device *dev,
2513 struct device_attribute *attr,
2514 char *buf)
50c812b2 2515{
16639eb0 2516 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2517 struct ipmi_device_id id;
2518 int rv;
50c812b2 2519
39d3fb45 2520 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2521 if (rv)
2522 return rv;
2523
2524 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
50c812b2 2525}
9c633317
CM
2526static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
2527 NULL);
50c812b2
CM
2528
2529static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2530 char *buf)
2531{
16639eb0 2532 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2533 struct ipmi_device_id id;
2534 int rv;
50c812b2 2535
39d3fb45 2536 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2537 if (rv)
2538 return rv;
2539
2540 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
50c812b2 2541}
9c633317 2542static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
50c812b2 2543
16639eb0
CM
2544static ssize_t firmware_revision_show(struct device *dev,
2545 struct device_attribute *attr,
2546 char *buf)
50c812b2 2547{
16639eb0 2548 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2549 struct ipmi_device_id id;
2550 int rv;
50c812b2 2551
39d3fb45 2552 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2553 if (rv)
2554 return rv;
2555
2556 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2557 id.firmware_revision_2);
50c812b2 2558}
9c633317 2559static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
50c812b2
CM
2560
2561static ssize_t ipmi_version_show(struct device *dev,
2562 struct device_attribute *attr,
2563 char *buf)
2564{
16639eb0 2565 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2566 struct ipmi_device_id id;
2567 int rv;
2568
39d3fb45 2569 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2570 if (rv)
2571 return rv;
50c812b2
CM
2572
2573 return snprintf(buf, 20, "%u.%u\n",
2574 ipmi_version_major(&id),
2575 ipmi_version_minor(&id));
50c812b2 2576}
9c633317 2577static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
50c812b2
CM
2578
2579static ssize_t add_dev_support_show(struct device *dev,
2580 struct device_attribute *attr,
2581 char *buf)
2582{
16639eb0 2583 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2584 struct ipmi_device_id id;
2585 int rv;
50c812b2 2586
39d3fb45 2587 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2588 if (rv)
2589 return rv;
2590
2591 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
50c812b2 2592}
9c633317
CM
2593static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2594 NULL);
50c812b2
CM
2595
2596static ssize_t manufacturer_id_show(struct device *dev,
2597 struct device_attribute *attr,
2598 char *buf)
2599{
16639eb0 2600 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2601 struct ipmi_device_id id;
2602 int rv;
2603
39d3fb45 2604 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2605 if (rv)
2606 return rv;
50c812b2 2607
511d57dc 2608 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
50c812b2 2609}
9c633317 2610static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
50c812b2
CM
2611
2612static ssize_t product_id_show(struct device *dev,
2613 struct device_attribute *attr,
2614 char *buf)
2615{
16639eb0 2616 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2617 struct ipmi_device_id id;
2618 int rv;
2619
39d3fb45 2620 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2621 if (rv)
2622 return rv;
50c812b2 2623
511d57dc 2624 return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
50c812b2 2625}
9c633317 2626static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
50c812b2
CM
2627
2628static ssize_t aux_firmware_rev_show(struct device *dev,
2629 struct device_attribute *attr,
2630 char *buf)
2631{
16639eb0 2632 struct bmc_device *bmc = to_bmc_device(dev);
511d57dc
CM
2633 struct ipmi_device_id id;
2634 int rv;
2635
39d3fb45 2636 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2637 if (rv)
2638 return rv;
50c812b2
CM
2639
2640 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2641 id.aux_firmware_revision[3],
2642 id.aux_firmware_revision[2],
2643 id.aux_firmware_revision[1],
2644 id.aux_firmware_revision[0]);
50c812b2 2645}
9c633317 2646static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
50c812b2
CM
2647
2648static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2649 char *buf)
2650{
16639eb0 2651 struct bmc_device *bmc = to_bmc_device(dev);
39d3fb45
CM
2652 bool guid_set;
2653 u8 guid[16];
2654 int rv;
2655
2656 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, guid);
2657 if (rv)
2658 return rv;
2659 if (!guid_set)
2660 return -ENOENT;
50c812b2 2661
9b64a8ba 2662 return snprintf(buf, 100,
2663 "%2.2x%2.2x%2.2x%2.2x-%2.2x%2.2x-%2.2x%2.2x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2664 guid[3], guid[2], guid[1], guid[0],
2665 guid[5], guid[4], guid[7], guid[6],
2666 guid[8], guid[9], guid[10], guid[11],
2667 guid[12], guid[13], guid[14], guid[15]);
50c812b2 2668}
9c633317 2669static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
16639eb0
CM
2670
2671static struct attribute *bmc_dev_attrs[] = {
2672 &dev_attr_device_id.attr,
2673 &dev_attr_provides_device_sdrs.attr,
2674 &dev_attr_revision.attr,
2675 &dev_attr_firmware_revision.attr,
2676 &dev_attr_ipmi_version.attr,
2677 &dev_attr_additional_device_support.attr,
2678 &dev_attr_manufacturer_id.attr,
2679 &dev_attr_product_id.attr,
2d06a0c9
TI
2680 &dev_attr_aux_firmware_revision.attr,
2681 &dev_attr_guid.attr,
16639eb0
CM
2682 NULL
2683};
50c812b2 2684
2d06a0c9
TI
2685static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2686 struct attribute *attr, int idx)
2687{
2688 struct device *dev = kobj_to_dev(kobj);
2689 struct bmc_device *bmc = to_bmc_device(dev);
2690 umode_t mode = attr->mode;
511d57dc 2691 int rv;
2d06a0c9 2692
511d57dc 2693 if (attr == &dev_attr_aux_firmware_revision.attr) {
39d3fb45
CM
2694 struct ipmi_device_id id;
2695
2696 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
511d57dc
CM
2697 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2698 }
39d3fb45
CM
2699 if (attr == &dev_attr_guid.attr) {
2700 bool guid_set;
2701
2702 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2703 return (!rv && guid_set) ? mode : 0;
2704 }
2d06a0c9
TI
2705 return mode;
2706}
2707
1e7a75f7 2708static const struct attribute_group bmc_dev_attr_group = {
16639eb0 2709 .attrs = bmc_dev_attrs,
2d06a0c9 2710 .is_visible = bmc_dev_attr_is_visible,
16639eb0 2711};
5e59393e 2712
16639eb0
CM
2713static const struct attribute_group *bmc_dev_attr_groups[] = {
2714 &bmc_dev_attr_group,
2715 NULL
2716};
2717
1e7a75f7 2718static const struct device_type bmc_device_type = {
2719 .groups = bmc_dev_attr_groups,
2720};
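/*
 * The attributes above appear under the "ipmi_bmc" platform device
 * registered by ipmi_bmc_register() below, typically as
 * /sys/devices/platform/ipmi_bmc.<id>/device_id and friends, with a
 * "bmc" symlink from the interface's own device and an "ipmi<n>"
 * symlink pointing back.  Note that aux_firmware_revision and guid
 * are only made visible when the BMC actually reported that data;
 * see bmc_dev_attr_is_visible() above.
 */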
2721
f33e4df8
CM
2722static int __find_bmc_guid(struct device *dev, void *data)
2723{
2724 unsigned char *id = data;
39d3fb45
CM
2725 struct bmc_device *bmc;
2726 bool guid_set;
2727 u8 guid[16];
2728 int rv;
f33e4df8 2729
eae4a36a
CM
2730 if (dev->type != &bmc_device_type)
2731 return 0;
2732
39d3fb45
CM
2733 bmc = to_bmc_device(dev);
2734 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, guid);
2735 if (rv || !guid_set)
2736 return 0;
2737
2738 return memcmp(guid, id, 16) == 0;
f33e4df8
CM
2739}
2740
9ca15af3
CM
2741/*
2742 * Must be called with ipmidriver_mutex held. Returns with the
2743 * bmc's usecount incremented, if it is non-NULL.
2744 */
f33e4df8
CM
2745static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2746 unsigned char *guid)
2747{
2748 struct device *dev;
9ca15af3 2749 struct bmc_device *bmc = NULL;
f33e4df8
CM
2750
2751 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
9ca15af3
CM
2752 if (dev) {
2753 bmc = to_bmc_device(dev);
2754 kref_get(&bmc->usecount);
2755 put_device(dev);
2756 }
2757 return bmc;
f33e4df8
CM
2758}
2759
2760struct prod_dev_id {
2761 unsigned int product_id;
2762 unsigned char device_id;
2763};
2764
2765static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2766{
39d3fb45 2767 struct prod_dev_id *cid = data;
eae4a36a 2768 struct bmc_device *bmc;
39d3fb45
CM
2769 struct ipmi_device_id id;
2770 int rv;
eae4a36a
CM
2771
2772 if (dev->type != &bmc_device_type)
2773 return 0;
f33e4df8 2774
eae4a36a 2775 bmc = to_bmc_device(dev);
39d3fb45
CM
2776 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2777 if (rv)
2778 return 0;
2779
2780 return (id.product_id == cid->product_id
2781 && id.device_id == cid->device_id);
f33e4df8
CM
2782}
2783
9ca15af3
CM
2784/*
2785 * Must be called with ipmidriver_mutex held. Returns with the
2786 * bmc's usecount incremented, if it is non-NULL.
2787 */
f33e4df8
CM
2788static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2789 struct device_driver *drv,
2790 unsigned int product_id, unsigned char device_id)
2791{
2792 struct prod_dev_id id = {
2793 .product_id = product_id,
2794 .device_id = device_id,
2795 };
2796 struct device *dev;
9ca15af3 2797 struct bmc_device *bmc = NULL;
f33e4df8
CM
2798
2799 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
9ca15af3
CM
2800 if (dev) {
2801 bmc = to_bmc_device(dev);
2802 kref_get(&bmc->usecount);
2803 put_device(dev);
2804 }
2805 return bmc;
f33e4df8
CM
2806}
2807
68e7e50f
CM
2808static DEFINE_IDA(ipmi_bmc_ida);
2809
16639eb0
CM
2810static void
2811release_bmc_device(struct device *dev)
2812{
2813 kfree(to_bmc_device(dev));
5e59393e
JG
2814}
2815
2816static void
2817cleanup_bmc_device(struct kref *ref)
2818{
16639eb0 2819 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
68e7e50f 2820 int id = bmc->pdev.id; /* Unregister overwrites id */
5e59393e 2821
16639eb0 2822 platform_device_unregister(&bmc->pdev);
68e7e50f 2823 ida_simple_remove(&ipmi_bmc_ida, id);
50c812b2
CM
2824}
2825
2826static void ipmi_bmc_unregister(ipmi_smi_t intf)
2827{
2828 struct bmc_device *bmc = intf->bmc;
2829
a2cb600f
CM
2830 if (!intf->bmc_registered)
2831 return;
2832
aa9c9ab2
JK
2833 mutex_lock(&intf->bmc_reg_mutex);
2834
5a0e10ec 2835 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
a2cb600f
CM
2836 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2837 kfree(intf->my_dev_name);
2838 intf->my_dev_name = NULL;
50c812b2 2839
aa9c9ab2 2840 mutex_lock(&bmc->dyn_mutex);
a9137c3d 2841 list_del(&intf->bmc_link);
aa9c9ab2 2842 mutex_unlock(&bmc->dyn_mutex);
c659ff34 2843 intf->bmc = &intf->tmp_bmc;
aa9c9ab2 2844 mutex_lock(&ipmidriver_mutex);
16639eb0 2845 kref_put(&bmc->usecount, cleanup_bmc_device);
50c812b2 2846 mutex_unlock(&ipmidriver_mutex);
a2cb600f 2847 intf->bmc_registered = false;
aa9c9ab2
JK
2848
2849 mutex_unlock(&intf->bmc_reg_mutex);
50c812b2
CM
2850}
2851
5a0e10ec 2852static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
50c812b2
CM
2853{
2854 int rv;
2855 struct bmc_device *bmc = intf->bmc;
2856 struct bmc_device *old_bmc;
50c812b2 2857
50c812b2
CM
2858 /*
2859 * Try to find if there is a bmc_device struct
2860 * representing the interfaced BMC already
2861 */
9ca15af3 2862 mutex_lock(&ipmidriver_mutex);
28f26ac7 2863 if (bmc->dyn_guid_set)
fe2d5ffc 2864 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
50c812b2 2865 else
fe2d5ffc 2866 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2867 bmc->id.product_id,
2868 bmc->id.device_id);
9ca15af3 2869 mutex_unlock(&ipmidriver_mutex);
50c812b2
CM
2870
2871 /*
2872 * If a matching bmc_device already exists, reuse it;
2873 * otherwise register the new BMC device.
2874 */
2875 if (old_bmc) {
aa9c9ab2 2876 bmc = old_bmc;
50c812b2 2877 intf->bmc = old_bmc;
aa9c9ab2 2878 mutex_lock(&bmc->dyn_mutex);
a9137c3d 2879 list_add_tail(&intf->bmc_link, &bmc->intfs);
aa9c9ab2 2880 mutex_unlock(&bmc->dyn_mutex);
50c812b2 2881
50c812b2
CM
2882 printk(KERN_INFO
2883 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2884 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2885 bmc->id.manufacturer_id,
2886 bmc->id.product_id,
2887 bmc->id.device_id);
2888 } else {
c659ff34
CM
2889 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2890 if (!bmc) {
2891 rv = -ENOMEM;
2892 goto out;
2893 }
2894 INIT_LIST_HEAD(&bmc->intfs);
2895 mutex_init(&bmc->dyn_mutex);
2896
68e7e50f 2897 bmc->pdev.name = "ipmi_bmc";
f0b55da0 2898
68e7e50f
CM
2899 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2900 if (rv < 0)
2901 goto out;
16639eb0 2902 bmc->pdev.dev.driver = &ipmidriver.driver;
68e7e50f 2903 bmc->pdev.id = rv;
16639eb0
CM
2904 bmc->pdev.dev.release = release_bmc_device;
2905 bmc->pdev.dev.type = &bmc_device_type;
5a0e10ec 2906 kref_init(&bmc->usecount);
50c812b2 2907
aa9c9ab2
JK
2908 intf->bmc = bmc;
2909 mutex_lock(&bmc->dyn_mutex);
a9137c3d 2910 list_add_tail(&intf->bmc_link, &bmc->intfs);
aa9c9ab2
JK
2911 mutex_unlock(&bmc->dyn_mutex);
2912
2913 rv = platform_device_register(&bmc->pdev);
50c812b2
CM
2914 if (rv) {
2915 printk(KERN_ERR
2916 "ipmi_msghandler:"
2917 " Unable to register bmc device: %d\n",
2918 rv);
a2cb600f 2919 goto out_list_del;
50c812b2
CM
2920 }
2921
279fbd0c
MS
2922 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2923 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2924 bmc->id.manufacturer_id,
2925 bmc->id.product_id,
2926 bmc->id.device_id);
50c812b2
CM
2927 }
2928
2929 /*
2930 * create symlink from system interface device to bmc device
2931 * and back.
2932 */
5a0e10ec 2933 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
50c812b2
CM
2934 if (rv) {
2935 printk(KERN_ERR
2936 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2937 rv);
a2cb600f 2938 goto out_put_bmc;
50c812b2
CM
2939 }
2940
16639eb0 2941 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
50c812b2
CM
2942 if (!intf->my_dev_name) {
2943 rv = -ENOMEM;
2944 printk(KERN_ERR
2945 "ipmi_msghandler: allocate link from BMC: %d\n",
2946 rv);
a2cb600f 2947 goto out_unlink1;
50c812b2 2948 }
50c812b2 2949
16639eb0 2950 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
2951 intf->my_dev_name);
2952 if (rv) {
2953 kfree(intf->my_dev_name);
2954 intf->my_dev_name = NULL;
2955 printk(KERN_ERR
2956 "ipmi_msghandler:"
2957 " Unable to create symlink to bmc: %d\n",
2958 rv);
a2cb600f 2959 goto out_free_my_dev_name;
50c812b2
CM
2960 }
2961
a2cb600f 2962 intf->bmc_registered = true;
50c812b2 2963
a2cb600f 2964out:
50c812b2 2965 return rv;
a2cb600f
CM
2966
2967
2968out_free_my_dev_name:
2969 kfree(intf->my_dev_name);
2970 intf->my_dev_name = NULL;
2971
2972out_unlink1:
2973 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2974
2975out_put_bmc:
aa9c9ab2 2976 mutex_lock(&bmc->dyn_mutex);
a9137c3d 2977 list_del(&intf->bmc_link);
aa9c9ab2 2978 mutex_unlock(&bmc->dyn_mutex);
c659ff34 2979 intf->bmc = &intf->tmp_bmc;
aa9c9ab2 2980 mutex_lock(&ipmidriver_mutex);
a2cb600f
CM
2981 kref_put(&bmc->usecount, cleanup_bmc_device);
2982 mutex_unlock(&ipmidriver_mutex);
2983 goto out;
2984
2985out_list_del:
aa9c9ab2 2986 mutex_lock(&bmc->dyn_mutex);
a9137c3d 2987 list_del(&intf->bmc_link);
aa9c9ab2 2988 mutex_unlock(&bmc->dyn_mutex);
c659ff34 2989 intf->bmc = &intf->tmp_bmc;
a2cb600f
CM
2990 put_device(&bmc->pdev.dev);
2991 goto out;
50c812b2
CM
2992}
2993
2994static int
2995send_guid_cmd(ipmi_smi_t intf, int chan)
2996{
2997 struct kernel_ipmi_msg msg;
2998 struct ipmi_system_interface_addr si;
2999
3000 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3001 si.channel = IPMI_BMC_CHANNEL;
3002 si.lun = 0;
3003
3004 msg.netfn = IPMI_NETFN_APP_REQUEST;
3005 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3006 msg.data = NULL;
3007 msg.data_len = 0;
3008 return i_ipmi_request(NULL,
3009 intf,
3010 (struct ipmi_addr *) &si,
3011 0,
3012 &msg,
3013 intf,
3014 NULL,
3015 NULL,
3016 0,
3017 intf->channels[0].address,
3018 intf->channels[0].lun,
3019 -1, 0);
3020}
3021
28f26ac7 3022static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
50c812b2 3023{
28f26ac7
CM
3024 struct bmc_device *bmc = intf->bmc;
3025
50c812b2
CM
3026 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3027 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3028 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3029 /* Not for me */
3030 return;
3031
3032 if (msg->msg.data[0] != 0) {
3033 /* Error from getting the GUID, the BMC doesn't have one. */
28f26ac7 3034 bmc->dyn_guid_set = 0;
50c812b2
CM
3035 goto out;
3036 }
3037
3038 if (msg->msg.data_len < 17) {
28f26ac7 3039 bmc->dyn_guid_set = 0;
50c812b2
CM
3040 printk(KERN_WARNING PFX
3041 "guid_handler: The GUID response from the BMC was too"
3042 " short, it was %d but should have been 17. Assuming"
3043 " GUID is not available.\n",
3044 msg->msg.data_len);
3045 goto out;
3046 }
3047
28f26ac7
CM
3048 memcpy(bmc->fetch_guid, msg->msg.data + 1, 16);
3049 /*
3050 * Make sure the guid data is available before setting
3051 * dyn_guid_set.
3052 */
3053 smp_wmb();
3054 bmc->dyn_guid_set = 1;
50c812b2
CM
3055 out:
3056 wake_up(&intf->waitq);
3057}
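/*
 * The Get Device GUID response handled above is expected to carry a
 * completion code in data[0] followed by the 16-byte GUID, i.e. at
 * least 17 bytes of data; a non-zero completion code or a shorter
 * response is treated as "no GUID available".
 */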
3058
28f26ac7 3059static void __get_guid(ipmi_smi_t intf)
50c812b2
CM
3060{
3061 int rv;
28f26ac7 3062 struct bmc_device *bmc = intf->bmc;
50c812b2 3063
28f26ac7 3064 bmc->dyn_guid_set = 2;
50c812b2
CM
3065 intf->null_user_handler = guid_handler;
3066 rv = send_guid_cmd(intf, 0);
3067 if (rv)
3068 /* Send failed, no GUID available. */
3069 bmc->dyn_guid_set = 0;
3070
3071 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3072
3073 /* dyn_guid_set makes the guid data available. */
3074 smp_rmb();
3075
50c812b2
CM
3076 intf->null_user_handler = NULL;
3077}
3078
1da177e4
LT
3079static int
3080send_channel_info_cmd(ipmi_smi_t intf, int chan)
3081{
3082 struct kernel_ipmi_msg msg;
3083 unsigned char data[1];
3084 struct ipmi_system_interface_addr si;
3085
3086 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3087 si.channel = IPMI_BMC_CHANNEL;
3088 si.lun = 0;
3089
3090 msg.netfn = IPMI_NETFN_APP_REQUEST;
3091 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3092 msg.data = data;
3093 msg.data_len = 1;
3094 data[0] = chan;
3095 return i_ipmi_request(NULL,
3096 intf,
3097 (struct ipmi_addr *) &si,
3098 0,
3099 &msg,
56a55ec6 3100 intf,
3101 NULL,
3102 NULL,
3103 0,
3104 intf->channels[0].address,
3105 intf->channels[0].lun,
3106 -1, 0);
3107}
3108
3109static void
56a55ec6 3110channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1da177e4
LT
3111{
3112 int rv = 0;
3113 int chan;
3114
56a55ec6
CM
3115 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3116 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
c70d7499 3117 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
1da177e4 3118 /* It's the one we want */
56a55ec6 3119 if (msg->msg.data[0] != 0) {
1da177e4
LT
3120 /* Got an error from the channel, just go on. */
3121
56a55ec6 3122 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
c70d7499
CM
3123 /*
3124 * If the MC does not support this
3125 * command, that is legal. We just
3126 * assume it has a single IPMB channel
3127 * at zero.
3128 */
1da177e4
LT
3129 intf->channels[0].medium
3130 = IPMI_CHANNEL_MEDIUM_IPMB;
3131 intf->channels[0].protocol
3132 = IPMI_CHANNEL_PROTOCOL_IPMB;
1da177e4
LT
3133
3134 intf->curr_channel = IPMI_MAX_CHANNELS;
3135 wake_up(&intf->waitq);
3136 goto out;
3137 }
3138 goto next_channel;
3139 }
56a55ec6 3140 if (msg->msg.data_len < 4) {
1da177e4
LT
3141 /* Message not big enough, just go on. */
3142 goto next_channel;
3143 }
3144 chan = intf->curr_channel;
56a55ec6
CM
3145 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
3146 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
1da177e4 3147
c70d7499 3148 next_channel:
1da177e4
LT
3149 intf->curr_channel++;
3150 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
3151 wake_up(&intf->waitq);
3152 else
3153 rv = send_channel_info_cmd(intf, intf->curr_channel);
3154
3155 if (rv) {
3156 /* Got an error somehow, just give up. */
1f668423
CM
3157 printk(KERN_WARNING PFX
3158 "Error sending channel information for channel"
3159 " %d: %d\n", intf->curr_channel, rv);
3160
1da177e4
LT
3161 intf->curr_channel = IPMI_MAX_CHANNELS;
3162 wake_up(&intf->waitq);
1da177e4
LT
3163 }
3164 }
3165 out:
3166 return;
3167}
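/*
 * Channel scanning, as driven by channel_handler() above: starting at
 * channel 0, a Get Channel Info command is sent for each channel in
 * turn; the medium comes from data[2] & 0x7f and the protocol from
 * data[3] & 0x1f.  A BMC that answers IPMI_INVALID_COMMAND_ERR is
 * assumed to have a single IPMB channel at zero.  Once curr_channel
 * reaches IPMI_MAX_CHANNELS, anyone sleeping on intf->waitq (see
 * ipmi_register_smi() below) is woken up.
 */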
3168
895dcfd1 3169static void ipmi_poll(ipmi_smi_t intf)
fcfa4724 3170{
fcfa4724
CM
3171 if (intf->handlers->poll)
3172 intf->handlers->poll(intf->send_info);
7adf579c
CM
3173 /* In case something came in */
3174 handle_new_recv_msgs(intf);
fcfa4724 3175}
895dcfd1
CM
3176
3177void ipmi_poll_interface(ipmi_user_t user)
3178{
3179 ipmi_poll(user->intf);
fcfa4724 3180}
c70d7499 3181EXPORT_SYMBOL(ipmi_poll_interface);
fcfa4724 3182
81d02b7f 3183int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
1da177e4 3184 void *send_info,
50c812b2 3185 struct device *si_dev,
453823ba 3186 unsigned char slave_addr)
1da177e4
LT
3187{
3188 int i, j;
3189 int rv;
393d2cc3 3190 ipmi_smi_t intf;
bca0324d 3191 ipmi_smi_t tintf;
bca0324d 3192 struct list_head *link;
511d57dc 3193 struct ipmi_device_id id;
1da177e4 3194
c70d7499
CM
3195 /*
3196 * Make sure the driver is actually initialized, this handles
3197 * problems with initialization order.
3198 */
1da177e4
LT
3199 if (!initialized) {
3200 rv = ipmi_init_msghandler();
3201 if (rv)
3202 return rv;
c70d7499
CM
3203 /*
3204 * The init code doesn't return an error if it was turned
3205 * off, but it won't initialize. Check that.
3206 */
1da177e4
LT
3207 if (!initialized)
3208 return -ENODEV;
3209 }
3210
dd00cc48 3211 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
393d2cc3 3212 if (!intf)
1da177e4 3213 return -ENOMEM;
b2c03941 3214
c659ff34 3215 intf->bmc = &intf->tmp_bmc;
a9137c3d 3216 INIT_LIST_HEAD(&intf->bmc->intfs);
aa9c9ab2
JK
3217 mutex_init(&intf->bmc->dyn_mutex);
3218 INIT_LIST_HEAD(&intf->bmc_link);
3219 mutex_init(&intf->bmc_reg_mutex);
bca0324d 3220 intf->intf_num = -1; /* Mark it invalid for now. */
393d2cc3 3221 kref_init(&intf->refcount);
50c812b2 3222 intf->si_dev = si_dev;
393d2cc3
CM
3223 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3224 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
3225 intf->channels[j].lun = 2;
3226 }
3227 if (slave_addr != 0)
3228 intf->channels[0].address = slave_addr;
3229 INIT_LIST_HEAD(&intf->users);
3230 intf->handlers = handlers;
3231 intf->send_info = send_info;
3232 spin_lock_init(&intf->seq_lock);
3233 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3234 intf->seq_table[j].inuse = 0;
3235 intf->seq_table[j].seqid = 0;
3236 }
3237 intf->curr_seq = 0;
3238#ifdef CONFIG_PROC_FS
ac019151 3239 mutex_init(&intf->proc_entry_lock);
393d2cc3 3240#endif
65be7544
CM
3241 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3242 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
7adf579c
CM
3243 tasklet_init(&intf->recv_tasklet,
3244 smi_recv_tasklet,
3245 (unsigned long) intf);
3246 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
7ea0ed2b
CM
3247 spin_lock_init(&intf->xmit_msgs_lock);
3248 INIT_LIST_HEAD(&intf->xmit_msgs);
3249 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
393d2cc3 3250 spin_lock_init(&intf->events_lock);
89986496
CM
3251 atomic_set(&intf->event_waiters, 0);
3252 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
393d2cc3
CM
3253 INIT_LIST_HEAD(&intf->waiting_events);
3254 intf->waiting_events_count = 0;
d6dfd131 3255 mutex_init(&intf->cmd_rcvrs_mutex);
b9675136 3256 spin_lock_init(&intf->maintenance_mode_lock);
393d2cc3
CM
3257 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3258 init_waitqueue_head(&intf->waitq);
b2655f26
KB
3259 for (i = 0; i < IPMI_NUM_STATS; i++)
3260 atomic_set(&intf->stats[i], 0);
393d2cc3 3261
393d2cc3 3262 intf->proc_dir = NULL;
1da177e4 3263
b2c03941 3264 mutex_lock(&smi_watchers_mutex);
bca0324d
CM
3265 mutex_lock(&ipmi_interfaces_mutex);
3266 /* Look for a hole in the numbers. */
3267 i = 0;
3268 link = &ipmi_interfaces;
3269 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3270 if (tintf->intf_num != i) {
3271 link = &tintf->link;
1da177e4
LT
3272 break;
3273 }
bca0324d 3274 i++;
1da177e4 3275 }
bca0324d
CM
3276 /* Add the new interface in numeric order. */
3277 if (i == 0)
3278 list_add_rcu(&intf->link, &ipmi_interfaces);
3279 else
3280 list_add_tail_rcu(&intf->link, link);
1da177e4 3281
453823ba
CM
3282 rv = handlers->start_processing(send_info, intf);
3283 if (rv)
3284 goto out;
1da177e4 3285
39d3fb45 3286 rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL);
511d57dc
CM
3287 if (rv) {
3288 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3289 goto out;
3290 }
3291
a2cb600f
CM
3292 rv = ipmi_bmc_register(intf, i);
3293 if (rv)
3294 goto out;
3295
511d57dc
CM
3296 if (ipmi_version_major(&id) > 1
3297 || (ipmi_version_major(&id) == 1
3298 && ipmi_version_minor(&id) >= 5)) {
c70d7499
CM
3299 /*
3300 * Start scanning the channels to see what is
3301 * available.
3302 */
393d2cc3
CM
3303 intf->null_user_handler = channel_handler;
3304 intf->curr_channel = 0;
3305 rv = send_channel_info_cmd(intf, 0);
1f668423
CM
3306 if (rv) {
3307 printk(KERN_WARNING PFX
3308 "Error sending channel information for channel"
3309 " 0, %d\n", rv);
393d2cc3 3310 goto out;
1f668423 3311 }
1da177e4 3312
393d2cc3
CM
3313 /* Wait for the channel info to be read. */
3314 wait_event(intf->waitq,
3315 intf->curr_channel >= IPMI_MAX_CHANNELS);
50c812b2 3316 intf->null_user_handler = NULL;
393d2cc3
CM
3317 } else {
3318 /* Assume a single IPMB channel at zero. */
3319 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3320 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
9a2845c4 3321 intf->curr_channel = IPMI_MAX_CHANNELS;
1da177e4
LT
3322 }
3323
a2cb600f 3324 rv = add_proc_entries(intf, i);
1da177e4 3325
393d2cc3 3326 out:
1da177e4 3327 if (rv) {
a2cb600f 3328 ipmi_bmc_unregister(intf);
393d2cc3
CM
3329 if (intf->proc_dir)
3330 remove_proc_entries(intf);
b2c03941 3331 intf->handlers = NULL;
bca0324d
CM
3332 list_del_rcu(&intf->link);
3333 mutex_unlock(&ipmi_interfaces_mutex);
b2c03941 3334 mutex_unlock(&smi_watchers_mutex);
bca0324d 3335 synchronize_rcu();
393d2cc3 3336 kref_put(&intf->refcount, intf_free);
393d2cc3 3337 } else {
78ba2faf
CM
3338 /*
3339 * Keep memory order straight for RCU readers. Make
3340 * sure everything else is committed to memory before
3341 * setting intf_num to mark the interface valid.
3342 */
3343 smp_wmb();
bca0324d
CM
3344 intf->intf_num = i;
3345 mutex_unlock(&ipmi_interfaces_mutex);
78ba2faf 3346 /* After this point the interface is legal to use. */
50c812b2 3347 call_smi_watchers(i, intf->si_dev);
b2c03941 3348 mutex_unlock(&smi_watchers_mutex);
1da177e4
LT
3349 }
3350
3351 return rv;
3352}
c70d7499 3353EXPORT_SYMBOL(ipmi_register_smi);
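/*
 * Rough registration sketch for a lower-layer (system interface)
 * driver.  The field names follow struct ipmi_smi_handlers from
 * <linux/ipmi_smi.h>; the my_* names are placeholders and this is
 * only an outline, not a complete driver:
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = my_start_processing,
 *		.sender           = my_sender,
 *		.poll             = my_poll,
 *	};
 *
 *	rv = ipmi_register_smi(&my_handlers, my_info, my_dev, 0x20);
 *
 * Passing 0 as the slave address keeps the IPMI_BMC_SLAVE_ADDR
 * default set above; 0x20 is simply the conventional BMC address.
 * start_processing() is called back with the new intf once the
 * message handler is ready, as seen in ipmi_register_smi() above.
 */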
1da177e4 3354
7ea0ed2b
CM
3355static void deliver_smi_err_response(ipmi_smi_t intf,
3356 struct ipmi_smi_msg *msg,
3357 unsigned char err)
3358{
3359 msg->rsp[0] = msg->data[0] | 4;
3360 msg->rsp[1] = msg->data[1];
3361 msg->rsp[2] = err;
3362 msg->rsp_size = 3;
3363 /* It's an error, so it will never requeue, no need to check return. */
3364 handle_one_recv_msg(intf, msg);
3365}
3366
b2c03941
CM
3367static void cleanup_smi_msgs(ipmi_smi_t intf)
3368{
3369 int i;
3370 struct seq_table *ent;
7ea0ed2b
CM
3371 struct ipmi_smi_msg *msg;
3372 struct list_head *entry;
3373 struct list_head tmplist;
3374
3375 /* Clear out our transmit queues and hold the messages. */
3376 INIT_LIST_HEAD(&tmplist);
3377 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3378 list_splice_tail(&intf->xmit_msgs, &tmplist);
3379
3380 /* Current message first, to preserve order */
3381 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3382 /* Wait for the message to clear out. */
3383 schedule_timeout(1);
3384 }
b2c03941
CM
3385
3386 /* No need for locks, the interface is down. */
7ea0ed2b
CM
3387
3388 /*
3389 * Return errors for all pending messages in queue and in the
3390 * tables waiting for remote responses.
3391 */
3392 while (!list_empty(&tmplist)) {
3393 entry = tmplist.next;
3394 list_del(entry);
3395 msg = list_entry(entry, struct ipmi_smi_msg, link);
3396 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3397 }
3398
b2c03941
CM
3399 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3400 ent = &(intf->seq_table[i]);
3401 if (!ent->inuse)
3402 continue;
3403 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3404 }
3405}
3406
1da177e4
LT
3407int ipmi_unregister_smi(ipmi_smi_t intf)
3408{
1da177e4 3409 struct ipmi_smi_watcher *w;
7ea0ed2b
CM
3410 int intf_num = intf->intf_num;
3411 ipmi_user_t user;
1da177e4 3412
b2c03941 3413 mutex_lock(&smi_watchers_mutex);
bca0324d 3414 mutex_lock(&ipmi_interfaces_mutex);
b2c03941 3415 intf->intf_num = -1;
7ea0ed2b 3416 intf->in_shutdown = true;
bca0324d
CM
3417 list_del_rcu(&intf->link);
3418 mutex_unlock(&ipmi_interfaces_mutex);
3419 synchronize_rcu();
1da177e4 3420
b2c03941
CM
3421 cleanup_smi_msgs(intf);
3422
7ea0ed2b
CM
3423 /* Clean up the effects of users on the lower-level software. */
3424 mutex_lock(&ipmi_interfaces_mutex);
3425 rcu_read_lock();
3426 list_for_each_entry_rcu(user, &intf->users, link) {
3427 module_put(intf->handlers->owner);
3428 if (intf->handlers->dec_usecount)
3429 intf->handlers->dec_usecount(intf->send_info);
3430 }
3431 rcu_read_unlock();
3432 intf->handlers = NULL;
3433 mutex_unlock(&ipmi_interfaces_mutex);
3434
393d2cc3 3435 remove_proc_entries(intf);
bd85f4b3 3436 ipmi_bmc_unregister(intf);
1da177e4 3437
c70d7499
CM
3438 /*
3439 * Call all the watcher interfaces to tell them that
3440 * an interface is gone.
3441 */
393d2cc3 3442 list_for_each_entry(w, &smi_watchers, link)
b2c03941
CM
3443 w->smi_gone(intf_num);
3444 mutex_unlock(&smi_watchers_mutex);
393d2cc3 3445
393d2cc3 3446 kref_put(&intf->refcount, intf_free);
1da177e4
LT
3447 return 0;
3448}
c70d7499 3449EXPORT_SYMBOL(ipmi_unregister_smi);
1da177e4
LT
3450
3451static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
3452 struct ipmi_smi_msg *msg)
3453{
3454 struct ipmi_ipmb_addr ipmb_addr;
3455 struct ipmi_recv_msg *recv_msg;
1da177e4 3456
c70d7499
CM
3457 /*
3458 * This is 11, not 10, because the response must contain a
3459 * completion code.
3460 */
1da177e4
LT
3461 if (msg->rsp_size < 11) {
3462 /* Message not big enough, just ignore it. */
b2655f26 3463 ipmi_inc_stat(intf, invalid_ipmb_responses);
1da177e4
LT
3464 return 0;
3465 }
3466
3467 if (msg->rsp[2] != 0) {
3468 /* An error getting the response, just ignore it. */
3469 return 0;
3470 }
3471
3472 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3473 ipmb_addr.slave_addr = msg->rsp[6];
3474 ipmb_addr.channel = msg->rsp[3] & 0x0f;
3475 ipmb_addr.lun = msg->rsp[7] & 3;
3476
c70d7499
CM
3477 /*
3478 * It's a response from a remote entity. Look up the sequence
3479 * number and handle the response.
3480 */
1da177e4
LT
3481 if (intf_find_seq(intf,
3482 msg->rsp[7] >> 2,
3483 msg->rsp[3] & 0x0f,
3484 msg->rsp[8],
3485 (msg->rsp[4] >> 2) & (~1),
3486 (struct ipmi_addr *) &(ipmb_addr),
3487 &recv_msg)) {
3488 /*
3489 * We were unable to find the sequence number,
3490 * so just nuke the message.
3491 */
b2655f26 3492 ipmi_inc_stat(intf, unhandled_ipmb_responses);
1da177e4
LT
3493 return 0;
3494 }
3495
3496 memcpy(recv_msg->msg_data,
3497 &(msg->rsp[9]),
3498 msg->rsp_size - 9);
c70d7499
CM
3499 /*
3500 * The other fields matched, so no need to set them, except
3501 * for netfn, which needs to be the response that was
3502 * returned, not the request value.
3503 */
1da177e4
LT
3504 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3505 recv_msg->msg.data = recv_msg->msg_data;
3506 recv_msg->msg.data_len = msg->rsp_size - 10;
3507 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
b2655f26 3508 ipmi_inc_stat(intf, handled_ipmb_responses);
1da177e4
LT
3509 deliver_response(recv_msg);
3510
3511 return 0;
3512}
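/*
 * Layout assumed by handle_ipmb_get_msg_rsp() above (a Get Message
 * response carrying an IPMB response): rsp[2] is the completion code,
 * rsp[3] the channel, rsp[4] the netfn/LUN byte, rsp[6] the
 * responder's slave address, rsp[7] the sequence number and LUN,
 * rsp[8] the command, and the response data runs from rsp[9] up to
 * the trailing checksum (hence the "- 10" when copying it out).
 */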
3513
3514static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
3515 struct ipmi_smi_msg *msg)
3516{
393d2cc3
CM
3517 struct cmd_rcvr *rcvr;
3518 int rv = 0;
3519 unsigned char netfn;
3520 unsigned char cmd;
c69c3127 3521 unsigned char chan;
393d2cc3
CM
3522 ipmi_user_t user = NULL;
3523 struct ipmi_ipmb_addr *ipmb_addr;
3524 struct ipmi_recv_msg *recv_msg;
1da177e4
LT
3525
3526 if (msg->rsp_size < 10) {
3527 /* Message not big enough, just ignore it. */
b2655f26 3528 ipmi_inc_stat(intf, invalid_commands);
1da177e4
LT
3529 return 0;
3530 }
3531
3532 if (msg->rsp[2] != 0) {
3533 /* An error getting the response, just ignore it. */
3534 return 0;
3535 }
3536
3537 netfn = msg->rsp[4] >> 2;
3538 cmd = msg->rsp[8];
c69c3127 3539 chan = msg->rsp[3] & 0xf;
1da177e4 3540
e61fb5b6 3541 rcu_read_lock();
c69c3127 3542 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
393d2cc3
CM
3543 if (rcvr) {
3544 user = rcvr->user;
3545 kref_get(&user->refcount);
3546 } else
3547 user = NULL;
e61fb5b6 3548 rcu_read_unlock();
1da177e4
LT
3549
3550 if (user == NULL) {
3551 /* We didn't find a user, deliver an error response. */
b2655f26 3552 ipmi_inc_stat(intf, unhandled_commands);
1da177e4
LT
3553
3554 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3555 msg->data[1] = IPMI_SEND_MSG_CMD;
3556 msg->data[2] = msg->rsp[3];
3557 msg->data[3] = msg->rsp[6];
c70d7499 3558 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
1da177e4 3559 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
c14979b9 3560 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
c70d7499
CM
3561 /* rqseq/lun */
3562 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
1da177e4
LT
3563 msg->data[8] = msg->rsp[8]; /* cmd */
3564 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3565 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
3566 msg->data_size = 11;
3567
3568#ifdef DEBUG_MSGING
3569 {
3570 int m;
3571 printk("Invalid command:");
e8b33617 3572 for (m = 0; m < msg->data_size; m++)
3573 printk(" %2.2x", msg->data[m]);
3574 printk("\n");
3575 }
3576#endif
b2c03941 3577 rcu_read_lock();
7ea0ed2b
CM
3578 if (!intf->in_shutdown) {
3579 smi_send(intf, intf->handlers, msg, 0);
c70d7499
CM
3580 /*
3581 * We used the message, so return the value
3582 * that causes it to not be freed or
3583 * queued.
3584 */
b2c03941
CM
3585 rv = -1;
3586 }
3587 rcu_read_unlock();
1da177e4
LT
3588 } else {
3589 /* Deliver the message to the user. */
b2655f26 3590 ipmi_inc_stat(intf, handled_commands);
1da177e4
LT
3591
3592 recv_msg = ipmi_alloc_recv_msg();
8a3628d5 3593 if (!recv_msg) {
c70d7499
CM
3594 /*
3595 * We couldn't allocate memory for the
3596 * message, so requeue it for handling
3597 * later.
3598 */
1da177e4 3599 rv = 1;
393d2cc3 3600 kref_put(&user->refcount, free_user);
1da177e4
LT
3601 } else {
3602 /* Extract the source address from the data. */
3603 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3604 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3605 ipmb_addr->slave_addr = msg->rsp[6];
3606 ipmb_addr->lun = msg->rsp[7] & 3;
3607 ipmb_addr->channel = msg->rsp[3] & 0xf;
3608
c70d7499
CM
3609 /*
3610 * Extract the rest of the message information
3611 * from the IPMB header.
3612 */
1da177e4
LT
3613 recv_msg->user = user;
3614 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3615 recv_msg->msgid = msg->rsp[7] >> 2;
3616 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3617 recv_msg->msg.cmd = msg->rsp[8];
3618 recv_msg->msg.data = recv_msg->msg_data;
3619
c70d7499
CM
3620 /*
3621 * We chop off 10, not 9 bytes because the checksum
3622 * at the end also needs to be removed.
3623 */
1da177e4
LT
3624 recv_msg->msg.data_len = msg->rsp_size - 10;
3625 memcpy(recv_msg->msg_data,
3626 &(msg->rsp[9]),
3627 msg->rsp_size - 10);
3628 deliver_response(recv_msg);
3629 }
3630 }
3631
3632 return rv;
3633}
3634
3635static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3636 struct ipmi_smi_msg *msg)
3637{
3638 struct ipmi_lan_addr lan_addr;
3639 struct ipmi_recv_msg *recv_msg;
1da177e4
LT
3640
3641
c70d7499
CM
3642 /*
3643 * This is 13, not 12, because the response must contain a
3644 * completion code.
3645 */
1da177e4
LT
3646 if (msg->rsp_size < 13) {
3647 /* Message not big enough, just ignore it. */
b2655f26 3648 ipmi_inc_stat(intf, invalid_lan_responses);
1da177e4
LT
3649 return 0;
3650 }
3651
3652 if (msg->rsp[2] != 0) {
3653 /* An error getting the response, just ignore it. */
3654 return 0;
3655 }
3656
3657 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3658 lan_addr.session_handle = msg->rsp[4];
3659 lan_addr.remote_SWID = msg->rsp[8];
3660 lan_addr.local_SWID = msg->rsp[5];
3661 lan_addr.channel = msg->rsp[3] & 0x0f;
3662 lan_addr.privilege = msg->rsp[3] >> 4;
3663 lan_addr.lun = msg->rsp[9] & 3;
3664
c70d7499
CM
3665 /*
3666 * It's a response from a remote entity. Look up the sequence
3667 * number and handle the response.
3668 */
1da177e4
LT
3669 if (intf_find_seq(intf,
3670 msg->rsp[9] >> 2,
3671 msg->rsp[3] & 0x0f,
3672 msg->rsp[10],
3673 (msg->rsp[6] >> 2) & (~1),
3674 (struct ipmi_addr *) &(lan_addr),
c70d7499
CM
3675 &recv_msg)) {
3676 /*
3677 * We were unable to find the sequence number,
3678 * so just nuke the message.
3679 */
b2655f26 3680 ipmi_inc_stat(intf, unhandled_lan_responses);
1da177e4
LT
3681 return 0;
3682 }
3683
3684 memcpy(recv_msg->msg_data,
3685 &(msg->rsp[11]),
3686 msg->rsp_size - 11);
c70d7499
CM
3687 /*
3688 * The other fields matched, so no need to set them, except
3689 * for netfn, which needs to be the response that was
3690 * returned, not the request value.
3691 */
1da177e4
LT
3692 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3693 recv_msg->msg.data = recv_msg->msg_data;
3694 recv_msg->msg.data_len = msg->rsp_size - 12;
3695 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
b2655f26 3696 ipmi_inc_stat(intf, handled_lan_responses);
1da177e4
LT
3697 deliver_response(recv_msg);
3698
3699 return 0;
3700}
3701
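/*
 * As in the IPMB case above, the offsets used below describe the layout of
 * a LAN-delivered command inside the "Get Message" response:
 *   rsp[2]                      completion code
 *   rsp[3]                      channel (low nibble) / privilege (high nibble)
 *   rsp[4]                      session handle
 *   rsp[5]                      local SWID
 *   rsp[6] >> 2                 netfn
 *   rsp[8]                      remote SWID
 *   rsp[9]                      rqSeq (upper 6 bits) / LUN (lower 2 bits)
 *   rsp[10]                     command
 *   rsp[11]..rsp[rsp_size - 2]  message data (trailing checksum removed)
 */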
3702static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3703 struct ipmi_smi_msg *msg)
3704{
393d2cc3
CM
3705 struct cmd_rcvr *rcvr;
3706 int rv = 0;
3707 unsigned char netfn;
3708 unsigned char cmd;
c69c3127 3709 unsigned char chan;
393d2cc3
CM
3710 ipmi_user_t user = NULL;
3711 struct ipmi_lan_addr *lan_addr;
3712 struct ipmi_recv_msg *recv_msg;
1da177e4
LT
3713
3714 if (msg->rsp_size < 12) {
3715 /* Message not big enough, just ignore it. */
b2655f26 3716 ipmi_inc_stat(intf, invalid_commands);
1da177e4
LT
3717 return 0;
3718 }
3719
3720 if (msg->rsp[2] != 0) {
3721 /* An error getting the response, just ignore it. */
3722 return 0;
3723 }
3724
3725 netfn = msg->rsp[6] >> 2;
3726 cmd = msg->rsp[10];
c69c3127 3727 chan = msg->rsp[3] & 0xf;
1da177e4 3728
e61fb5b6 3729 rcu_read_lock();
c69c3127 3730 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
393d2cc3
CM
3731 if (rcvr) {
3732 user = rcvr->user;
3733 kref_get(&user->refcount);
3734 } else
3735 user = NULL;
e61fb5b6 3736 rcu_read_unlock();
1da177e4
LT
3737
3738 if (user == NULL) {
393d2cc3 3739 /* We didn't find a user, just give up. */
b2655f26 3740 ipmi_inc_stat(intf, unhandled_commands);
1da177e4 3741
c70d7499
CM
3742 /*
3743 * Don't do anything with these messages, just allow
3744 * them to be freed.
3745 */
3746 rv = 0;
1da177e4
LT
3747 } else {
3748 /* Deliver the message to the user. */
b2655f26 3749 ipmi_inc_stat(intf, handled_commands);
1da177e4
LT
3750
3751 recv_msg = ipmi_alloc_recv_msg();
8a3628d5 3752 if (!recv_msg) {
c70d7499
CM
3753 /*
3754 * We couldn't allocate memory for the
3755 * message, so requeue it for handling later.
3756 */
1da177e4 3757 rv = 1;
393d2cc3 3758 kref_put(&user->refcount, free_user);
1da177e4
LT
3759 } else {
3760 /* Extract the source address from the data. */
3761 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3762 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3763 lan_addr->session_handle = msg->rsp[4];
3764 lan_addr->remote_SWID = msg->rsp[8];
3765 lan_addr->local_SWID = msg->rsp[5];
3766 lan_addr->lun = msg->rsp[9] & 3;
3767 lan_addr->channel = msg->rsp[3] & 0xf;
3768 lan_addr->privilege = msg->rsp[3] >> 4;
3769
c70d7499
CM
3770 /*
3771 * Extract the rest of the message information
3772 * from the IPMB header.
3773 */
1da177e4
LT
3774 recv_msg->user = user;
3775 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3776 recv_msg->msgid = msg->rsp[9] >> 2;
3777 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3778 recv_msg->msg.cmd = msg->rsp[10];
3779 recv_msg->msg.data = recv_msg->msg_data;
3780
c70d7499
CM
3781 /*
3782 * We chop off 12, not 11 bytes because the checksum
3783 * at the end also needs to be removed.
3784 */
1da177e4
LT
3785 recv_msg->msg.data_len = msg->rsp_size - 12;
3786 memcpy(recv_msg->msg_data,
3787 &(msg->rsp[11]),
3788 msg->rsp_size - 12);
3789 deliver_response(recv_msg);
3790 }
3791 }
3792
3793 return rv;
3794}
3795
4dec302f 3796/*
3797 * This routine will handle "Get Message" command responses with
3798 * channels that use an OEM Medium. The message format belongs to
3799 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3800 * Chapter 22, sections 22.6 and 22.24 for more details.
3801 */
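/*
 * Besides the completion code in rsp[2], only rsp[0] (netfn), rsp[1] (cmd)
 * and rsp[3] (channel) are examined here to find a registered receiver;
 * the payload from rsp[4] onward is copied through to the user untouched.
 */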
3802static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
3803 struct ipmi_smi_msg *msg)
3804{
3805 struct cmd_rcvr *rcvr;
3806 int rv = 0;
3807 unsigned char netfn;
3808 unsigned char cmd;
3809 unsigned char chan;
3810 ipmi_user_t user = NULL;
3811 struct ipmi_system_interface_addr *smi_addr;
3812 struct ipmi_recv_msg *recv_msg;
3813
3814 /*
3815	 * We expect the OEM SW to perform error checking,
3816	 * so we just do some basic sanity checks.
3817 */
3818 if (msg->rsp_size < 4) {
3819 /* Message not big enough, just ignore it. */
3820 ipmi_inc_stat(intf, invalid_commands);
3821 return 0;
3822 }
3823
3824 if (msg->rsp[2] != 0) {
3825 /* An error getting the response, just ignore it. */
3826 return 0;
3827 }
3828
3829 /*
3830	 * This is an OEM Message, so the OEM needs to know how
3831	 * to handle the message. We do no interpretation.
3832 */
3833 netfn = msg->rsp[0] >> 2;
3834 cmd = msg->rsp[1];
3835 chan = msg->rsp[3] & 0xf;
3836
3837 rcu_read_lock();
3838 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3839 if (rcvr) {
3840 user = rcvr->user;
3841 kref_get(&user->refcount);
3842 } else
3843 user = NULL;
3844 rcu_read_unlock();
3845
3846 if (user == NULL) {
3847 /* We didn't find a user, just give up. */
3848 ipmi_inc_stat(intf, unhandled_commands);
3849
3850 /*
3851 * Don't do anything with these messages, just allow
3852 * them to be freed.
3853 */
3854
3855 rv = 0;
3856 } else {
3857 /* Deliver the message to the user. */
3858 ipmi_inc_stat(intf, handled_commands);
3859
3860 recv_msg = ipmi_alloc_recv_msg();
3861 if (!recv_msg) {
3862 /*
3863 * We couldn't allocate memory for the
3864 * message, so requeue it for handling
3865 * later.
3866 */
3867 rv = 1;
3868 kref_put(&user->refcount, free_user);
3869 } else {
3870 /*
3871 * OEM Messages are expected to be delivered via
3872 * the system interface to SMS software. We might
3873 * need to visit this again depending on OEM
3874 * requirements
3875 */
3876 smi_addr = ((struct ipmi_system_interface_addr *)
3877 &(recv_msg->addr));
3878 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3879 smi_addr->channel = IPMI_BMC_CHANNEL;
3880 smi_addr->lun = msg->rsp[0] & 3;
3881
3882 recv_msg->user = user;
3883 recv_msg->user_msg_data = NULL;
3884 recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3885 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3886 recv_msg->msg.cmd = msg->rsp[1];
3887 recv_msg->msg.data = recv_msg->msg_data;
3888
3889 /*
3890			 * The message starts at byte 4, which follows the
3891			 * Channel Byte in the "GET MESSAGE" command.
3892 */
3893 recv_msg->msg.data_len = msg->rsp_size - 4;
3894 memcpy(recv_msg->msg_data,
3895 &(msg->rsp[4]),
3896 msg->rsp_size - 4);
3897 deliver_response(recv_msg);
3898 }
3899 }
3900
3901 return rv;
3902}
3903
1da177e4
LT
3904static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3905 struct ipmi_smi_msg *msg)
3906{
3907 struct ipmi_system_interface_addr *smi_addr;
c70d7499 3908
1da177e4
LT
3909 recv_msg->msgid = 0;
3910 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3911 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3912 smi_addr->channel = IPMI_BMC_CHANNEL;
3913 smi_addr->lun = msg->rsp[0] & 3;
3914 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3915 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3916 recv_msg->msg.cmd = msg->rsp[1];
3917 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3918 recv_msg->msg.data = recv_msg->msg_data;
3919 recv_msg->msg.data_len = msg->rsp_size - 3;
3920}
3921
1da177e4
LT
3922static int handle_read_event_rsp(ipmi_smi_t intf,
3923 struct ipmi_smi_msg *msg)
3924{
3925 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3926 struct list_head msgs;
3927 ipmi_user_t user;
3928 int rv = 0;
3929 int deliver_count = 0;
3930 unsigned long flags;
3931
3932 if (msg->rsp_size < 19) {
3933 /* Message is too small to be an IPMB event. */
b2655f26 3934 ipmi_inc_stat(intf, invalid_events);
1da177e4
LT
3935 return 0;
3936 }
3937
3938 if (msg->rsp[2] != 0) {
3939 /* An error getting the event, just ignore it. */
3940 return 0;
3941 }
3942
3943 INIT_LIST_HEAD(&msgs);
3944
393d2cc3 3945 spin_lock_irqsave(&intf->events_lock, flags);
1da177e4 3946
b2655f26 3947 ipmi_inc_stat(intf, events);
1da177e4 3948
c70d7499
CM
3949 /*
3950 * Allocate and fill in one message for every user that is
3951 * getting events.
3952 */
393d2cc3
CM
3953 rcu_read_lock();
3954 list_for_each_entry_rcu(user, &intf->users, link) {
8a3628d5 3955 if (!user->gets_events)
1da177e4
LT
3956 continue;
3957
3958 recv_msg = ipmi_alloc_recv_msg();
8a3628d5 3959 if (!recv_msg) {
393d2cc3 3960 rcu_read_unlock();
8a3628d5
CM
3961 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3962 link) {
1da177e4
LT
3963 list_del(&recv_msg->link);
3964 ipmi_free_recv_msg(recv_msg);
3965 }
c70d7499
CM
3966 /*
3967 * We couldn't allocate memory for the
3968 * message, so requeue it for handling
3969 * later.
3970 */
1da177e4
LT
3971 rv = 1;
3972 goto out;
3973 }
3974
3975 deliver_count++;
3976
3977 copy_event_into_recv_msg(recv_msg, msg);
3978 recv_msg->user = user;
393d2cc3 3979 kref_get(&user->refcount);
1da177e4
LT
3980 list_add_tail(&(recv_msg->link), &msgs);
3981 }
393d2cc3 3982 rcu_read_unlock();
1da177e4
LT
3983
3984 if (deliver_count) {
3985 /* Now deliver all the messages. */
3986 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3987 list_del(&recv_msg->link);
3988 deliver_response(recv_msg);
3989 }
3990 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
c70d7499
CM
3991 /*
3992		 * No one to receive the message; put it in the queue if there
3993		 * aren't already too many things in it.
3994 */
1da177e4 3995 recv_msg = ipmi_alloc_recv_msg();
8a3628d5 3996 if (!recv_msg) {
c70d7499
CM
3997 /*
3998 * We couldn't allocate memory for the
3999 * message, so requeue it for handling
4000 * later.
4001 */
1da177e4
LT
4002 rv = 1;
4003 goto out;
4004 }
4005
4006 copy_event_into_recv_msg(recv_msg, msg);
4007 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
4791c03d 4008 intf->waiting_events_count++;
87ebd06f 4009 } else if (!intf->event_msg_printed) {
c70d7499
CM
4010 /*
4011		 * There are too many things in the queue; discard this
4012		 * message.
4013 */
87ebd06f
CM
4014 printk(KERN_WARNING PFX "Event queue full, discarding"
4015 " incoming events\n");
4016 intf->event_msg_printed = 1;
1da177e4
LT
4017 }
4018
4019 out:
4020 spin_unlock_irqrestore(&(intf->events_lock), flags);
4021
4022 return rv;
4023}
4024
4025static int handle_bmc_rsp(ipmi_smi_t intf,
4026 struct ipmi_smi_msg *msg)
4027{
4028 struct ipmi_recv_msg *recv_msg;
393d2cc3 4029 struct ipmi_user *user;
1da177e4
LT
4030
4031 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
c70d7499
CM
4032 if (recv_msg == NULL) {
4033 printk(KERN_WARNING
4034 "IPMI message received with no owner. This\n"
4035 "could be because of a malformed message, or\n"
4036 "because of a hardware error. Contact your\n"
4037		       "hardware vendor for assistance\n");
56a55ec6
CM
4038 return 0;
4039 }
1da177e4 4040
393d2cc3 4041 user = recv_msg->user;
1da177e4 4042 /* Make sure the user still exists. */
393d2cc3 4043 if (user && !user->valid) {
56a55ec6 4044 /* The user for the message went away, so give up. */
b2655f26 4045 ipmi_inc_stat(intf, unhandled_local_responses);
1da177e4
LT
4046 ipmi_free_recv_msg(recv_msg);
4047 } else {
4048 struct ipmi_system_interface_addr *smi_addr;
4049
b2655f26 4050 ipmi_inc_stat(intf, handled_local_responses);
1da177e4
LT
4051 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4052 recv_msg->msgid = msg->msgid;
4053 smi_addr = ((struct ipmi_system_interface_addr *)
4054 &(recv_msg->addr));
4055 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4056 smi_addr->channel = IPMI_BMC_CHANNEL;
4057 smi_addr->lun = msg->rsp[0] & 3;
4058 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4059 recv_msg->msg.cmd = msg->rsp[1];
4060 memcpy(recv_msg->msg_data,
4061 &(msg->rsp[2]),
4062 msg->rsp_size - 2);
4063 recv_msg->msg.data = recv_msg->msg_data;
4064 recv_msg->msg.data_len = msg->rsp_size - 2;
4065 deliver_response(recv_msg);
4066 }
4067
4068 return 0;
4069}
4070
c70d7499 4071/*
7adf579c 4072 * Handle a received message. Return 1 if the message should be requeued,
c70d7499
CM
4073 * 0 if the message should be freed, or -1 if the message should not
4074 * be freed or requeued.
4075 */
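/*
 * (The caller, handle_new_recv_msgs(), acts on this return value: it frees
 * the SMI message on 0, puts it back at the head of waiting_rcv_msgs on 1
 * to preserve ordering, and neither frees nor requeues it on -1.)
 */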
7adf579c 4076static int handle_one_recv_msg(ipmi_smi_t intf,
1da177e4
LT
4077 struct ipmi_smi_msg *msg)
4078{
4079 int requeue;
4080 int chan;
4081
4082#ifdef DEBUG_MSGING
4083 int m;
4084 printk("Recv:");
e8b33617 4085 for (m = 0; m < msg->rsp_size; m++)
1da177e4
LT
4086 printk(" %2.2x", msg->rsp[m]);
4087 printk("\n");
4088#endif
4089 if (msg->rsp_size < 2) {
4090 /* Message is too small to be correct. */
4091		printk(KERN_WARNING PFX "BMC returned too small a message"
4092 " for netfn %x cmd %x, got %d bytes\n",
4093 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4094
4095 /* Generate an error response for the message. */
4096 msg->rsp[0] = msg->data[0] | (1 << 2);
4097 msg->rsp[1] = msg->data[1];
4098 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4099 msg->rsp_size = 3;
c70d7499
CM
4100 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4101 || (msg->rsp[1] != msg->data[1])) {
4102 /*
4103		 * The NetFN and Command in the response are not even
4104 * marginally correct.
4105 */
1da177e4
LT
4106 printk(KERN_WARNING PFX "BMC returned incorrect response,"
4107 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
4108 (msg->data[0] >> 2) | 1, msg->data[1],
4109 msg->rsp[0] >> 2, msg->rsp[1]);
4110
4111 /* Generate an error response for the message. */
4112 msg->rsp[0] = msg->data[0] | (1 << 2);
4113 msg->rsp[1] = msg->data[1];
4114 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4115 msg->rsp_size = 3;
4116 }
4117
4118 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4119 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
c70d7499
CM
4120 && (msg->user_data != NULL)) {
4121 /*
4122 * It's a response to a response we sent. For this we
4123 * deliver a send message response to the user.
4124 */
393d2cc3 4125 struct ipmi_recv_msg *recv_msg = msg->user_data;
1da177e4
LT
4126
4127 requeue = 0;
4128 if (msg->rsp_size < 2)
4129 /* Message is too small to be correct. */
4130 goto out;
4131
4132 chan = msg->data[2] & 0x0f;
4133 if (chan >= IPMI_MAX_CHANNELS)
4134 /* Invalid channel number */
4135 goto out;
4136
393d2cc3
CM
4137 if (!recv_msg)
4138 goto out;
4139
4140 /* Make sure the user still exists. */
4141 if (!recv_msg->user || !recv_msg->user->valid)
4142 goto out;
4143
4144 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4145 recv_msg->msg.data = recv_msg->msg_data;
4146 recv_msg->msg.data_len = 1;
4147 recv_msg->msg_data[0] = msg->rsp[2];
4148 deliver_response(recv_msg);
1da177e4 4149 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
c70d7499 4150 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
1da177e4
LT
4151 /* It's from the receive queue. */
4152 chan = msg->rsp[3] & 0xf;
4153 if (chan >= IPMI_MAX_CHANNELS) {
4154 /* Invalid channel number */
4155 requeue = 0;
4156 goto out;
4157 }
4158
4dec302f 4159 /*
9a2845c4
CM
4160 * We need to make sure the channels have been initialized.
4161 * The channel_handler routine will set the "curr_channel"
4162 * equal to or greater than IPMI_MAX_CHANNELS when all the
4163 * channels for this interface have been initialized.
4164 */
4dec302f 4165 if (intf->curr_channel < IPMI_MAX_CHANNELS) {
9a2845c4 4166 requeue = 0; /* Throw the message away */
4dec302f 4167 goto out;
4168 }
4169
1da177e4
LT
4170 switch (intf->channels[chan].medium) {
4171 case IPMI_CHANNEL_MEDIUM_IPMB:
4172 if (msg->rsp[4] & 0x04) {
c70d7499
CM
4173 /*
4174 * It's a response, so find the
4175 * requesting message and send it up.
4176 */
1da177e4
LT
4177 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4178 } else {
c70d7499
CM
4179 /*
4180 * It's a command to the SMS from some other
4181 * entity. Handle that.
4182 */
1da177e4
LT
4183 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4184 }
4185 break;
4186
4187 case IPMI_CHANNEL_MEDIUM_8023LAN:
4188 case IPMI_CHANNEL_MEDIUM_ASYNC:
4189 if (msg->rsp[6] & 0x04) {
c70d7499
CM
4190 /*
4191 * It's a response, so find the
4192 * requesting message and send it up.
4193 */
1da177e4
LT
4194 requeue = handle_lan_get_msg_rsp(intf, msg);
4195 } else {
c70d7499
CM
4196 /*
4197 * It's a command to the SMS from some other
4198 * entity. Handle that.
4199 */
1da177e4
LT
4200 requeue = handle_lan_get_msg_cmd(intf, msg);
4201 }
4202 break;
4203
4204 default:
4dec302f 4205 /* Check for OEM Channels. Clients had better
4206 register for these commands. */
4207 if ((intf->channels[chan].medium
4208 >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4209 && (intf->channels[chan].medium
4210 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4211 requeue = handle_oem_get_msg_cmd(intf, msg);
4212 } else {
4213 /*
4214 * We don't handle the channel type, so just
4215 * free the message.
4216 */
4217 requeue = 0;
4218 }
1da177e4
LT
4219 }
4220
4221 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
c70d7499 4222 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
b3834be5 4223 /* It's an asynchronous event. */
1da177e4
LT
4224 requeue = handle_read_event_rsp(intf, msg);
4225 } else {
4226 /* It's a response from the local BMC. */
4227 requeue = handle_bmc_rsp(intf, msg);
4228 }
4229
4230 out:
4231 return requeue;
4232}
4233
7adf579c
CM
4234/*
4235 * If there are messages in the queue or pretimeouts, handle them.
4236 */
4237static void handle_new_recv_msgs(ipmi_smi_t intf)
4238{
4239 struct ipmi_smi_msg *smi_msg;
4240 unsigned long flags = 0;
4241 int rv;
4242 int run_to_completion = intf->run_to_completion;
4243
4244 /* See if any waiting messages need to be processed. */
4245 if (!run_to_completion)
65be7544
CM
4246 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4247 while (!list_empty(&intf->waiting_rcv_msgs)) {
4248 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
7adf579c 4249 struct ipmi_smi_msg, link);
ae4ea9a2 4250 list_del(&smi_msg->link);
7adf579c 4251 if (!run_to_completion)
65be7544
CM
4252 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4253 flags);
7adf579c
CM
4254 rv = handle_one_recv_msg(intf, smi_msg);
4255 if (!run_to_completion)
65be7544 4256 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
7ea0ed2b 4257 if (rv > 0) {
7adf579c
CM
4258 /*
4259 * To preserve message order, quit if we
ae4ea9a2
JN
4260 * can't handle a message. Add the message
4261			 * back at the head; this is safe because this
4262 * tasklet is the only thing that pulls the
4263 * messages.
7adf579c 4264 */
ae4ea9a2 4265 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
7adf579c 4266 break;
7ea0ed2b 4267 } else {
7ea0ed2b
CM
4268 if (rv == 0)
4269 /* Message handled */
4270 ipmi_free_smi_msg(smi_msg);
4271 /* If rv < 0, fatal error, del but don't free. */
7adf579c
CM
4272 }
4273 }
4274 if (!run_to_completion)
65be7544 4275 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
7adf579c
CM
4276
4277 /*
4278	 * If the pretimeout count is non-zero, decrement one from it and
4279 * deliver pretimeouts to all the users.
4280 */
4281 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4282 ipmi_user_t user;
4283
4284 rcu_read_lock();
4285 list_for_each_entry_rcu(user, &intf->users, link) {
4286 if (user->handler->ipmi_watchdog_pretimeout)
4287 user->handler->ipmi_watchdog_pretimeout(
4288 user->handler_data);
4289 }
4290 rcu_read_unlock();
4291 }
4292}
4293
4294static void smi_recv_tasklet(unsigned long val)
4295{
7ea0ed2b
CM
4296 unsigned long flags = 0; /* keep us warning-free. */
4297 ipmi_smi_t intf = (ipmi_smi_t) val;
4298 int run_to_completion = intf->run_to_completion;
4299 struct ipmi_smi_msg *newmsg = NULL;
4300
4301 /*
4302 * Start the next message if available.
4303 *
4304 * Do this here, not in the actual receiver, because we may deadlock
4305 * because the lower layer is allowed to hold locks while calling
4306 * message delivery.
4307 */
cdea4656
TC
4308
4309 rcu_read_lock();
4310
7ea0ed2b
CM
4311 if (!run_to_completion)
4312 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4313 if (intf->curr_msg == NULL && !intf->in_shutdown) {
4314 struct list_head *entry = NULL;
4315
4316 /* Pick the high priority queue first. */
4317 if (!list_empty(&intf->hp_xmit_msgs))
4318 entry = intf->hp_xmit_msgs.next;
4319 else if (!list_empty(&intf->xmit_msgs))
4320 entry = intf->xmit_msgs.next;
4321
4322 if (entry) {
4323 list_del(entry);
4324 newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4325 intf->curr_msg = newmsg;
4326 }
4327 }
4328 if (!run_to_completion)
4329 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4330 if (newmsg)
99ab32f3 4331 intf->handlers->sender(intf->send_info, newmsg);
7ea0ed2b 4332
cdea4656
TC
4333 rcu_read_unlock();
4334
7ea0ed2b 4335 handle_new_recv_msgs(intf);
7adf579c
CM
4336}
4337
1da177e4
LT
4338/* Handle a new message from the lower layer. */
4339void ipmi_smi_msg_received(ipmi_smi_t intf,
4340 struct ipmi_smi_msg *msg)
4341{
5956dce1 4342 unsigned long flags = 0; /* keep us warning-free. */
7ea0ed2b 4343 int run_to_completion = intf->run_to_completion;
1da177e4 4344
1da177e4
LT
4345 if ((msg->data_size >= 2)
4346 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4347 && (msg->data[1] == IPMI_SEND_MSG_CMD)
c70d7499 4348 && (msg->user_data == NULL)) {
7ea0ed2b
CM
4349
4350 if (intf->in_shutdown)
4351 goto free_msg;
4352
c70d7499
CM
4353 /*
4354 * This is the local response to a command send, start
4355 * the timer for these. The user_data will not be
4356 * NULL if this is a response send, and we will let
4357 * response sends just go through.
4358 */
4359
4360 /*
4361 * Check for errors, if we get certain errors (ones
4362 * that mean basically we can try again later), we
4363 * ignore them and start the timer. Otherwise we
4364 * report the error immediately.
4365 */
1da177e4
LT
4366 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4367 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
46d52b09
CM
4368 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4369 && (msg->rsp[2] != IPMI_BUS_ERR)
c70d7499 4370 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
1da177e4
LT
4371 int chan = msg->rsp[3] & 0xf;
4372
4373 /* Got an error sending the message, handle it. */
1da177e4
LT
4374 if (chan >= IPMI_MAX_CHANNELS)
4375 ; /* This shouldn't happen */
4376 else if ((intf->channels[chan].medium
4377 == IPMI_CHANNEL_MEDIUM_8023LAN)
4378 || (intf->channels[chan].medium
4379 == IPMI_CHANNEL_MEDIUM_ASYNC))
b2655f26 4380 ipmi_inc_stat(intf, sent_lan_command_errs);
1da177e4 4381 else
b2655f26 4382 ipmi_inc_stat(intf, sent_ipmb_command_errs);
1da177e4 4383 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
c70d7499 4384 } else
1da177e4
LT
4385 /* The message was sent, start the timer. */
4386 intf_start_seq_timer(intf, msg->msgid);
1da177e4 4387
7ea0ed2b 4388free_msg:
1da177e4 4389 ipmi_free_smi_msg(msg);
7ea0ed2b
CM
4390 } else {
4391 /*
4392 * To preserve message order, we keep a queue and deliver from
4393 * a tasklet.
4394 */
4395 if (!run_to_completion)
4396 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4397 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4398 if (!run_to_completion)
4399 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4400 flags);
1da177e4
LT
4401 }
4402
5956dce1 4403 if (!run_to_completion)
7ea0ed2b 4404 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
b2234ee9
CM
4405 /*
4406 * We can get an asynchronous event or receive message in addition
4407 * to commands we send.
4408 */
7ea0ed2b
CM
4409 if (msg == intf->curr_msg)
4410 intf->curr_msg = NULL;
5956dce1 4411 if (!run_to_completion)
7ea0ed2b 4412 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
c70d7499 4413
7ea0ed2b
CM
4414 if (run_to_completion)
4415 smi_recv_tasklet((unsigned long) intf);
4416 else
4417 tasklet_schedule(&intf->recv_tasklet);
1da177e4 4418}
c70d7499 4419EXPORT_SYMBOL(ipmi_smi_msg_received);
1da177e4
LT
4420
4421void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
4422{
7ea0ed2b
CM
4423 if (intf->in_shutdown)
4424 return;
4425
7adf579c
CM
4426 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4427 tasklet_schedule(&intf->recv_tasklet);
1da177e4 4428}
c70d7499 4429EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
1da177e4 4430
882fe011
CM
4431static struct ipmi_smi_msg *
4432smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
4433 unsigned char seq, long seqid)
1da177e4 4434{
882fe011 4435 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
1da177e4 4436 if (!smi_msg)
c70d7499
CM
4437 /*
4438		 * If we can't allocate the message, then just return; we
4439 * get 4 retries, so this should be ok.
4440 */
882fe011 4441 return NULL;
1da177e4
LT
4442
4443 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4444 smi_msg->data_size = recv_msg->msg.data_len;
4445 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
c70d7499 4446
1da177e4
LT
4447#ifdef DEBUG_MSGING
4448 {
4449 int m;
4450 printk("Resend: ");
e8b33617 4451 for (m = 0; m < smi_msg->data_size; m++)
1da177e4
LT
4452 printk(" %2.2x", smi_msg->data[m]);
4453 printk("\n");
4454 }
4455#endif
882fe011 4456 return smi_msg;
1da177e4
LT
4457}
4458
393d2cc3 4459static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
392a17b1
CM
4460 struct list_head *timeouts,
4461 unsigned long timeout_period,
89986496
CM
4462 int slot, unsigned long *flags,
4463 unsigned int *waiting_msgs)
393d2cc3 4464{
b2c03941 4465 struct ipmi_recv_msg *msg;
81d02b7f 4466 const struct ipmi_smi_handlers *handlers;
b2c03941 4467
7ea0ed2b 4468 if (intf->in_shutdown)
b2c03941 4469 return;
393d2cc3
CM
4470
4471 if (!ent->inuse)
4472 return;
4473
392a17b1
CM
4474 if (timeout_period < ent->timeout) {
4475 ent->timeout -= timeout_period;
89986496 4476 (*waiting_msgs)++;
393d2cc3 4477 return;
89986496 4478 }
393d2cc3
CM
4479
4480 if (ent->retries_left == 0) {
4481 /* The message has used all its retries. */
4482 ent->inuse = 0;
4483 msg = ent->recv_msg;
4484 list_add_tail(&msg->link, timeouts);
393d2cc3 4485 if (ent->broadcast)
b2655f26 4486 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
25176ed6 4487 else if (is_lan_addr(&ent->recv_msg->addr))
b2655f26 4488 ipmi_inc_stat(intf, timed_out_lan_commands);
393d2cc3 4489 else
b2655f26 4490 ipmi_inc_stat(intf, timed_out_ipmb_commands);
393d2cc3
CM
4491 } else {
4492 struct ipmi_smi_msg *smi_msg;
4493 /* More retries, send again. */
4494
89986496
CM
4495 (*waiting_msgs)++;
4496
c70d7499
CM
4497 /*
4498 * Start with the max timer, set to normal timer after
4499 * the message is sent.
4500 */
393d2cc3
CM
4501 ent->timeout = MAX_MSG_TIMEOUT;
4502 ent->retries_left--;
393d2cc3
CM
4503 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4504 ent->seqid);
25176ed6
CM
4505 if (!smi_msg) {
4506 if (is_lan_addr(&ent->recv_msg->addr))
4507 ipmi_inc_stat(intf,
4508 dropped_rexmit_lan_commands);
4509 else
4510 ipmi_inc_stat(intf,
4511 dropped_rexmit_ipmb_commands);
393d2cc3 4512 return;
25176ed6 4513 }
393d2cc3
CM
4514
4515 spin_unlock_irqrestore(&intf->seq_lock, *flags);
b2c03941 4516
c70d7499
CM
4517 /*
4518 * Send the new message. We send with a zero
4519		 * priority. It timed out; I doubt time is that
4520 * critical now, and high priority messages are really
4521 * only for messages to the local MC, which don't get
4522 * resent.
4523 */
b2c03941 4524 handlers = intf->handlers;
25176ed6
CM
4525 if (handlers) {
4526 if (is_lan_addr(&ent->recv_msg->addr))
4527 ipmi_inc_stat(intf,
4528 retransmitted_lan_commands);
4529 else
4530 ipmi_inc_stat(intf,
4531 retransmitted_ipmb_commands);
4532
81d02b7f 4533 smi_send(intf, handlers, smi_msg, 0);
25176ed6 4534 } else
b2c03941
CM
4535 ipmi_free_smi_msg(smi_msg);
4536
393d2cc3
CM
4537 spin_lock_irqsave(&intf->seq_lock, *flags);
4538 }
4539}
4540
392a17b1
CM
4541static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
4542 unsigned long timeout_period)
1da177e4 4543{
1da177e4
LT
4544 struct list_head timeouts;
4545 struct ipmi_recv_msg *msg, *msg2;
1da177e4 4546 unsigned long flags;
bca0324d 4547 int i;
89986496 4548 unsigned int waiting_msgs = 0;
1da177e4 4549
89986496
CM
4550 /*
4551 * Go through the seq table and find any messages that
4552 * have timed out, putting them in the timeouts
4553 * list.
4554 */
4555 INIT_LIST_HEAD(&timeouts);
4556 spin_lock_irqsave(&intf->seq_lock, flags);
4557 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4558 check_msg_timeout(intf, &(intf->seq_table[i]),
4559 &timeouts, timeout_period, i,
4560 &flags, &waiting_msgs);
4561 spin_unlock_irqrestore(&intf->seq_lock, flags);
393d2cc3 4562
89986496
CM
4563 list_for_each_entry_safe(msg, msg2, &timeouts, link)
4564 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
b9675136 4565
89986496
CM
4566 /*
4567 * Maintenance mode handling. Check the timeout
4568 * optimistically before we claim the lock. It may
4569 * mean a timeout gets missed occasionally, but that
4570 * only means the timeout gets extended by one period
4571 * in that case. No big deal, and it avoids the lock
4572 * most of the time.
4573 */
4574 if (intf->auto_maintenance_timeout > 0) {
4575 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
b9675136 4576 if (intf->auto_maintenance_timeout > 0) {
89986496
CM
4577 intf->auto_maintenance_timeout
4578 -= timeout_period;
4579 if (!intf->maintenance_mode
4580 && (intf->auto_maintenance_timeout <= 0)) {
7aefac26 4581 intf->maintenance_mode_enable = false;
89986496 4582 maintenance_mode_update(intf);
b9675136 4583 }
b9675136 4584 }
89986496
CM
4585 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4586 flags);
1da177e4 4587 }
89986496
CM
4588
4589 tasklet_schedule(&intf->recv_tasklet);
4590
4591 return waiting_msgs;
1da177e4
LT
4592}
4593
89986496 4594static void ipmi_request_event(ipmi_smi_t intf)
1da177e4 4595{
89986496
CM
4596 /* No event requests when in maintenance mode. */
4597 if (intf->maintenance_mode_enable)
4598 return;
b9675136 4599
7ea0ed2b
CM
4600 if (!intf->in_shutdown)
4601 intf->handlers->request_events(intf->send_info);
1da177e4
LT
4602}
4603
4604static struct timer_list ipmi_timer;
4605
8f43f84f 4606static atomic_t stop_operation;
1da177e4
LT
4607
4608static void ipmi_timeout(unsigned long data)
4609{
89986496
CM
4610 ipmi_smi_t intf;
4611 int nt = 0;
4612
8f43f84f 4613 if (atomic_read(&stop_operation))
1da177e4 4614 return;
1da177e4 4615
89986496
CM
4616 rcu_read_lock();
4617 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4618 int lnt = 0;
4619
4620 if (atomic_read(&intf->event_waiters)) {
4621 intf->ticks_to_req_ev--;
4622 if (intf->ticks_to_req_ev == 0) {
4623 ipmi_request_event(intf);
4624 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4625 }
4626 lnt++;
4627 }
4628
4629 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
1da177e4 4630
89986496
CM
4631 lnt = !!lnt;
4632 if (lnt != intf->last_needs_timer &&
4633 intf->handlers->set_need_watch)
4634 intf->handlers->set_need_watch(intf->send_info, lnt);
4635 intf->last_needs_timer = lnt;
1da177e4 4636
89986496
CM
4637 nt += lnt;
4638 }
4639 rcu_read_unlock();
4640
4641 if (nt)
4642 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
1da177e4
LT
4643}
4644
89986496
CM
4645static void need_waiter(ipmi_smi_t intf)
4646{
4647 /* Racy, but worst case we start the timer twice. */
4648 if (!timer_pending(&ipmi_timer))
4649 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4650}
1da177e4
LT
4651
4652static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4653static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4654
1da177e4
LT
4655static void free_smi_msg(struct ipmi_smi_msg *msg)
4656{
4657 atomic_dec(&smi_msg_inuse_count);
4658 kfree(msg);
4659}
4660
4661struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4662{
4663 struct ipmi_smi_msg *rv;
4664 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4665 if (rv) {
4666 rv->done = free_smi_msg;
4667 rv->user_data = NULL;
4668 atomic_inc(&smi_msg_inuse_count);
4669 }
4670 return rv;
4671}
c70d7499 4672EXPORT_SYMBOL(ipmi_alloc_smi_msg);
1da177e4
LT
4673
4674static void free_recv_msg(struct ipmi_recv_msg *msg)
4675{
4676 atomic_dec(&recv_msg_inuse_count);
4677 kfree(msg);
4678}
4679
74006309 4680static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
1da177e4
LT
4681{
4682 struct ipmi_recv_msg *rv;
4683
4684 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4685 if (rv) {
a9eec556 4686 rv->user = NULL;
1da177e4
LT
4687 rv->done = free_recv_msg;
4688 atomic_inc(&recv_msg_inuse_count);
4689 }
4690 return rv;
4691}
4692
393d2cc3
CM
4693void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4694{
4695 if (msg->user)
4696 kref_put(&msg->user->refcount, free_user);
4697 msg->done(msg);
4698}
c70d7499 4699EXPORT_SYMBOL(ipmi_free_recv_msg);
393d2cc3 4700
895dcfd1
CM
4701static atomic_t panic_done_count = ATOMIC_INIT(0);
4702
1da177e4
LT
4703static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4704{
895dcfd1 4705 atomic_dec(&panic_done_count);
1da177e4
LT
4706}
4707
4708static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4709{
895dcfd1
CM
4710 atomic_dec(&panic_done_count);
4711}
4712
4713/*
4714 * Inside a panic, send a message and wait for a response.
4715 */
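/*
 * panic_done_count is bumped by two before the request (one for each of
 * the dummy done handlers above), decremented as each message completes,
 * and backed out again if the request could not be issued; ipmi_poll()
 * is then spun until the count returns to zero.
 */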
4716static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
4717 struct ipmi_addr *addr,
4718 struct kernel_ipmi_msg *msg)
4719{
4720 struct ipmi_smi_msg smi_msg;
4721 struct ipmi_recv_msg recv_msg;
4722 int rv;
4723
4724 smi_msg.done = dummy_smi_done_handler;
4725 recv_msg.done = dummy_recv_done_handler;
4726 atomic_add(2, &panic_done_count);
4727 rv = i_ipmi_request(NULL,
4728 intf,
4729 addr,
4730 0,
4731 msg,
4732 intf,
4733 &smi_msg,
4734 &recv_msg,
4735 0,
4736 intf->channels[0].address,
4737 intf->channels[0].lun,
4738 0, 1); /* Don't retry, and don't wait. */
4739 if (rv)
4740 atomic_sub(2, &panic_done_count);
82802f96
HK
4741 else if (intf->handlers->flush_messages)
4742 intf->handlers->flush_messages(intf->send_info);
4743
895dcfd1
CM
4744 while (atomic_read(&panic_done_count) != 0)
4745 ipmi_poll(intf);
1da177e4
LT
4746}
4747
56a55ec6 4748static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1da177e4 4749{
56a55ec6
CM
4750 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4751 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4752 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
c70d7499 4753 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
1da177e4 4754 /* A get event receiver command, save it. */
56a55ec6
CM
4755 intf->event_receiver = msg->msg.data[1];
4756 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
1da177e4
LT
4757 }
4758}
4759
56a55ec6 4760static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1da177e4 4761{
56a55ec6
CM
4762 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4763 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4764 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
c70d7499
CM
4765 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4766 /*
4767 * A get device id command, save if we are an event
4768 * receiver or generator.
4769 */
56a55ec6
CM
4770 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4771 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
1da177e4
LT
4772 }
4773}
1da177e4
LT
4774
4775static void send_panic_events(char *str)
4776{
4777 struct kernel_ipmi_msg msg;
4778 ipmi_smi_t intf;
4779 unsigned char data[16];
1da177e4
LT
4780 struct ipmi_system_interface_addr *si;
4781 struct ipmi_addr addr;
1da177e4 4782
1c9f98d1
CM
4783 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4784 return;
4785
1da177e4
LT
4786 si = (struct ipmi_system_interface_addr *) &addr;
4787 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4788 si->channel = IPMI_BMC_CHANNEL;
4789 si->lun = 0;
4790
4791 /* Fill in an event telling that we have failed. */
4792 msg.netfn = 0x04; /* Sensor or Event. */
4793 msg.cmd = 2; /* Platform event command. */
4794 msg.data = data;
4795 msg.data_len = 8;
cda315ab 4796 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
1da177e4
LT
4797 data[1] = 0x03; /* This is for IPMI 1.0. */
4798 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4799 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4800 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4801
c70d7499
CM
4802 /*
4803 * Put a few breadcrumbs in. Hopefully later we can add more things
4804 * to make the panic events more useful.
4805 */
1da177e4
LT
4806 if (str) {
4807 data[3] = str[0];
4808 data[6] = str[1];
4809 data[7] = str[2];
4810 }
4811
1da177e4 4812 /* For every registered interface, send the event. */
bca0324d 4813 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1c9f98d1
CM
4814 if (!intf->handlers || !intf->handlers->poll)
4815 /* Interface is not ready or can't run at panic time. */
1da177e4
LT
4816 continue;
4817
4818 /* Send the event announcing the panic. */
895dcfd1 4819 ipmi_panic_request_and_wait(intf, &addr, &msg);
1da177e4
LT
4820 }
4821
c70d7499
CM
4822 /*
4823	 * On every interface, dump a bunch of OEM events holding the
4824 * string.
4825 */
1c9f98d1 4826 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
1da177e4
LT
4827 return;
4828
bca0324d
CM
4829 /* For every registered interface, send the event. */
4830 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1da177e4
LT
4831 char *p = str;
4832 struct ipmi_ipmb_addr *ipmb;
4833 int j;
4834
bca0324d
CM
4835 if (intf->intf_num == -1)
4836 /* Interface was not ready yet. */
1da177e4
LT
4837 continue;
4838
78ba2faf
CM
4839 /*
4840		 * intf_num is used as a marker to tell if the
4841 * interface is valid. Thus we need a read barrier to
4842 * make sure data fetched before checking intf_num
4843 * won't be used.
4844 */
4845 smp_rmb();
4846
c70d7499
CM
4847 /*
4848 * First job here is to figure out where to send the
4849 * OEM events. There's no way in IPMI to send OEM
4850 * events using an event send command, so we have to
4851 * find the SEL to put them in and stick them in
4852 * there.
4853 */
1da177e4
LT
4854
4855 /* Get capabilities from the get device id. */
4856 intf->local_sel_device = 0;
4857 intf->local_event_generator = 0;
4858 intf->event_receiver = 0;
4859
4860 /* Request the device info from the local MC. */
4861 msg.netfn = IPMI_NETFN_APP_REQUEST;
4862 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4863 msg.data = NULL;
4864 msg.data_len = 0;
4865 intf->null_user_handler = device_id_fetcher;
895dcfd1 4866 ipmi_panic_request_and_wait(intf, &addr, &msg);
1da177e4
LT
4867
4868 if (intf->local_event_generator) {
4869 /* Request the event receiver from the local MC. */
4870 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4871 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4872 msg.data = NULL;
4873 msg.data_len = 0;
4874 intf->null_user_handler = event_receiver_fetcher;
895dcfd1 4875 ipmi_panic_request_and_wait(intf, &addr, &msg);
1da177e4
LT
4876 }
4877 intf->null_user_handler = NULL;
4878
c70d7499
CM
4879 /*
4880 * Validate the event receiver. The low bit must not
4881 * be 1 (it must be a valid IPMB address), it cannot
4882 * be zero, and it must not be my address.
4883 */
4884 if (((intf->event_receiver & 1) == 0)
1da177e4 4885 && (intf->event_receiver != 0)
c70d7499
CM
4886 && (intf->event_receiver != intf->channels[0].address)) {
4887 /*
4888 * The event receiver is valid, send an IPMB
4889 * message.
4890 */
1da177e4
LT
4891 ipmb = (struct ipmi_ipmb_addr *) &addr;
4892 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4893 ipmb->channel = 0; /* FIXME - is this right? */
4894 ipmb->lun = intf->event_receiver_lun;
4895 ipmb->slave_addr = intf->event_receiver;
4896 } else if (intf->local_sel_device) {
c70d7499
CM
4897 /*
4898 * The event receiver was not valid (or was
4899 * me), but I am an SEL device, just dump it
4900 * in my SEL.
4901 */
1da177e4
LT
4902 si = (struct ipmi_system_interface_addr *) &addr;
4903 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4904 si->channel = IPMI_BMC_CHANNEL;
4905 si->lun = 0;
4906 } else
4907			continue; /* Nowhere to send the event. */
4908
1da177e4
LT
4909 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4910 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4911 msg.data = data;
4912 msg.data_len = 16;
4913
4914 j = 0;
4915 while (*p) {
4916 int size = strlen(p);
4917
4918 if (size > 11)
4919 size = 11;
4920 data[0] = 0;
4921 data[1] = 0;
4922 data[2] = 0xf0; /* OEM event without timestamp. */
c14979b9 4923 data[3] = intf->channels[0].address;
1da177e4 4924 data[4] = j++; /* sequence # */
c70d7499
CM
4925 /*
4926 * Always give 11 bytes, so strncpy will fill
4927 * it with zeroes for me.
4928 */
1da177e4
LT
4929 strncpy(data+5, p, 11);
4930 p += size;
4931
895dcfd1 4932 ipmi_panic_request_and_wait(intf, &addr, &msg);
1da177e4 4933 }
c70d7499 4934 }
1da177e4 4935}
1da177e4 4936
0c8204b3 4937static int has_panicked;
1da177e4
LT
4938
4939static int panic_event(struct notifier_block *this,
4940 unsigned long event,
c70d7499 4941 void *ptr)
1da177e4 4942{
1da177e4
LT
4943 ipmi_smi_t intf;
4944
f18190bd 4945 if (has_panicked)
1da177e4 4946 return NOTIFY_DONE;
f18190bd 4947 has_panicked = 1;
1da177e4
LT
4948
4949 /* For every registered interface, set it to run to completion. */
bca0324d 4950 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
b2c03941
CM
4951 if (!intf->handlers)
4952 /* Interface is not ready. */
1da177e4
LT
4953 continue;
4954
06e5e345
HK
4955 /*
4956 * If we were interrupted while locking xmit_msgs_lock or
4957 * waiting_rcv_msgs_lock, the corresponding list may be
4958 * corrupted. In this case, drop items on the list for
4959		 * safety.
4960 */
4961 if (!spin_trylock(&intf->xmit_msgs_lock)) {
4962 INIT_LIST_HEAD(&intf->xmit_msgs);
4963 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
4964 } else
4965 spin_unlock(&intf->xmit_msgs_lock);
4966
4967 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
4968 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
4969 else
4970 spin_unlock(&intf->waiting_rcv_msgs_lock);
4971
5956dce1 4972 intf->run_to_completion = 1;
1c9f98d1
CM
4973 if (intf->handlers->set_run_to_completion)
4974 intf->handlers->set_run_to_completion(intf->send_info,
4975 1);
1da177e4
LT
4976 }
4977
1da177e4 4978 send_panic_events(ptr);
1da177e4
LT
4979
4980 return NOTIFY_DONE;
4981}
4982
4983static struct notifier_block panic_block = {
4984 .notifier_call = panic_event,
4985 .next = NULL,
4986 .priority = 200 /* priority: INT_MAX >= x >= 0 */
4987};
4988
4989static int ipmi_init_msghandler(void)
4990{
50c812b2 4991 int rv;
1da177e4
LT
4992
4993 if (initialized)
4994 return 0;
4995
fe2d5ffc 4996 rv = driver_register(&ipmidriver.driver);
50c812b2
CM
4997 if (rv) {
4998 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4999 return rv;
5000 }
5001
1da177e4 5002 printk(KERN_INFO "ipmi message handler version "
1fdd75bd 5003 IPMI_DRIVER_VERSION "\n");
1da177e4 5004
3b625943 5005#ifdef CONFIG_PROC_FS
1da177e4
LT
5006 proc_ipmi_root = proc_mkdir("ipmi", NULL);
5007 if (!proc_ipmi_root) {
5008 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
80fad5b9 5009 driver_unregister(&ipmidriver.driver);
1da177e4
LT
5010 return -ENOMEM;
5011 }
5012
3b625943 5013#endif /* CONFIG_PROC_FS */
1da177e4 5014
409035e0
CM
5015 setup_timer(&ipmi_timer, ipmi_timeout, 0);
5016 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
1da177e4 5017
e041c683 5018 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
1da177e4
LT
5019
5020 initialized = 1;
5021
5022 return 0;
5023}
5024
60ee6d5f 5025static int __init ipmi_init_msghandler_mod(void)
1da177e4
LT
5026{
5027 ipmi_init_msghandler();
5028 return 0;
5029}
5030
60ee6d5f 5031static void __exit cleanup_ipmi(void)
1da177e4
LT
5032{
5033 int count;
5034
5035 if (!initialized)
5036 return;
5037
e041c683 5038 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
1da177e4 5039
c70d7499
CM
5040 /*
5041 * This can't be called if any interfaces exist, so no worry
5042 * about shutting down the interfaces.
5043 */
1da177e4 5044
c70d7499
CM
5045 /*
5046 * Tell the timer to stop, then wait for it to stop. This
5047 * avoids problems with race conditions removing the timer
5048 * here.
5049 */
8f43f84f
CM
5050 atomic_inc(&stop_operation);
5051 del_timer_sync(&ipmi_timer);
1da177e4 5052
3b625943 5053#ifdef CONFIG_PROC_FS
a8ca16ea 5054 proc_remove(proc_ipmi_root);
3b625943 5055#endif /* CONFIG_PROC_FS */
1da177e4 5056
fe2d5ffc 5057 driver_unregister(&ipmidriver.driver);
50c812b2 5058
1da177e4
LT
5059 initialized = 0;
5060
5061 /* Check for buffer leaks. */
5062 count = atomic_read(&smi_msg_inuse_count);
5063 if (count != 0)
5064 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
5065 count);
5066 count = atomic_read(&recv_msg_inuse_count);
5067 if (count != 0)
5068 printk(KERN_WARNING PFX "recv message count %d at exit\n",
5069 count);
5070}
5071module_exit(cleanup_ipmi);
5072
5073module_init(ipmi_init_msghandler_mod);
5074MODULE_LICENSE("GPL");
1fdd75bd 5075MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
c70d7499
CM
5076MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5077 " interface.");
1fdd75bd 5078MODULE_VERSION(IPMI_DRIVER_VERSION);
070cbd1d 5079MODULE_SOFTDEP("post: ipmi_devintf");