/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>

#ifdef CONFIG_PPC_OF
#include <asm/of_device.h>
#include <asm/of_platform.h>
#endif

#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC    10000
#define SI_USEC_PER_JIFFY       (1000000/HZ)
#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC   250 /* .25ms when the SM request a
                                       short timeout */

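/*
 * Worked example (illustrative only, not from the original source):
 * with HZ=250, SI_USEC_PER_JIFFY = 1000000/250 = 4000 and
 * SI_TIMEOUT_JIFFIES = 10000/4000 = 2 jiffies, so the driver timer
 * fires roughly every 8 ms; with HZ=1000 it is exactly 10 ms
 * (10 jiffies).
 */
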
/* Bit for BMC global enables. */
#define IPMI_BMC_RCV_MSG_INTR     0x01
#define IPMI_BMC_EVT_MSG_INTR     0x02
#define IPMI_BMC_EVT_MSG_BUFF     0x04
#define IPMI_BMC_SYS_LOG          0x08

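/*
 * Illustrative example (not from the original source): these bits are
 * OR-ed into the enables byte returned by Get BMC Global Enables
 * (msg[3] in the SI_ENABLE_INTERRUPTS1 state below).  If the BMC
 * currently reports 0x08 (system event logging only), the driver
 * writes back 0x08 | 0x01 | 0x02 = 0x0b to turn on both receive
 * message and event message interrupts.
 */
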
1da177e4
LT
91enum si_intf_state {
92 SI_NORMAL,
93 SI_GETTING_FLAGS,
94 SI_GETTING_EVENTS,
95 SI_CLEARING_FLAGS,
96 SI_CLEARING_FLAGS_THEN_SET_IRQ,
97 SI_GETTING_MESSAGES,
98 SI_ENABLE_INTERRUPTS1,
ee6cd5f8
CM
99 SI_ENABLE_INTERRUPTS2,
100 SI_DISABLE_INTERRUPTS1,
101 SI_DISABLE_INTERRUPTS2
1da177e4
LT
102 /* FIXME - add watchdog stuff. */
103};
104
9dbf68f9
CM
105/* Some BT-specific defines we need here. */
106#define IPMI_BT_INTMASK_REG 2
107#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
108#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
109
1da177e4
LT
110enum si_type {
111 SI_KCS, SI_SMIC, SI_BT
112};
b361e27b 113static char *si_to_str[] = { "kcs", "smic", "bt" };
1da177e4 114
50c812b2
CM
115#define DEVICE_NAME "ipmi_si"
116
117static struct device_driver ipmi_driver =
118{
119 .name = DEVICE_NAME,
120 .bus = &platform_bus_type
121};
3ae0e0f9 122
struct smi_info
{
        int                    intf_num;
        ipmi_smi_t             intf;
        struct si_sm_data      *si_sm;
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
        spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;

        /* Used to handle the various types of I/O that can occur with
           IPMI */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
        int (*irq_setup)(struct smi_info *info);
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;
        char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
        void (*addr_source_cleanup)(struct smi_info *info);
        void *addr_source_data;

        /* Per-OEM handler, called from handle_flags().
           Returns 1 when handle_flags() needs to be re-run
           or 0 indicating it set si_state itself.
        */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
           is set to hold the flags until we are done handling everything
           from the flags. */
#define RECEIVE_MSG_AVAIL       0x01
#define EVENT_MSG_BUFFER_FULL   0x02
#define WDT_PRE_TIMEOUT_INT     0x08
#define OEM0_DATA_AVAIL     0x20
#define OEM1_DATA_AVAIL     0x40
#define OEM2_DATA_AVAIL     0x80
#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
                             OEM1_DATA_AVAIL | \
                             OEM2_DATA_AVAIL)
        unsigned char msg_flags;

        /* If set to true, this will request events the next time the
           state machine is idle. */
        atomic_t req_events;

        /* If true, run the state machine to completion on every send
           call.  Generally used after a panic to make sure stuff goes
           out. */
        int run_to_completion;

        /* The I/O port of an SI interface. */
        int port;

        /* The space between start addresses of the two ports.  For
           instance, if the first port is 0xca2 and the spacing is 4, then
           the second port is 0xca6. */
        unsigned int spacing;

        /* zero if no irq; */
        int irq;

        /* The timer for this si. */
        struct timer_list si_timer;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long last_timeout_jiffies;

        /* Used to gracefully stop the timer without race conditions. */
        atomic_t stop_operation;

        /* The driver will disable interrupts when it gets into a
           situation where it cannot handle messages due to lack of
           memory.  Once that situation clears up, it will re-enable
           interrupts. */
        int interrupt_disabled;

        /* From the get device id response... */
        struct ipmi_device_id device_id;

        /* Driver model stuff. */
        struct device *dev;
        struct platform_device *pdev;

        /* True if we allocated the device, false if it came from
         * someplace else (like PCI). */
        int dev_registered;

        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;

        /* Counters and things for the proc filesystem. */
        spinlock_t count_lock;
        unsigned long short_timeouts;
        unsigned long long_timeouts;
        unsigned long timeout_restarts;
        unsigned long idles;
        unsigned long interrupts;
        unsigned long attentions;
        unsigned long flag_fetches;
        unsigned long hosed_count;
        unsigned long complete_transactions;
        unsigned long events;
        unsigned long watchdog_pretimeouts;
        unsigned long incoming_messages;

        struct task_struct *thread;

        struct list_head link;
};

#define SI_MAX_PARMS 4

static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;

static int unload_when_empty = 1;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block * nb)
{
        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer with the lock
           released. */
        spin_unlock(&(smi_info->si_lock));
        ipmi_smi_msg_received(smi_info->intf, msg);
        spin_lock(&(smi_info->si_lock));
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
                cCode = IPMI_ERR_UNSPECIFIED;
        /* else use it as is */

        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = cCode;
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}

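/*
 * Illustrative example (not part of the original source): for a
 * Get Device ID request the queued data is { 0x18, 0x01 }
 * (IPMI_NETFN_APP_REQUEST << 2, then the command byte).
 * return_hosed_msg() turns that into the response { 0x1c, 0x01, 0xff }:
 * OR-ing 4 into the first byte bumps the netfn to the corresponding
 * response netfn, and 0xff is IPMI_ERR_UNSPECIFIED.
 */
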
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        int              rv;
        struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        /* No need to save flags, we already have interrupts off and we
           already hold the SMI lock. */
        spin_lock(&(smi_info->msg_lock));

        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
        } else if (!list_empty(&(smi_info->xmit_msgs))) {
                entry = smi_info->xmit_msgs.next;
        }

        if (!entry) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
                int err;

                list_del(entry);
                smi_info->curr_msg = list_entry(entry,
                                                struct ipmi_smi_msg,
                                                link);
#ifdef DEBUG_TIMING
                do_gettimeofday(&t);
                printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err) {
                        return_hosed_msg(smi_info, err);
                }

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
 out:
        spin_unlock(&(smi_info->msg_lock));

        return rv;
}

static void start_enable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        /* If we are enabling interrupts, we have to tell the
           BMC to use them. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}

static void start_disable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_DISABLE_INTERRUPTS1;
}

static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}

/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                start_disable_irq(smi_info);
                smi_info->interrupt_disabled = 1;
        }
}

static inline void enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                start_enable_irq(smi_info);
                smi_info->interrupt_disabled = 0;
        }
}

static void handle_flags(struct smi_info *smi_info)
{
 retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                spin_lock(&smi_info->count_lock);
                smi_info->watchdog_pretimeouts++;
                spin_unlock(&smi_info->count_lock);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
                spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_MESSAGES;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
                   smi_info->oem_data_avail_handler) {
                if (smi_info->oem_data_avail_handler(smi_info))
                        goto retry;
        } else {
                smi_info->si_state = SI_NORMAL;
        }
}

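/*
 * Sketch of a per-OEM handler as wired up above (illustrative only,
 * assuming a hypothetical BMC that signals pending work through
 * OEM0_DATA_AVAIL): the handler either maps the OEM flag onto one of
 * the standard flags and returns 1 so handle_flags() runs again, or
 * sets si_state itself and returns 0.
 */
#if 0   /* example only, not compiled */
static int example_oem_data_avail_handler(struct smi_info *smi_info)
{
        if (smi_info->msg_flags & OEM0_DATA_AVAIL) {
                /* Treat OEM0 as "a message is waiting" and let
                   handle_flags() pick it up on the retry. */
                smi_info->msg_flags &= ~OEM0_DATA_AVAIL;
                smi_info->msg_flags |= RECEIVE_MSG_AVAIL;
                return 1;
        }
        return 0;
}
#endif
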
static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
        struct timeval t;

        do_gettimeofday(&t);
        printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int  len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for
                           now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /* Hmm, no flags.  That's technically illegal, but
                           don't use uninitialized data. */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        printk(KERN_WARNING
                               "ipmi_si: Error clearing flags: %2.2x\n",
                               msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->events++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->incoming_messages++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = (msg[3] |
                                  IPMI_BMC_RCV_MSG_INTR |
                                  IPMI_BMC_EVT_MSG_INTR);
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed set, using polled mode.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_DISABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not disable interrupts"
                               ", failed get.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = (msg[3] &
                                  ~(IPMI_BMC_RCV_MSG_INTR |
                                    IPMI_BMC_EVT_MSG_INTR));
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_DISABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_DISABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not disable interrupts"
                               ", failed set.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}

/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

 restart:
        /* There used to be a loop here that waited a little while
           (around 25us) before giving up.  That turned out to be
           pointless, the minimum delays I was seeing were in the 300us
           range, which is far too long to wait in an interrupt.  So
           we just run until the state machine tells us something
           happened or it needs a delay. */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
        {
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->complete_transactions++;
                spin_unlock(&smi_info->count_lock);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        else if (si_sm_result == SI_SM_HOSED)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->hosed_count++;
                spin_unlock(&smi_info->count_lock);

                /* Do this before return_hosed_msg, because that
                   releases the lock. */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /* We prefer handling attn over new messages. */
        if (si_sm_result == SI_SM_ATTN)
        {
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->attentions++;
                spin_unlock(&smi_info->count_lock);

                /* Got an attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the SMI, that's not really
                   possible. */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                spin_lock(&smi_info->count_lock);
                smi_info->idles++;
                spin_unlock(&smi_info->count_lock);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events)))
        {
                /* We are idle and the upper layer requested that I fetch
                   events, so do so. */
                atomic_set(&smi_info->req_events, 0);

                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg)
                        goto out;

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
                goto restart;
        }
 out:
        return si_sm_result;
}

static void sender(void                *send_info,
                   struct ipmi_smi_msg *msg,
                   int                 priority)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        if (atomic_read(&smi_info->stop_operation)) {
                msg->rsp[0] = msg->data[0] | 4;
                msg->rsp[1] = msg->data[1];
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
                deliver_recv_msg(smi_info, msg);
                return;
        }

        spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /* If we are running to completion, then throw it in
                   the list and run transactions until everything is
                   clear.  Priority doesn't matter here. */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                /* We have to release the msg lock and claim the smi
                   lock in this case, because of race conditions. */
                spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

                spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
        } else {
                if (priority > 0) {
                        list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
                } else {
                        list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
                }
        }
        spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        if ((smi_info->si_state == SI_NORMAL)
            && (smi_info->curr_msg == NULL))
        {
                start_next_msg(smi_info);
        }
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

static int ipmi_thread(void *data)
{
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;

        set_user_nice(current, 19);
        while (!kthread_should_stop()) {
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        /* do nothing */
                }
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        schedule();
                else
                        schedule_timeout_interruptible(1);
        }
        return 0;
}


static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        /*
         * Make sure there is some delay in the poll loop so we can
         * drive time forward and timeout things.
         */
        udelay(10);
        smi_event_handler(smi_info, 10);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        if (atomic_read(&smi_info->stop_operation))
                return;

        atomic_set(&smi_info->req_events, 1);
}

static int initialized;

static void smi_timeout(unsigned long data)
{
        struct smi_info   *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + 1;
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}

static irqreturn_t si_irq_handler(int irq, void *data)
{
        struct smi_info *smi_info = data;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval  t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}

static irqreturn_t si_bt_irq_handler(int irq, void *data)
{
        struct smi_info *smi_info = data;
        /* We need to clear the IRQ flag for the BT interface. */
        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        return si_irq_handler(irq, data);
}

static int smi_start_processing(void       *send_info,
                                ipmi_smi_t intf)
{
        struct smi_info *new_smi = send_info;
        int             enable = 0;

        new_smi->intf = intf;

        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
        new_smi->last_timeout_jiffies = jiffies;
        mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);

        /*
         * Check if the user forcefully enabled the daemon.
         */
        if (new_smi->intf_num < num_force_kipmid)
                enable = force_kipmid[new_smi->intf_num];
        /*
         * The BT interface is efficient enough to not need a thread,
         * and there is no need for a thread if we have interrupts.
         */
        else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
                enable = 1;

        if (enable) {
                new_smi->thread = kthread_run(ipmi_thread, new_smi,
                                              "kipmi%d", new_smi->intf_num);
                if (IS_ERR(new_smi->thread)) {
                        printk(KERN_NOTICE "ipmi_si_intf: Could not start"
                               " kernel thread due to error %ld, only using"
                               " timers to drive the interface\n",
                               PTR_ERR(new_smi->thread));
                        new_smi->thread = NULL;
                }
        }

        return 0;
}

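/*
 * Usage note (illustrative, not from the original source): the
 * heuristic above can be overridden from user space with the
 * force_kipmid module parameter declared further down, e.g.
 * "modprobe ipmi_si force_kipmid=1" forces the kipmi0 thread on even
 * when the interface has an interrupt, and "force_kipmid=0" keeps it
 * off for a slow interface where the thread would just burn CPU.
 */
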
static void set_maintenance_mode(void *send_info, int enable)
{
        struct smi_info   *smi_info = send_info;

        if (!enable)
                atomic_set(&smi_info->req_events, 0);
}

static struct ipmi_smi_handlers handlers =
{
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
        .sender                 = sender,
        .request_events         = request_events,
        .set_maintenance_mode   = set_maintenance_mode,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
};

/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

#define DEFAULT_REGSPACING      1
#define DEFAULT_REGSIZE         1

static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static int num_ports;
static int           irqs[SI_MAX_PARMS];
static int num_irqs;
static int           regspacings[SI_MAX_PARMS];
static int num_regspacings;
static int           regsizes[SI_MAX_PARMS];
static int num_regsizes;
static int           regshifts[SI_MAX_PARMS];
static int num_regshifts;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs;

#define IPMI_IO_ADDR_SPACE  0
#define IPMI_MEM_ADDR_SPACE 1
static char *addr_space_to_str[] = { "i/o", "mem" };

static int hotmod_handler(const char *val, struct kernel_param *kp);

module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
                 " Documentation/IPMI.txt in the kernel sources for the"
                 " gory details.");

module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
                 " default scan of the KCS and SMIC interface at the standard"
                 " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
                 " interface separated by commas.  The types are 'kcs',"
                 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
                 " the first interface to kcs and the second to bt");
module_param_array(addrs, long, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is in memory.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(ports, int, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is a port.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " has an interrupt.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
                 " and each successive register used by the interface.  For"
                 " instance, if the start address is 0xca2 and the spacing"
                 " is 2, then the second address is at 0xca4.  Defaults"
                 " to 1.");
module_param_array(regsizes, int, &num_regsizes, 0);
MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
                 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
                 " 16-bit, 32-bit, or 64-bit register.  Use this if"
                 " the 8-bit IPMI register has to be read from a larger"
                 " register.");
module_param_array(regshifts, int, &num_regshifts, 0);
MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
                 " IPMI register, in bits.  For instance, if the data"
                 " is read from a 32-bit word and the IPMI data is in"
                 " bit 8-15, then the shift would be 8");
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
                 " the controller.  Normally this is 0x20, but can be"
                 " overridden by this parm.  This is an array indexed"
                 " by interface number.");
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                 " disabled(0).  Normally the IPMI driver auto-detects"
                 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, int, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
                 " specified or found, default is 1.  Setting to 0"
                 " is useful for hot add of devices using hotmod.");


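/*
 * Illustrative examples of the parameters above (not from the original
 * source; the addresses are hypothetical):
 *
 *   modprobe ipmi_si type=kcs ports=0xca2
 *       - one KCS interface on I/O port 0xca2 (the usual default)
 *   modprobe ipmi_si type=smic addrs=0xf8000000 regspacings=4 regsizes=4
 *       - one memory-mapped SMIC interface whose 8-bit registers live
 *         in 32-bit words spaced 4 bytes apart
 *   modprobe ipmi_si trydefaults=0 force_kipmid=0
 *       - disable the default KCS/SMIC probe and the kipmid thread
 *
 * See Documentation/IPMI.txt for the authoritative description.
 */
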
static void std_irq_cleanup(struct smi_info *info)
{
        if (info->si_type == SI_BT)
                /* Disable the interrupt in the BT interface. */
                info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
        free_irq(info->irq, info);
}

static int std_irq_setup(struct smi_info *info)
{
        int rv;

        if (!info->irq)
                return 0;

        if (info->si_type == SI_BT) {
                rv = request_irq(info->irq,
                                 si_bt_irq_handler,
                                 IRQF_SHARED | IRQF_DISABLED,
                                 DEVICE_NAME,
                                 info);
                if (!rv)
                        /* Enable the interrupt in the BT interface. */
                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        } else
                rv = request_irq(info->irq,
                                 si_irq_handler,
                                 IRQF_SHARED | IRQF_DISABLED,
                                 DEVICE_NAME,
                                 info);
        if (rv) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim interrupt %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
        } else {
                info->irq_cleanup = std_irq_cleanup;
                printk("  Using irq %d\n", info->irq);
        }

        return rv;
}

static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return inb(addr + (offset * io->regspacing));
}

static void port_outb(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outb(b, addr + (offset * io->regspacing));
}

static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outw(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outw(b << io->regshift, addr + (offset * io->regspacing));
}

static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
        unsigned int addr = io->addr_data;

        return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outl(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int addr = io->addr_data;

        outl(b << io->regshift, addr+(offset * io->regspacing));
}

static void port_cleanup(struct smi_info *info)
{
        unsigned int addr = info->io.addr_data;
        int          idx;

        if (addr) {
                for (idx = 0; idx < info->io_size; idx++) {
                        release_region(addr + idx * info->io.regspacing,
                                       info->io.regsize);
                }
        }
}

static int port_setup(struct smi_info *info)
{
        unsigned int addr = info->io.addr_data;
        int          idx;

        if (!addr)
                return -ENODEV;

        info->io_cleanup = port_cleanup;

        /* Figure out the actual inb/inw/inl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = port_inb;
                info->io.outputb = port_outb;
                break;
        case 2:
                info->io.inputb = port_inw;
                info->io.outputb = port_outw;
                break;
        case 4:
                info->io.inputb = port_inl;
                info->io.outputb = port_outl;
                break;
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Some BIOSes reserve disjoint I/O regions in their ACPI
         * tables.  This causes problems when trying to register the
         * entire I/O region.  Therefore we must register each I/O
         * port separately.
         */
        for (idx = 0; idx < info->io_size; idx++) {
                if (request_region(addr + idx * info->io.regspacing,
                                   info->io.regsize, DEVICE_NAME) == NULL) {
                        /* Undo allocations */
                        while (idx--) {
                                release_region(addr + idx * info->io.regspacing,
                                               info->io.regsize);
                        }
                        return -EIO;
                }
        }
        return 0;
}

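/*
 * Illustrative example (values hypothetical): for a KCS interface at
 * base 0xca2 with regspacing 4 and regsize 1, io_size covers the two
 * KCS registers (data and status/command), so the loop above claims
 * one byte at 0xca2 and one at 0xca6 rather than the whole
 * 0xca2-0xca6 range, matching the "first port 0xca2, second port
 * 0xca6" example in struct smi_info.
 */
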
static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
{
        return readb((io->addr)+(offset * io->regspacing));
}

static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
                          unsigned char b)
{
        writeb(b, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
{
        return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
                          unsigned char b)
{
        writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
{
        return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
                          unsigned char b)
{
        writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

#ifdef readq
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
        return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif

static void mem_cleanup(struct smi_info *info)
{
        unsigned long addr = info->io.addr_data;
        int           mapsize;

        if (info->io.addr) {
                iounmap(info->io.addr);

                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));

                release_mem_region(addr, mapsize);
        }
}

static int mem_setup(struct smi_info *info)
{
        unsigned long addr = info->io.addr_data;
        int           mapsize;

        if (!addr)
                return -ENODEV;

        info->io_cleanup = mem_cleanup;

        /* Figure out the actual readb/readw/readl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = intf_mem_inb;
                info->io.outputb = intf_mem_outb;
                break;
        case 2:
                info->io.inputb = intf_mem_inw;
                info->io.outputb = intf_mem_outw;
                break;
        case 4:
                info->io.inputb = intf_mem_inl;
                info->io.outputb = intf_mem_outl;
                break;
#ifdef readq
        case 8:
                info->io.inputb = mem_inq;
                info->io.outputb = mem_outq;
                break;
#endif
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;

        info->io.addr = ioremap(addr, mapsize);
        if (info->io.addr == NULL) {
                release_mem_region(addr, mapsize);
                return -EIO;
        }
        return 0;
}

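/*
 * Worked example of the mapsize calculation above (illustrative):
 * with io_size = 2, regspacing = 4 and regsize = 1, mapsize is
 * 2*4 - (4 - 1) = 5 bytes, i.e. from the first register up to and
 * including the last byte of the second register, instead of the
 * full 8 bytes that two spacing units would cover.
 */
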
/*
 * Parms come in as <op1>[:op2[:op3...]].  ops are:
 *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
 * Options are:
 *   rsp=<regspacing>
 *   rsi=<regsize>
 *   rsh=<regshift>
 *   irq=<irq>
 *   ipmb=<ipmb addr>
 */
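/*
 * Illustrative use of the hotmod parameter (addresses hypothetical):
 *
 *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "add,bt,mem,0xf8000000,rsp=4,irq=20" \
 *        > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 *
 * Each string is parsed by hotmod_handler() below; see
 * Documentation/IPMI.txt for the full syntax.
 */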
enum hotmod_op { HM_ADD, HM_REMOVE };
struct hotmod_vals {
        char *name;
        int  val;
};
static struct hotmod_vals hotmod_ops[] = {
        { "add",        HM_ADD },
        { "remove",     HM_REMOVE },
        { NULL }
};
static struct hotmod_vals hotmod_si[] = {
        { "kcs",        SI_KCS },
        { "smic",       SI_SMIC },
        { "bt",         SI_BT },
        { NULL }
};
static struct hotmod_vals hotmod_as[] = {
        { "mem",        IPMI_MEM_ADDR_SPACE },
        { "i/o",        IPMI_IO_ADDR_SPACE },
        { NULL }
};

static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
{
        char *s;
        int  i;

        s = strchr(*curr, ',');
        if (!s) {
                printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
                return -EINVAL;
        }
        *s = '\0';
        s++;
        for (i = 0; hotmod_ops[i].name; i++) {
                if (strcmp(*curr, v[i].name) == 0) {
                        *val = v[i].val;
                        *curr = s;
                        return 0;
                }
        }

        printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
        return -EINVAL;
}

static int check_hotmod_int_op(const char *curr, const char *option,
                               const char *name, int *val)
{
        char *n;

        if (strcmp(curr, name) == 0) {
                if (!option) {
                        printk(KERN_WARNING PFX
                               "No option given for '%s'\n",
                               curr);
                        return -EINVAL;
                }
                *val = simple_strtoul(option, &n, 0);
                if ((*n != '\0') || (*option == '\0')) {
                        printk(KERN_WARNING PFX
                               "Bad option given for '%s'\n",
                               curr);
                        return -EINVAL;
                }
                return 1;
        }
        return 0;
}

static int hotmod_handler(const char *val, struct kernel_param *kp)
{
        char *str = kstrdup(val, GFP_KERNEL);
        int  rv;
        char *next, *curr, *s, *n, *o;
        enum hotmod_op op;
        enum si_type si_type;
        int  addr_space;
        unsigned long addr;
        int regspacing;
        int regsize;
        int regshift;
        int irq;
        int ipmb;
        int ival;
        int len;
        struct smi_info *info;

        if (!str)
                return -ENOMEM;

        /* Kill any trailing spaces, as we can get a "\n" from echo. */
        len = strlen(str);
        ival = len - 1;
        while ((ival >= 0) && isspace(str[ival])) {
                str[ival] = '\0';
                ival--;
        }

        for (curr = str; curr; curr = next) {
                regspacing = 1;
                regsize = 1;
                regshift = 0;
                irq = 0;
                ipmb = 0x20;

                next = strchr(curr, ':');
                if (next) {
                        *next = '\0';
                        next++;
                }

                rv = parse_str(hotmod_ops, &ival, "operation", &curr);
                if (rv)
                        break;
                op = ival;

                rv = parse_str(hotmod_si, &ival, "interface type", &curr);
                if (rv)
                        break;
                si_type = ival;

                rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
                if (rv)
                        break;

                s = strchr(curr, ',');
                if (s) {
                        *s = '\0';
                        s++;
                }
                addr = simple_strtoul(curr, &n, 0);
                if ((*n != '\0') || (*curr == '\0')) {
                        printk(KERN_WARNING PFX "Invalid hotmod address"
                               " '%s'\n", curr);
                        break;
                }

                while (s) {
                        curr = s;
                        s = strchr(curr, ',');
                        if (s) {
                                *s = '\0';
                                s++;
                        }
                        o = strchr(curr, '=');
                        if (o) {
                                *o = '\0';
                                o++;
                        }
                        rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
                        if (rv < 0)
                                goto out;
                        else if (rv)
                                continue;
                        rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
                        if (rv < 0)
                                goto out;
                        else if (rv)
                                continue;
                        rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
                        if (rv < 0)
                                goto out;
                        else if (rv)
                                continue;
                        rv = check_hotmod_int_op(curr, o, "irq", &irq);
                        if (rv < 0)
                                goto out;
                        else if (rv)
                                continue;
                        rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
                        if (rv < 0)
                                goto out;
                        else if (rv)
                                continue;

                        rv = -EINVAL;
                        printk(KERN_WARNING PFX
                               "Invalid hotmod option '%s'\n",
                               curr);
                        goto out;
                }

                if (op == HM_ADD) {
                        info = kzalloc(sizeof(*info), GFP_KERNEL);
                        if (!info) {
                                rv = -ENOMEM;
                                goto out;
                        }

                        info->addr_source = "hotmod";
                        info->si_type = si_type;
                        info->io.addr_data = addr;
                        info->io.addr_type = addr_space;
                        if (addr_space == IPMI_MEM_ADDR_SPACE)
                                info->io_setup = mem_setup;
                        else
                                info->io_setup = port_setup;

                        info->io.addr = NULL;
                        info->io.regspacing = regspacing;
                        if (!info->io.regspacing)
                                info->io.regspacing = DEFAULT_REGSPACING;
                        info->io.regsize = regsize;
                        if (!info->io.regsize)
                                info->io.regsize = DEFAULT_REGSPACING;
                        info->io.regshift = regshift;
                        info->irq = irq;
                        if (info->irq)
                                info->irq_setup = std_irq_setup;
                        info->slave_addr = ipmb;

                        try_smi_init(info);
                } else {
                        /* remove */
                        struct smi_info *e, *tmp_e;

                        mutex_lock(&smi_infos_lock);
                        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
                                if (e->io.addr_type != addr_space)
                                        continue;
                                if (e->si_type != si_type)
                                        continue;
                                if (e->io.addr_data == addr)
                                        cleanup_one_si(e);
                        }
                        mutex_unlock(&smi_infos_lock);
                }
        }
        rv = len;
 out:
        kfree(str);
        return rv;
}

static __devinit void hardcode_find_bmc(void)
{
        int             i;
        struct smi_info *info;

        for (i = 0; i < SI_MAX_PARMS; i++) {
                if (!ports[i] && !addrs[i])
                        continue;

                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return;

                info->addr_source = "hardcoded";

                if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
                        info->si_type = SI_KCS;
                } else if (strcmp(si_type[i], "smic") == 0) {
                        info->si_type = SI_SMIC;
                } else if (strcmp(si_type[i], "bt") == 0) {
                        info->si_type = SI_BT;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, was invalid: %s\n",
                               i, si_type[i]);
                        kfree(info);
                        continue;
                }

                if (ports[i]) {
                        /* An I/O port */
                        info->io_setup = port_setup;
                        info->io.addr_data = ports[i];
                        info->io.addr_type = IPMI_IO_ADDR_SPACE;
                } else if (addrs[i]) {
                        /* A memory port */
                        info->io_setup = mem_setup;
                        info->io.addr_data = addrs[i];
                        info->io.addr_type = IPMI_MEM_ADDR_SPACE;
                } else {
                        printk(KERN_WARNING
                               "ipmi_si: Interface type specified "
                               "for interface %d, "
                               "but port and address were not set or "
                               "set to zero.\n", i);
                        kfree(info);
                        continue;
                }

                info->io.addr = NULL;
                info->io.regspacing = regspacings[i];
                if (!info->io.regspacing)
                        info->io.regspacing = DEFAULT_REGSPACING;
                info->io.regsize = regsizes[i];
                if (!info->io.regsize)
                        info->io.regsize = DEFAULT_REGSPACING;
                info->io.regshift = regshifts[i];
                info->irq = irqs[i];
                if (info->irq)
                        info->irq_setup = std_irq_setup;

                try_smi_init(info);
        }
}

8466361a 1740#ifdef CONFIG_ACPI
1da177e4
LT
1741
1742#include <linux/acpi.h>
1743
1744/* Once we get an ACPI failure, we don't try any more, because we go
1745 through the tables sequentially. Once we don't find a table, there
1746 are no more. */
0c8204b3 1747static int acpi_failure;
1da177e4
LT
1748
1749/* For GPE-type interrupts. */
1750static u32 ipmi_acpi_gpe(void *context)
1751{
1752 struct smi_info *smi_info = context;
1753 unsigned long flags;
1754#ifdef DEBUG_TIMING
1755 struct timeval t;
1756#endif
1757
1758 spin_lock_irqsave(&(smi_info->si_lock), flags);
1759
1760 spin_lock(&smi_info->count_lock);
1761 smi_info->interrupts++;
1762 spin_unlock(&smi_info->count_lock);
1763
1da177e4
LT
1764#ifdef DEBUG_TIMING
1765 do_gettimeofday(&t);
1766 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1767#endif
1768 smi_event_handler(smi_info, 0);
1da177e4
LT
1769 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1770
1771 return ACPI_INTERRUPT_HANDLED;
1772}
1773
b0defcdb
CM
1774static void acpi_gpe_irq_cleanup(struct smi_info *info)
1775{
1776 if (!info->irq)
1777 return;
1778
1779 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1780}
1781
1da177e4
LT
1782static int acpi_gpe_irq_setup(struct smi_info *info)
1783{
1784 acpi_status status;
1785
b0defcdb 1786 if (!info->irq)
1da177e4
LT
1787 return 0;
1788
1789 /* FIXME - is level triggered right? */
1790 status = acpi_install_gpe_handler(NULL,
1791 info->irq,
1792 ACPI_GPE_LEVEL_TRIGGERED,
1793 &ipmi_acpi_gpe,
1794 info);
1795 if (status != AE_OK) {
1796 printk(KERN_WARNING
1797 "ipmi_si: %s unable to claim ACPI GPE %d,"
1798 " running polled\n",
1799 DEVICE_NAME, info->irq);
1800 info->irq = 0;
1801 return -EINVAL;
1802 } else {
b0defcdb 1803 info->irq_cleanup = acpi_gpe_irq_cleanup;
1da177e4
LT
1804 printk(" Using ACPI GPE %d\n", info->irq);
1805 return 0;
1806 }
1807}
1808
1da177e4
LT
1809/*
1810 * Defined at
1811 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1812 */
1813struct SPMITable {
1814 s8 Signature[4];
1815 u32 Length;
1816 u8 Revision;
1817 u8 Checksum;
1818 s8 OEMID[6];
1819 s8 OEMTableID[8];
1820 s8 OEMRevision[4];
1821 s8 CreatorID[4];
1822 s8 CreatorRevision[4];
1823 u8 InterfaceType;
1824 u8 IPMIlegacy;
1825 s16 SpecificationRevision;
1826
1827 /*
1828 * Bit 0 - SCI interrupt supported
1829 * Bit 1 - I/O APIC/SAPIC
1830 */
1831 u8 InterruptType;
1832
1833 /* If bit 0 of InterruptType is set, then this is the SCI
1834 interrupt in the GPEx_STS register. */
1835 u8 GPE;
1836
1837 s16 Reserved;
1838
1839 /* If bit 1 of InterruptType is set, then this is the I/O
1840 APIC/SAPIC interrupt. */
1841 u32 GlobalSystemInterrupt;
1842
1843 /* The actual register address. */
1844 struct acpi_generic_address addr;
1845
1846 u8 UID[4];
1847
1848 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1849};
1850
b0defcdb 1851static __devinit int try_init_acpi(struct SPMITable *spmi)
1da177e4
LT
1852{
1853 struct smi_info *info;
1da177e4
LT
1854 u8 addr_space;
1855
1da177e4
LT
1856 if (spmi->IPMIlegacy != 1) {
1857 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1858 return -ENODEV;
1859 }
1860
15a58ed1 1861 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1da177e4
LT
1862 addr_space = IPMI_MEM_ADDR_SPACE;
1863 else
1864 addr_space = IPMI_IO_ADDR_SPACE;
b0defcdb
CM
1865
1866 info = kzalloc(sizeof(*info), GFP_KERNEL);
1867 if (!info) {
1868 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1869 return -ENOMEM;
1870 }
1871
1872 info->addr_source = "ACPI";
1da177e4 1873
1da177e4
LT
1874 /* Figure out the interface type. */
1875 switch (spmi->InterfaceType)
1876 {
1877 case 1: /* KCS */
b0defcdb 1878 info->si_type = SI_KCS;
1da177e4 1879 break;
1da177e4 1880 case 2: /* SMIC */
b0defcdb 1881 info->si_type = SI_SMIC;
1da177e4 1882 break;
1da177e4 1883 case 3: /* BT */
b0defcdb 1884 info->si_type = SI_BT;
1da177e4 1885 break;
1da177e4
LT
1886 default:
1887 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1888 spmi->InterfaceType);
b0defcdb 1889 kfree(info);
1da177e4
LT
1890 return -EIO;
1891 }
1892
1da177e4
LT
1893 if (spmi->InterruptType & 1) {
1894 /* We've got a GPE interrupt. */
1895 info->irq = spmi->GPE;
1896 info->irq_setup = acpi_gpe_irq_setup;
1da177e4
LT
1897 } else if (spmi->InterruptType & 2) {
1898 /* We've got an APIC/SAPIC interrupt. */
1899 info->irq = spmi->GlobalSystemInterrupt;
1900 info->irq_setup = std_irq_setup;
1da177e4
LT
1901 } else {
1902 /* Use the default interrupt setting. */
1903 info->irq = 0;
1904 info->irq_setup = NULL;
1905 }
1906
15a58ed1 1907 if (spmi->addr.bit_width) {
35bc37a0 1908 /* A (hopefully) properly formed register bit width. */
15a58ed1 1909 info->io.regspacing = spmi->addr.bit_width / 8;
35bc37a0 1910 } else {
35bc37a0
CM
1911 info->io.regspacing = DEFAULT_REGSPACING;
1912 }
b0defcdb 1913 info->io.regsize = info->io.regspacing;
15a58ed1 1914 info->io.regshift = spmi->addr.bit_offset;
1da177e4 1915
15a58ed1 1916 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1da177e4 1917 info->io_setup = mem_setup;
8fe1425a 1918 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
15a58ed1 1919 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1da177e4 1920 info->io_setup = port_setup;
8fe1425a 1921 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1922 } else {
1923 kfree(info);
1924 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1925 return -EIO;
1926 }
b0defcdb 1927 info->io.addr_data = spmi->addr.address;
1da177e4 1928
b0defcdb 1929 try_smi_init(info);
1da177e4 1930
1da177e4
LT
1931 return 0;
1932}
b0defcdb
CM
1933
1934static __devinit void acpi_find_bmc(void)
1935{
1936 acpi_status status;
1937 struct SPMITable *spmi;
1938 int i;
1939
1940 if (acpi_disabled)
1941 return;
1942
1943 if (acpi_failure)
1944 return;
1945
1946 for (i = 0; ; i++) {
15a58ed1
AS
1947 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1948 (struct acpi_table_header **)&spmi);
b0defcdb
CM
1949 if (status != AE_OK)
1950 return;
1951
1952 try_init_acpi(spmi);
1953 }
1954}
1da177e4
LT
1955#endif
1956
a9fad4cc 1957#ifdef CONFIG_DMI
b0defcdb 1958struct dmi_ipmi_data
1da177e4
LT
1959{
1960 u8 type;
1961 u8 addr_space;
1962 unsigned long base_addr;
1963 u8 irq;
1964 u8 offset;
1965 u8 slave_addr;
b0defcdb 1966};
1da177e4 1967
b0defcdb
CM
1968static int __devinit decode_dmi(struct dmi_header *dm,
1969 struct dmi_ipmi_data *dmi)
1da177e4 1970{
e8b33617 1971 u8 *data = (u8 *)dm;
1da177e4
LT
1972 unsigned long base_addr;
1973 u8 reg_spacing;
b224cd3a 1974 u8 len = dm->length;
1da177e4 1975
b0defcdb 1976 dmi->type = data[4];
1da177e4
LT
1977
1978 memcpy(&base_addr, data+8, sizeof(unsigned long));
1979 if (len >= 0x11) {
1980 if (base_addr & 1) {
1981 /* I/O */
1982 base_addr &= 0xFFFE;
b0defcdb 1983 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1984 }
1985 else {
1986 /* Memory */
b0defcdb 1987 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1da177e4
LT
1988 }
1989 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1990 is odd. */
b0defcdb 1991 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1da177e4 1992
b0defcdb 1993 dmi->irq = data[0x11];
1da177e4
LT
1994
1995 /* The top two bits of byte 0x10 hold the register spacing. */
b224cd3a 1996 reg_spacing = (data[0x10] & 0xC0) >> 6;
1da177e4
LT
1997 switch(reg_spacing){
1998 case 0x00: /* Byte boundaries */
b0defcdb 1999 dmi->offset = 1;
1da177e4
LT
2000 break;
2001 case 0x01: /* 32-bit boundaries */
b0defcdb 2002 dmi->offset = 4;
1da177e4
LT
2003 break;
2004 case 0x02: /* 16-byte boundaries */
b0defcdb 2005 dmi->offset = 16;
1da177e4
LT
2006 break;
2007 default:
2008 /* Some other interface, just ignore it. */
2009 return -EIO;
2010 }
2011 } else {
2012 /* Old DMI spec. */
92068801
CM
2013 /* Note that technically, the lower bit of the base
2014 * address should be 1 if the address is I/O and 0 if
2015 * the address is in memory. So many systems get that
2016 * wrong (and all that I have seen are I/O) so we just
2017 * ignore that bit and assume I/O. Systems that use
2018 * memory should use the newer spec, anyway. */
b0defcdb
CM
2019 dmi->base_addr = base_addr & 0xfffe;
2020 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2021 dmi->offset = 1;
1da177e4
LT
2022 }
2023
b0defcdb 2024 dmi->slave_addr = data[6];
1da177e4 2025
b0defcdb 2026 return 0;
1da177e4
LT
2027}
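
To make the bit layout above concrete, here is a minimal, self-contained user-space sketch (editorial illustration, not part of the driver) of how the SMBIOS Type 38 base-address modifier byte at offset 0x10 encodes the register spacing that decode_dmi() extracts; the helper name and the sample value are made up for this example.

#include <stdio.h>

/* Editorial example only: mirrors the (data[0x10] & 0xC0) >> 6 decoding above. */
static int example_reg_spacing(unsigned char modifier)
{
	switch ((modifier & 0xC0) >> 6) {
	case 0x00:
		return 1;	/* registers on successive byte boundaries */
	case 0x01:
		return 4;	/* registers on 32-bit boundaries */
	case 0x02:
		return 16;	/* registers on 16-byte boundaries */
	default:
		return -1;	/* reserved encoding, ignore the interface */
	}
}

int main(void)
{
	/* 0x40 carries spacing code 1, i.e. registers 4 bytes apart. */
	printf("register spacing: %d byte(s)\n", example_reg_spacing(0x40));
	return 0;
}
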
2028
b0defcdb 2029static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1da177e4 2030{
b0defcdb 2031 struct smi_info *info;
1da177e4 2032
b0defcdb
CM
2033 info = kzalloc(sizeof(*info), GFP_KERNEL);
2034 if (!info) {
2035 printk(KERN_ERR
2036 "ipmi_si: Could not allocate SI data\n");
2037 return;
1da177e4 2038 }
1da177e4 2039
b0defcdb 2040 info->addr_source = "SMBIOS";
1da177e4 2041
e8b33617 2042 switch (ipmi_data->type) {
b0defcdb
CM
2043 case 0x01: /* KCS */
2044 info->si_type = SI_KCS;
2045 break;
2046 case 0x02: /* SMIC */
2047 info->si_type = SI_SMIC;
2048 break;
2049 case 0x03: /* BT */
2050 info->si_type = SI_BT;
2051 break;
2052 default:
80cd6920 2053 kfree(info);
b0defcdb 2054 return;
1da177e4 2055 }
1da177e4 2056
b0defcdb
CM
2057 switch (ipmi_data->addr_space) {
2058 case IPMI_MEM_ADDR_SPACE:
1da177e4 2059 info->io_setup = mem_setup;
b0defcdb
CM
2060 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2061 break;
2062
2063 case IPMI_IO_ADDR_SPACE:
1da177e4 2064 info->io_setup = port_setup;
b0defcdb
CM
2065 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2066 break;
2067
2068 default:
1da177e4 2069 kfree(info);
b0defcdb
CM
2070 printk(KERN_WARNING
2071 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2072 ipmi_data->addr_space);
2073 return;
1da177e4 2074 }
b0defcdb 2075 info->io.addr_data = ipmi_data->base_addr;
1da177e4 2076
b0defcdb
CM
2077 info->io.regspacing = ipmi_data->offset;
2078 if (!info->io.regspacing)
1da177e4
LT
2079 info->io.regspacing = DEFAULT_REGSPACING;
2080 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2081 info->io.regshift = 0;
1da177e4
LT
2082
2083 info->slave_addr = ipmi_data->slave_addr;
2084
b0defcdb
CM
2085 info->irq = ipmi_data->irq;
2086 if (info->irq)
2087 info->irq_setup = std_irq_setup;
1da177e4 2088
b0defcdb
CM
2089 try_smi_init(info);
2090}
1da177e4 2091
b0defcdb
CM
2092static void __devinit dmi_find_bmc(void)
2093{
2094 struct dmi_device *dev = NULL;
2095 struct dmi_ipmi_data data;
2096 int rv;
2097
2098 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
397f4ebf 2099 memset(&data, 0, sizeof(data));
b0defcdb
CM
2100 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2101 if (!rv)
2102 try_init_dmi(&data);
2103 }
1da177e4 2104}
a9fad4cc 2105#endif /* CONFIG_DMI */
1da177e4
LT
2106
2107#ifdef CONFIG_PCI
2108
b0defcdb
CM
2109#define PCI_ERMC_CLASSCODE 0x0C0700
2110#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2111#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2112#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2113#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2114#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2115
1da177e4
LT
2116#define PCI_HP_VENDOR_ID 0x103C
2117#define PCI_MMC_DEVICE_ID 0x121A
2118#define PCI_MMC_ADDR_CW 0x10
2119
b0defcdb
CM
2120static void ipmi_pci_cleanup(struct smi_info *info)
2121{
2122 struct pci_dev *pdev = info->addr_source_data;
2123
2124 pci_disable_device(pdev);
2125}
1da177e4 2126
b0defcdb
CM
2127static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2128 const struct pci_device_id *ent)
1da177e4 2129{
b0defcdb
CM
2130 int rv;
2131 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2132 struct smi_info *info;
2133 int first_reg_offset = 0;
1da177e4 2134
b0defcdb
CM
2135 info = kzalloc(sizeof(*info), GFP_KERNEL);
2136 if (!info)
1cd441f9 2137 return -ENOMEM;
1da177e4 2138
b0defcdb 2139 info->addr_source = "PCI";
1da177e4 2140
b0defcdb
CM
2141 switch (class_type) {
2142 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2143 info->si_type = SI_SMIC;
2144 break;
1da177e4 2145
b0defcdb
CM
2146 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2147 info->si_type = SI_KCS;
2148 break;
2149
2150 case PCI_ERMC_CLASSCODE_TYPE_BT:
2151 info->si_type = SI_BT;
2152 break;
2153
2154 default:
2155 kfree(info);
2156 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2157 pci_name(pdev), class_type);
1cd441f9 2158 return -ENOMEM;
1da177e4
LT
2159 }
2160
b0defcdb
CM
2161 rv = pci_enable_device(pdev);
2162 if (rv) {
2163 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2164 pci_name(pdev));
2165 kfree(info);
2166 return rv;
1da177e4
LT
2167 }
2168
b0defcdb
CM
2169 info->addr_source_cleanup = ipmi_pci_cleanup;
2170 info->addr_source_data = pdev;
1da177e4 2171
b0defcdb
CM
2172 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2173 first_reg_offset = 1;
1da177e4 2174
b0defcdb
CM
2175 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2176 info->io_setup = port_setup;
2177 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2178 } else {
2179 info->io_setup = mem_setup;
2180 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1da177e4 2181 }
b0defcdb 2182 info->io.addr_data = pci_resource_start(pdev, 0);
1da177e4 2183
b0defcdb 2184 info->io.regspacing = DEFAULT_REGSPACING;
1da177e4 2185 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2186 info->io.regshift = 0;
1da177e4 2187
b0defcdb
CM
2188 info->irq = pdev->irq;
2189 if (info->irq)
2190 info->irq_setup = std_irq_setup;
1da177e4 2191
50c812b2 2192 info->dev = &pdev->dev;
fca3b747 2193 pci_set_drvdata(pdev, info);
50c812b2 2194
b0defcdb
CM
2195 return try_smi_init(info);
2196}
1da177e4 2197
b0defcdb
CM
2198static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2199{
fca3b747
CM
2200 struct smi_info *info = pci_get_drvdata(pdev);
2201 cleanup_one_si(info);
b0defcdb 2202}
1da177e4 2203
b0defcdb
CM
2204#ifdef CONFIG_PM
2205static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2206{
1da177e4
LT
2207 return 0;
2208}
1da177e4 2209
b0defcdb 2210static int ipmi_pci_resume(struct pci_dev *pdev)
1da177e4 2211{
b0defcdb
CM
2212 return 0;
2213}
1da177e4 2214#endif
1da177e4 2215
b0defcdb
CM
2216static struct pci_device_id ipmi_pci_devices[] = {
2217 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
d13adb60 2218 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
{ 0, }
b0defcdb
CM
2219};
2220MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2221
2222static struct pci_driver ipmi_pci_driver = {
2223 .name = DEVICE_NAME,
2224 .id_table = ipmi_pci_devices,
2225 .probe = ipmi_pci_probe,
2226 .remove = __devexit_p(ipmi_pci_remove),
2227#ifdef CONFIG_PM
2228 .suspend = ipmi_pci_suspend,
2229 .resume = ipmi_pci_resume,
2230#endif
2231};
2232#endif /* CONFIG_PCI */
1da177e4
LT
2233
2234
dba9b4f6
CM
2235#ifdef CONFIG_PPC_OF
2236static int __devinit ipmi_of_probe(struct of_device *dev,
2237 const struct of_device_id *match)
2238{
2239 struct smi_info *info;
2240 struct resource resource;
2241 const int *regsize, *regspacing, *regshift;
2242 struct device_node *np = dev->node;
2243 int ret;
2244 int proplen;
2245
2246 dev_info(&dev->dev, PFX "probing via device tree\n");
2247
2248 ret = of_address_to_resource(np, 0, &resource);
2249 if (ret) {
2250 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2251 return ret;
2252 }
2253
2254 regsize = get_property(np, "reg-size", &proplen);
2255 if (regsize && proplen != 4) {
2256 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2257 return -EINVAL;
2258 }
2259
2260 regspacing = get_property(np, "reg-spacing", &proplen);
2261 if (regspacing && proplen != 4) {
2262 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2263 return -EINVAL;
2264 }
2265
2266 regshift = get_property(np, "reg-shift", &proplen);
2267 if (regshift && proplen != 4) {
2268 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2269 return -EINVAL;
2270 }
2271
2272 info = kzalloc(sizeof(*info), GFP_KERNEL);
2273
2274 if (!info) {
2275 dev_err(&dev->dev,
2276 PFX "could not allocate memory for OF probe\n");
2277 return -ENOMEM;
2278 }
2279
2280 info->si_type = (enum si_type) match->data;
2281 info->addr_source = "device-tree";
2282 info->io_setup = mem_setup;
2283 info->irq_setup = std_irq_setup;
2284
2285 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2286 info->io.addr_data = resource.start;
2287
2288 info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
2289 info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
2290 info->io.regshift = regshift ? *regshift : 0;
2291
2292 info->irq = irq_of_parse_and_map(dev->node, 0);
2293 info->dev = &dev->dev;
2294
2295 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2296 info->io.addr_data, info->io.regsize, info->io.regspacing,
2297 info->irq);
2298
2299 dev->dev.driver_data = (void*) info;
2300
2301 return try_smi_init(info);
2302}
2303
2304static int __devexit ipmi_of_remove(struct of_device *dev)
2305{
2306 cleanup_one_si(dev->dev.driver_data);
2307 return 0;
2308}
2309
2310static struct of_device_id ipmi_match[] =
2311{
2312 { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS },
2313 { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2314 { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT },
2315 {},
2316};
2317
2318static struct of_platform_driver ipmi_of_platform_driver =
2319{
2320 .name = "ipmi",
2321 .match_table = ipmi_match,
2322 .probe = ipmi_of_probe,
2323 .remove = __devexit_p(ipmi_of_remove),
2324};
2325#endif /* CONFIG_PPC_OF */
2326
2327
1da177e4
LT
2328static int try_get_dev_id(struct smi_info *smi_info)
2329{
50c812b2
CM
2330 unsigned char msg[2];
2331 unsigned char *resp;
2332 unsigned long resp_len;
2333 enum si_sm_result smi_result;
2334 int rv = 0;
1da177e4
LT
2335
2336 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
b0defcdb 2337 if (!resp)
1da177e4
LT
2338 return -ENOMEM;
2339
2340 /* Do a Get Device ID command, since it comes back with some
2341 useful info. */
2342 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2343 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2344 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2345
2346 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2347 for (;;)
2348 {
c3e7e791
CM
2349 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2350 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
da4cd8df 2351 schedule_timeout_uninterruptible(1);
1da177e4
LT
2352 smi_result = smi_info->handlers->event(
2353 smi_info->si_sm, 100);
2354 }
2355 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2356 {
2357 smi_result = smi_info->handlers->event(
2358 smi_info->si_sm, 0);
2359 }
2360 else
2361 break;
2362 }
2363 if (smi_result == SI_SM_HOSED) {
2364 /* We couldn't get the state machine to run, so whatever's at
2365 the port is probably not an IPMI SMI interface. */
2366 rv = -ENODEV;
2367 goto out;
2368 }
2369
2370 /* Otherwise, we got some data. */
2371 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2372 resp, IPMI_MAX_MSG_LENGTH);
50c812b2 2373 if (resp_len < 14) {
1da177e4
LT
2374 /* That's odd, it should be longer. */
2375 rv = -EINVAL;
2376 goto out;
2377 }
2378
2379 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2380 /* That's odd, it shouldn't be able to fail. */
2381 rv = -EINVAL;
2382 goto out;
2383 }
2384
2385 /* Record info from the get device id, in case we need it. */
50c812b2 2386 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1da177e4
LT
2387
2388 out:
2389 kfree(resp);
2390 return rv;
2391}
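
As an aside, the request and response handling above follows standard IPMI framing: two request bytes (the NetFn shifted into the top six bits, then the command), and a response that echoes the command and carries a completion code before the Get Device ID payload. The following self-contained user-space sketch (not part of the driver; the response bytes are made up) mirrors the same construction and validation checks.

#include <stdio.h>

#define EX_NETFN_APP_REQUEST	0x06	/* IPMI App NetFn */
#define EX_GET_DEVICE_ID_CMD	0x01

int main(void)
{
	/* Request built the same way as above: NetFn/LUN byte, then command. */
	unsigned char req[2] = { EX_NETFN_APP_REQUEST << 2, EX_GET_DEVICE_ID_CMD };

	/* Hypothetical response: response NetFn, command, completion code,
	 * then the Get Device ID payload (device id, device revision,
	 * firmware revision 1/2, IPMI version, additional device support,
	 * manufacturer id, ...).  All values are placeholders. */
	unsigned char resp[15] = {
		(EX_NETFN_APP_REQUEST | 1) << 2, EX_GET_DEVICE_ID_CMD, 0x00,
		0x20, 0x80, 0x01, 0x40, 0x51, 0xbf, 0xa2, 0x02, 0x00, 0x00, 0x00, 0x00,
	};

	(void)req;
	if (sizeof(resp) < 14 || resp[1] != EX_GET_DEVICE_ID_CMD || resp[2] != 0) {
		printf("not a usable Get Device ID response\n");
		return 1;
	}
	printf("device id 0x%02x, IPMI version 0x%02x\n", resp[3], resp[7]);
	return 0;
}
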
2392
2393static int type_file_read_proc(char *page, char **start, off_t off,
2394 int count, int *eof, void *data)
2395{
1da177e4
LT
2396 struct smi_info *smi = data;
2397
b361e27b 2398 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
1da177e4
LT
2399}
2400
2401static int stat_file_read_proc(char *page, char **start, off_t off,
2402 int count, int *eof, void *data)
2403{
2404 char *out = (char *) page;
2405 struct smi_info *smi = data;
2406
2407 out += sprintf(out, "interrupts_enabled: %d\n",
b0defcdb 2408 smi->irq && !smi->interrupt_disabled);
1da177e4
LT
2409 out += sprintf(out, "short_timeouts: %ld\n",
2410 smi->short_timeouts);
2411 out += sprintf(out, "long_timeouts: %ld\n",
2412 smi->long_timeouts);
2413 out += sprintf(out, "timeout_restarts: %ld\n",
2414 smi->timeout_restarts);
2415 out += sprintf(out, "idles: %ld\n",
2416 smi->idles);
2417 out += sprintf(out, "interrupts: %ld\n",
2418 smi->interrupts);
2419 out += sprintf(out, "attentions: %ld\n",
2420 smi->attentions);
2421 out += sprintf(out, "flag_fetches: %ld\n",
2422 smi->flag_fetches);
2423 out += sprintf(out, "hosed_count: %ld\n",
2424 smi->hosed_count);
2425 out += sprintf(out, "complete_transactions: %ld\n",
2426 smi->complete_transactions);
2427 out += sprintf(out, "events: %ld\n",
2428 smi->events);
2429 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
2430 smi->watchdog_pretimeouts);
2431 out += sprintf(out, "incoming_messages: %ld\n",
2432 smi->incoming_messages);
2433
b361e27b
CM
2434 return out - page;
2435}
2436
2437static int param_read_proc(char *page, char **start, off_t off,
2438 int count, int *eof, void *data)
2439{
2440 struct smi_info *smi = data;
2441
2442 return sprintf(page,
2443 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2444 si_to_str[smi->si_type],
2445 addr_space_to_str[smi->io.addr_type],
2446 smi->io.addr_data,
2447 smi->io.regspacing,
2448 smi->io.regsize,
2449 smi->io.regshift,
2450 smi->irq,
2451 smi->slave_addr);
1da177e4
LT
2452}
2453
3ae0e0f9
CM
2454/*
2455 * oem_data_avail_to_receive_msg_avail
2456 * @info - smi_info structure with msg_flags set
2457 *
2458 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2459 * Returns 1 indicating need to re-run handle_flags().
2460 */
2461static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2462{
e8b33617
CM
2463 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2464 RECEIVE_MSG_AVAIL);
3ae0e0f9
CM
2465 return 1;
2466}
2467
2468/*
2469 * setup_dell_poweredge_oem_data_handler
2470 * @info - smi_info.device_id must be populated
2471 *
2472 * Systems that match, but have firmware version < 1.40 may assert
2473 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2474 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2475 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2476 * as RECEIVE_MSG_AVAIL instead.
2477 *
2478 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2479 * assert the OEM[012] bits, and if it did, the driver would have to
2480 * change to handle that properly, we don't actually check for the
2481 * firmware version.
2482 * Device ID = 0x20 BMC on PowerEdge 8G servers
2483 * Device Revision = 0x80
2484 * Firmware Revision1 = 0x01 BMC version 1.40
2485 * Firmware Revision2 = 0x40 BCD encoded
2486 * IPMI Version = 0x51 IPMI 1.5
2487 * Manufacturer ID = A2 02 00 Dell IANA
2488 *
d5a2b89a
CM
2489 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2490 * OEM0_DATA_AVAIL, which likewise needs to be treated as RECEIVE_MSG_AVAIL.
2491 *
3ae0e0f9
CM
2492 */
2493#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2494#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2495#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
50c812b2 2496#define DELL_IANA_MFR_ID 0x0002a2
3ae0e0f9
CM
2497static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2498{
2499 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2500 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
d5a2b89a
CM
2501 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2502 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
50c812b2 2503 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
d5a2b89a
CM
2504 smi_info->oem_data_avail_handler =
2505 oem_data_avail_to_receive_msg_avail;
2506 }
2507 else if (ipmi_version_major(id) < 1 ||
2508 (ipmi_version_major(id) == 1 &&
2509 ipmi_version_minor(id) < 5)) {
2510 smi_info->oem_data_avail_handler =
2511 oem_data_avail_to_receive_msg_avail;
2512 }
3ae0e0f9
CM
2513 }
2514}
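
The firmware revision fields listed in the comment above mix encodings: revision 1 is plain binary (with the top bit reserved), while revision 2 is BCD. A tiny self-contained sketch (editorial illustration, not driver code) shows how 0x01/0x40 decodes to the "1.40" mentioned there.

#include <stdio.h>

/* Editorial example: decode the BCD-encoded minor firmware revision. */
static unsigned int example_bcd_to_dec(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

int main(void)
{
	unsigned char fw_rev1 = 0x01;	/* major revision, binary, bit 7 reserved */
	unsigned char fw_rev2 = 0x40;	/* minor revision, BCD encoded */

	printf("BMC firmware %u.%02u\n",
	       (unsigned int)(fw_rev1 & 0x7f), example_bcd_to_dec(fw_rev2));
	return 0;
}
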
2515
ea94027b
CM
2516#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2517static void return_hosed_msg_badsize(struct smi_info *smi_info)
2518{
2519 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2520
2521 /* Make it a response */
2522 msg->rsp[0] = msg->data[0] | 4;
2523 msg->rsp[1] = msg->data[1];
2524 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2525 msg->rsp_size = 3;
2526 smi_info->curr_msg = NULL;
2527 deliver_recv_msg(smi_info, msg);
2528}
2529
2530/*
2531 * dell_poweredge_bt_xaction_handler
2532 * @info - smi_info.device_id must be populated
2533 *
2534 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2535 * not respond to a Get SDR command if the length of the data
2536 * requested is exactly 0x3A, which leads to command timeouts and no
2537 * data returned. This intercepts such commands, and causes userspace
2538 * callers to try again with a different-sized buffer, which succeeds.
2539 */
2540
2541#define STORAGE_NETFN 0x0A
2542#define STORAGE_CMD_GET_SDR 0x23
2543static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2544 unsigned long unused,
2545 void *in)
2546{
2547 struct smi_info *smi_info = in;
2548 unsigned char *data = smi_info->curr_msg->data;
2549 unsigned int size = smi_info->curr_msg->data_size;
2550 if (size >= 8 &&
2551 (data[0]>>2) == STORAGE_NETFN &&
2552 data[1] == STORAGE_CMD_GET_SDR &&
2553 data[7] == 0x3A) {
2554 return_hosed_msg_badsize(smi_info);
2555 return NOTIFY_STOP;
2556 }
2557 return NOTIFY_DONE;
2558}
2559
2560static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2561 .notifier_call = dell_poweredge_bt_xaction_handler,
2562};
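
For reference, here is a self-contained user-space sketch (not driver code; the buffer contents beyond the NetFn, command, and byte-count fields are placeholders) of a Get SDR request framed the way the notifier above inspects it: data[0] carries the NetFn in its top six bits, data[1] the command, and data[7] the requested byte count that triggers the workaround when it equals 0x3A, in which case return_hosed_msg_badsize() answers locally with completion code 0xCA.

#include <stdio.h>

int main(void)
{
	unsigned char get_sdr[8] = {
		0x0A << 2,	/* STORAGE_NETFN, LUN 0 */
		0x23,		/* STORAGE_CMD_GET_SDR */
		0x00, 0x00,	/* reservation id (placeholder) */
		0x00, 0x00,	/* record id (placeholder) */
		0x00,		/* offset into the record */
		0x3A,		/* bytes to read: the length the BMC mishandles */
	};

	if (sizeof(get_sdr) >= 8 && (get_sdr[0] >> 2) == 0x0A &&
	    get_sdr[1] == 0x23 && get_sdr[7] == 0x3A)
		printf("request would be answered locally with completion code 0xCA\n");
	return 0;
}
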
2563
2564/*
2565 * setup_dell_poweredge_bt_xaction_handler
2566 * @info - smi_info.device_id must be filled in already
2567 *
2568 * Registers dell_poweredge_bt_xaction_notifier (above)
2569 * when we know the interface needs it.
2570 */
2571static void
2572setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2573{
2574 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2575 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
ea94027b
CM
2576 smi_info->si_type == SI_BT)
2577 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2578}
2579
3ae0e0f9
CM
2580/*
2581 * setup_oem_data_handler
2582 * @info - smi_info.device_id must be filled in already
2583 *
2584 * Fills in smi_info.oem_data_avail_handler
2585 * when we know what function to use there.
2586 */
2587
2588static void setup_oem_data_handler(struct smi_info *smi_info)
2589{
2590 setup_dell_poweredge_oem_data_handler(smi_info);
2591}
2592
ea94027b
CM
2593static void setup_xaction_handlers(struct smi_info *smi_info)
2594{
2595 setup_dell_poweredge_bt_xaction_handler(smi_info);
2596}
2597
a9a2c44f
CM
2598static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2599{
453823ba
CM
2600 if (smi_info->intf) {
2601 /* The timer and thread are only running if the
2602 interface has been started up and registered. */
2603 if (smi_info->thread != NULL)
2604 kthread_stop(smi_info->thread);
2605 del_timer_sync(&smi_info->si_timer);
2606 }
a9a2c44f
CM
2607}
2608
7420884c 2609static __devinitdata struct ipmi_default_vals
b0defcdb
CM
2610{
2611 int type;
2612 int port;
7420884c 2613} ipmi_defaults[] =
b0defcdb
CM
2614{
2615 { .type = SI_KCS, .port = 0xca2 },
2616 { .type = SI_SMIC, .port = 0xca9 },
2617 { .type = SI_BT, .port = 0xe4 },
2618 { .port = 0 }
2619};
2620
2621static __devinit void default_find_bmc(void)
2622{
2623 struct smi_info *info;
2624 int i;
2625
2626 for (i = 0; ; i++) {
2627 if (!ipmi_defaults[i].port)
2628 break;
2629
2630 info = kzalloc(sizeof(*info), GFP_KERNEL);
2631 if (!info)
2632 return;
2633
4ff31d77
CK
2634#ifdef CONFIG_PPC_MERGE
2635 if (check_legacy_ioport(ipmi_defaults[i].port))
2636 continue;
2637#endif
2638
b0defcdb
CM
2639 info->addr_source = NULL;
2640
2641 info->si_type = ipmi_defaults[i].type;
2642 info->io_setup = port_setup;
2643 info->io.addr_data = ipmi_defaults[i].port;
2644 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2645
2646 info->io.addr = NULL;
2647 info->io.regspacing = DEFAULT_REGSPACING;
2648 info->io.regsize = DEFAULT_REGSPACING;
2649 info->io.regshift = 0;
2650
2651 if (try_smi_init(info) == 0) {
2652 /* Found one... */
2653 printk(KERN_INFO "ipmi_si: Found default %s state"
2654 " machine at %s address 0x%lx\n",
2655 si_to_str[info->si_type],
2656 addr_space_to_str[info->io.addr_type],
2657 info->io.addr_data);
2658 return;
2659 }
2660 }
2661}
2662
2663static int is_new_interface(struct smi_info *info)
1da177e4 2664{
b0defcdb 2665 struct smi_info *e;
1da177e4 2666
b0defcdb
CM
2667 list_for_each_entry(e, &smi_infos, link) {
2668 if (e->io.addr_type != info->io.addr_type)
2669 continue;
2670 if (e->io.addr_data == info->io.addr_data)
2671 return 0;
2672 }
1da177e4 2673
b0defcdb
CM
2674 return 1;
2675}
1da177e4 2676
b0defcdb
CM
2677static int try_smi_init(struct smi_info *new_smi)
2678{
2679 int rv;
2680
2681 if (new_smi->addr_source) {
2682 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2683 " machine at %s address 0x%lx, slave address 0x%x,"
2684 " irq %d\n",
2685 new_smi->addr_source,
2686 si_to_str[new_smi->si_type],
2687 addr_space_to_str[new_smi->io.addr_type],
2688 new_smi->io.addr_data,
2689 new_smi->slave_addr, new_smi->irq);
2690 }
2691
d6dfd131 2692 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2693 if (!is_new_interface(new_smi)) {
2694 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2695 rv = -EBUSY;
2696 goto out_err;
2697 }
1da177e4
LT
2698
2699 /* So we know not to free it unless we have allocated one. */
2700 new_smi->intf = NULL;
2701 new_smi->si_sm = NULL;
2702 new_smi->handlers = NULL;
2703
b0defcdb
CM
2704 switch (new_smi->si_type) {
2705 case SI_KCS:
1da177e4 2706 new_smi->handlers = &kcs_smi_handlers;
b0defcdb
CM
2707 break;
2708
2709 case SI_SMIC:
1da177e4 2710 new_smi->handlers = &smic_smi_handlers;
b0defcdb
CM
2711 break;
2712
2713 case SI_BT:
1da177e4 2714 new_smi->handlers = &bt_smi_handlers;
b0defcdb
CM
2715 break;
2716
2717 default:
1da177e4
LT
2718 /* No support for anything else yet. */
2719 rv = -EIO;
2720 goto out_err;
2721 }
2722
2723 /* Allocate the state machine's data and initialize it. */
2724 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
b0defcdb 2725 if (!new_smi->si_sm) {
1da177e4
LT
2726 printk(" Could not allocate state machine memory\n");
2727 rv = -ENOMEM;
2728 goto out_err;
2729 }
2730 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2731 &new_smi->io);
2732
2733 /* Now that we know the I/O size, we can set up the I/O. */
2734 rv = new_smi->io_setup(new_smi);
2735 if (rv) {
2736 printk(" Could not set up I/O space\n");
2737 goto out_err;
2738 }
2739
2740 spin_lock_init(&(new_smi->si_lock));
2741 spin_lock_init(&(new_smi->msg_lock));
2742 spin_lock_init(&(new_smi->count_lock));
2743
2744 /* Do low-level detection first. */
2745 if (new_smi->handlers->detect(new_smi->si_sm)) {
b0defcdb
CM
2746 if (new_smi->addr_source)
2747 printk(KERN_INFO "ipmi_si: Interface detection"
2748 " failed\n");
1da177e4
LT
2749 rv = -ENODEV;
2750 goto out_err;
2751 }
2752
2753 /* Attempt a get device id command. If it fails, we probably
b0defcdb 2754 don't have a BMC here. */
1da177e4 2755 rv = try_get_dev_id(new_smi);
b0defcdb
CM
2756 if (rv) {
2757 if (new_smi->addr_source)
2758 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2759 " at this location\n");
1da177e4 2760 goto out_err;
b0defcdb 2761 }
1da177e4 2762
3ae0e0f9 2763 setup_oem_data_handler(new_smi);
ea94027b 2764 setup_xaction_handlers(new_smi);
3ae0e0f9 2765
1da177e4 2766 /* Try to claim any interrupts. */
b0defcdb
CM
2767 if (new_smi->irq_setup)
2768 new_smi->irq_setup(new_smi);
1da177e4
LT
2769
2770 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2771 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2772 new_smi->curr_msg = NULL;
2773 atomic_set(&new_smi->req_events, 0);
2774 new_smi->run_to_completion = 0;
2775
2776 new_smi->interrupt_disabled = 0;
a9a2c44f 2777 atomic_set(&new_smi->stop_operation, 0);
b0defcdb
CM
2778 new_smi->intf_num = smi_num;
2779 smi_num++;
1da177e4
LT
2780
2781 /* Start clearing the flags before we enable interrupts or the
2782 timer to avoid racing with the timer. */
2783 start_clear_flags(new_smi);
2784 /* IRQ is defined to be set when non-zero. */
2785 if (new_smi->irq)
2786 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2787
50c812b2
CM
2788 if (!new_smi->dev) {
2789 /* If we don't already have a device from something
2790 * else (like PCI), then register a new one. */
2791 new_smi->pdev = platform_device_alloc("ipmi_si",
2792 new_smi->intf_num);
2793 if (!new_smi->pdev) {
2794 printk(KERN_ERR
2795 "ipmi_si_intf:"
2796 " Unable to allocate platform device\n");
rv = -ENOMEM;
453823ba 2797 goto out_err;
50c812b2
CM
2798 }
2799 new_smi->dev = &new_smi->pdev->dev;
2800 new_smi->dev->driver = &ipmi_driver;
2801
b48f5457 2802 rv = platform_device_add(new_smi->pdev);
50c812b2
CM
2803 if (rv) {
2804 printk(KERN_ERR
2805 "ipmi_si_intf:"
2806 " Unable to register system interface device:"
2807 " %d\n",
2808 rv);
453823ba 2809 goto out_err;
50c812b2
CM
2810 }
2811 new_smi->dev_registered = 1;
2812 }
2813
1da177e4
LT
2814 rv = ipmi_register_smi(&handlers,
2815 new_smi,
50c812b2
CM
2816 &new_smi->device_id,
2817 new_smi->dev,
759643b8 2818 "bmc",
453823ba 2819 new_smi->slave_addr);
1da177e4
LT
2820 if (rv) {
2821 printk(KERN_ERR
2822 "ipmi_si: Unable to register device: error %d\n",
2823 rv);
2824 goto out_err_stop_timer;
2825 }
2826
2827 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2828 type_file_read_proc, NULL,
2829 new_smi, THIS_MODULE);
2830 if (rv) {
2831 printk(KERN_ERR
2832 "ipmi_si: Unable to create proc entry: %d\n",
2833 rv);
2834 goto out_err_stop_timer;
2835 }
2836
2837 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2838 stat_file_read_proc, NULL,
2839 new_smi, THIS_MODULE);
2840 if (rv) {
2841 printk(KERN_ERR
2842 "ipmi_si: Unable to create proc entry: %d\n",
2843 rv);
2844 goto out_err_stop_timer;
2845 }
2846
b361e27b
CM
2847 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2848 param_read_proc, NULL,
2849 new_smi, THIS_MODULE);
2850 if (rv) {
2851 printk(KERN_ERR
2852 "ipmi_si: Unable to create proc entry: %d\n",
2853 rv);
2854 goto out_err_stop_timer;
2855 }
2856
b0defcdb
CM
2857 list_add_tail(&new_smi->link, &smi_infos);
2858
d6dfd131 2859 mutex_unlock(&smi_infos_lock);
1da177e4 2860
8f14137e 2861 printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
1da177e4
LT
2862
2863 return 0;
2864
2865 out_err_stop_timer:
a9a2c44f
CM
2866 atomic_inc(&new_smi->stop_operation);
2867 wait_for_timer_and_thread(new_smi);
1da177e4
LT
2868
2869 out_err:
2870 if (new_smi->intf)
2871 ipmi_unregister_smi(new_smi->intf);
2872
b0defcdb
CM
2873 if (new_smi->irq_cleanup)
2874 new_smi->irq_cleanup(new_smi);
1da177e4
LT
2875
2876 /* Wait until we know that we are out of any interrupt
2877 handlers that might have been running before we freed the
2878 interrupt. */
fbd568a3 2879 synchronize_sched();
1da177e4
LT
2880
2881 if (new_smi->si_sm) {
2882 if (new_smi->handlers)
2883 new_smi->handlers->cleanup(new_smi->si_sm);
2884 kfree(new_smi->si_sm);
2885 }
b0defcdb
CM
2886 if (new_smi->addr_source_cleanup)
2887 new_smi->addr_source_cleanup(new_smi);
7767e126
PG
2888 if (new_smi->io_cleanup)
2889 new_smi->io_cleanup(new_smi);
1da177e4 2890
50c812b2
CM
2891 if (new_smi->dev_registered)
2892 platform_device_unregister(new_smi->pdev);
2893
2894 kfree(new_smi);
2895
d6dfd131 2896 mutex_unlock(&smi_infos_lock);
b0defcdb 2897
1da177e4
LT
2898 return rv;
2899}
2900
b0defcdb 2901static __devinit int init_ipmi_si(void)
1da177e4 2902{
1da177e4
LT
2903 int i;
2904 char *str;
50c812b2 2905 int rv;
1da177e4
LT
2906
2907 if (initialized)
2908 return 0;
2909 initialized = 1;
2910
50c812b2
CM
2911 /* Register the device drivers. */
2912 rv = driver_register(&ipmi_driver);
2913 if (rv) {
2914 printk(KERN_ERR
2915 "init_ipmi_si: Unable to register driver: %d\n",
2916 rv);
2917 return rv;
2918 }
2919
2920
1da177e4
LT
2921 /* Parse out the si_type string into its components. */
2922 str = si_type_str;
2923 if (*str != '\0') {
e8b33617 2924 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
1da177e4
LT
2925 si_type[i] = str;
2926 str = strchr(str, ',');
2927 if (str) {
2928 *str = '\0';
2929 str++;
2930 } else {
2931 break;
2932 }
2933 }
2934 }
2935
1fdd75bd 2936 printk(KERN_INFO "IPMI System Interface driver.\n");
1da177e4 2937
b0defcdb
CM
2938 hardcode_find_bmc();
2939
a9fad4cc 2940#ifdef CONFIG_DMI
b224cd3a 2941 dmi_find_bmc();
1da177e4
LT
2942#endif
2943
b0defcdb 2944#ifdef CONFIG_ACPI
1d5636cc 2945 acpi_find_bmc();
b0defcdb 2946#endif
1da177e4 2947
b0defcdb 2948#ifdef CONFIG_PCI
168b35a7
CM
2949 rv = pci_register_driver(&ipmi_pci_driver);
2950 if (rv){
2951 printk(KERN_ERR
2952 "init_ipmi_si: Unable to register PCI driver: %d\n",
2953 rv);
2954 }
b0defcdb
CM
2955#endif
2956
dba9b4f6
CM
2957#ifdef CONFIG_PPC_OF
2958 of_register_platform_driver(&ipmi_of_platform_driver);
2959#endif
2960
b0defcdb 2961 if (si_trydefaults) {
d6dfd131 2962 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2963 if (list_empty(&smi_infos)) {
2964 /* No BMC was found, try defaults. */
d6dfd131 2965 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2966 default_find_bmc();
2967 } else {
d6dfd131 2968 mutex_unlock(&smi_infos_lock);
b0defcdb 2969 }
1da177e4
LT
2970 }
2971
d6dfd131 2972 mutex_lock(&smi_infos_lock);
b361e27b 2973 if (unload_when_empty && list_empty(&smi_infos)) {
d6dfd131 2974 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2975#ifdef CONFIG_PCI
2976 pci_unregister_driver(&ipmi_pci_driver);
2977#endif
10fb62e5
CK
2978
2979#ifdef CONFIG_PPC_OF
2980 of_unregister_platform_driver(&ipmi_of_platform_driver);
2981#endif
55ebcc38 2982 driver_unregister(&ipmi_driver);
1da177e4
LT
2983 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2984 return -ENODEV;
b0defcdb 2985 } else {
d6dfd131 2986 mutex_unlock(&smi_infos_lock);
b0defcdb 2987 return 0;
1da177e4 2988 }
1da177e4
LT
2989}
2990module_init(init_ipmi_si);
2991
b361e27b 2992static void cleanup_one_si(struct smi_info *to_clean)
1da177e4
LT
2993{
2994 int rv;
2995 unsigned long flags;
2996
b0defcdb 2997 if (!to_clean)
1da177e4
LT
2998 return;
2999
b0defcdb
CM
3000 list_del(&to_clean->link);
3001
ee6cd5f8 3002 /* Tell the driver that we are shutting down. */
a9a2c44f 3003 atomic_inc(&to_clean->stop_operation);
b0defcdb 3004
ee6cd5f8
CM
3005 /* Make sure the timer and thread are stopped and will not run
3006 again. */
a9a2c44f 3007 wait_for_timer_and_thread(to_clean);
1da177e4 3008
ee6cd5f8
CM
3009 /* Timeouts are stopped, now make sure the interrupts are off
3010 for the device. A little tricky with locks to make sure
3011 there are no races. */
3012 spin_lock_irqsave(&to_clean->si_lock, flags);
3013 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3014 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3015 poll(to_clean);
3016 schedule_timeout_uninterruptible(1);
3017 spin_lock_irqsave(&to_clean->si_lock, flags);
3018 }
3019 disable_si_irq(to_clean);
3020 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3021 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3022 poll(to_clean);
3023 schedule_timeout_uninterruptible(1);
3024 }
3025
3026 /* Clean up interrupts and make sure that everything is done. */
3027 if (to_clean->irq_cleanup)
3028 to_clean->irq_cleanup(to_clean);
e8b33617 3029 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1da177e4 3030 poll(to_clean);
da4cd8df 3031 schedule_timeout_uninterruptible(1);
1da177e4
LT
3032 }
3033
3034 rv = ipmi_unregister_smi(to_clean->intf);
3035 if (rv) {
3036 printk(KERN_ERR
3037 "ipmi_si: Unable to unregister device: errno=%d\n",
3038 rv);
3039 }
3040
3041 to_clean->handlers->cleanup(to_clean->si_sm);
3042
3043 kfree(to_clean->si_sm);
3044
b0defcdb
CM
3045 if (to_clean->addr_source_cleanup)
3046 to_clean->addr_source_cleanup(to_clean);
7767e126
PG
3047 if (to_clean->io_cleanup)
3048 to_clean->io_cleanup(to_clean);
50c812b2
CM
3049
3050 if (to_clean->dev_registered)
3051 platform_device_unregister(to_clean->pdev);
3052
3053 kfree(to_clean);
1da177e4
LT
3054}
3055
3056static __exit void cleanup_ipmi_si(void)
3057{
b0defcdb 3058 struct smi_info *e, *tmp_e;
1da177e4 3059
b0defcdb 3060 if (!initialized)
1da177e4
LT
3061 return;
3062
b0defcdb
CM
3063#ifdef CONFIG_PCI
3064 pci_unregister_driver(&ipmi_pci_driver);
3065#endif
3066
dba9b4f6
CM
3067#ifdef CONFIG_PPC_OF
3068 of_unregister_platform_driver(&ipmi_of_platform_driver);
3069#endif
3070
d6dfd131 3071 mutex_lock(&smi_infos_lock);
b0defcdb
CM
3072 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3073 cleanup_one_si(e);
d6dfd131 3074 mutex_unlock(&smi_infos_lock);
50c812b2
CM
3075
3076 driver_unregister(&ipmi_driver);
1da177e4
LT
3077}
3078module_exit(cleanup_ipmi_si);
3079
3080MODULE_LICENSE("GPL");
1fdd75bd
CM
3081MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3082MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");