1da177e4
LT
1/*
2 * ipmi_si.c
3 *
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
6 *
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 *
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35/*
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
39 */
40
1da177e4
LT
41#include <linux/module.h>
42#include <linux/moduleparam.h>
43#include <asm/system.h>
44#include <linux/sched.h>
45#include <linux/timer.h>
46#include <linux/errno.h>
47#include <linux/spinlock.h>
48#include <linux/slab.h>
49#include <linux/delay.h>
50#include <linux/list.h>
51#include <linux/pci.h>
52#include <linux/ioport.h>
ea94027b 53#include <linux/notifier.h>
b0defcdb 54#include <linux/mutex.h>
e9a705a0 55#include <linux/kthread.h>
1da177e4 56#include <asm/irq.h>
1da177e4
LT
57#include <linux/interrupt.h>
58#include <linux/rcupdate.h>
59#include <linux/ipmi_smi.h>
60#include <asm/io.h>
61#include "ipmi_si_sm.h"
62#include <linux/init.h>
b224cd3a 63#include <linux/dmi.h>
b361e27b
CM
64#include <linux/string.h>
65#include <linux/ctype.h>
66
67#define PFX "ipmi_si: "
1da177e4
LT
68
69/* Measure times between events in the driver. */
70#undef DEBUG_TIMING
71
72/* Call every 10 ms. */
73#define SI_TIMEOUT_TIME_USEC 10000
74#define SI_USEC_PER_JIFFY (1000000/HZ)
75#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
 76#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
77 short timeout */
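/*
 * Worked example (illustrative, not part of the original source): with
 * HZ=1000, SI_USEC_PER_JIFFY is 1000 and SI_TIMEOUT_JIFFIES works out to
 * 10 jiffies, i.e. the timer fires roughly every 10 ms.  With HZ=100 the
 * integer division gives SI_TIMEOUT_JIFFIES = 1, which is the same 10 ms.
 */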
78
79enum si_intf_state {
80 SI_NORMAL,
81 SI_GETTING_FLAGS,
82 SI_GETTING_EVENTS,
83 SI_CLEARING_FLAGS,
84 SI_CLEARING_FLAGS_THEN_SET_IRQ,
85 SI_GETTING_MESSAGES,
86 SI_ENABLE_INTERRUPTS1,
87 SI_ENABLE_INTERRUPTS2
88 /* FIXME - add watchdog stuff. */
89};
90
9dbf68f9
CM
91/* Some BT-specific defines we need here. */
92#define IPMI_BT_INTMASK_REG 2
93#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
94#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
95
1da177e4
LT
96enum si_type {
97 SI_KCS, SI_SMIC, SI_BT
98};
b361e27b 99static char *si_to_str[] = { "kcs", "smic", "bt" };
1da177e4 100
50c812b2
CM
101#define DEVICE_NAME "ipmi_si"
102
103static struct device_driver ipmi_driver =
104{
105 .name = DEVICE_NAME,
106 .bus = &platform_bus_type
107};
3ae0e0f9 108
1da177e4
LT
109struct smi_info
110{
a9a2c44f 111 int intf_num;
1da177e4
LT
112 ipmi_smi_t intf;
113 struct si_sm_data *si_sm;
114 struct si_sm_handlers *handlers;
115 enum si_type si_type;
116 spinlock_t si_lock;
117 spinlock_t msg_lock;
118 struct list_head xmit_msgs;
119 struct list_head hp_xmit_msgs;
120 struct ipmi_smi_msg *curr_msg;
121 enum si_intf_state si_state;
122
123 /* Used to handle the various types of I/O that can occur with
124 IPMI */
125 struct si_sm_io io;
126 int (*io_setup)(struct smi_info *info);
127 void (*io_cleanup)(struct smi_info *info);
128 int (*irq_setup)(struct smi_info *info);
129 void (*irq_cleanup)(struct smi_info *info);
130 unsigned int io_size;
b0defcdb
CM
131 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132 void (*addr_source_cleanup)(struct smi_info *info);
133 void *addr_source_data;
1da177e4 134
3ae0e0f9
CM
135 /* Per-OEM handler, called from handle_flags().
136 Returns 1 when handle_flags() needs to be re-run
137 or 0 indicating it set si_state itself.
138 */
139 int (*oem_data_avail_handler)(struct smi_info *smi_info);
140
1da177e4
LT
141 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142 is set to hold the flags until we are done handling everything
143 from the flags. */
144#define RECEIVE_MSG_AVAIL 0x01
145#define EVENT_MSG_BUFFER_FULL 0x02
146#define WDT_PRE_TIMEOUT_INT 0x08
3ae0e0f9
CM
147#define OEM0_DATA_AVAIL 0x20
148#define OEM1_DATA_AVAIL 0x40
149#define OEM2_DATA_AVAIL 0x80
150#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
151 OEM1_DATA_AVAIL | \
152 OEM2_DATA_AVAIL)
1da177e4
LT
153 unsigned char msg_flags;
154
155 /* If set to true, this will request events the next time the
156 state machine is idle. */
157 atomic_t req_events;
158
159 /* If true, run the state machine to completion on every send
160 call. Generally used after a panic to make sure stuff goes
161 out. */
162 int run_to_completion;
163
164 /* The I/O port of an SI interface. */
165 int port;
166
167 /* The space between start addresses of the two ports. For
168 instance, if the first port is 0xca2 and the spacing is 4, then
169 the second port is 0xca6. */
170 unsigned int spacing;
171
172 /* zero if no irq; */
173 int irq;
174
175 /* The timer for this si. */
176 struct timer_list si_timer;
177
178 /* The time (in jiffies) the last timeout occurred at. */
179 unsigned long last_timeout_jiffies;
180
181 /* Used to gracefully stop the timer without race conditions. */
a9a2c44f 182 atomic_t stop_operation;
1da177e4
LT
183
184 /* The driver will disable interrupts when it gets into a
185 situation where it cannot handle messages due to lack of
186 memory. Once that situation clears up, it will re-enable
187 interrupts. */
188 int interrupt_disabled;
189
50c812b2 190 /* From the get device id response... */
3ae0e0f9 191 struct ipmi_device_id device_id;
1da177e4 192
50c812b2
CM
193 /* Driver model stuff. */
194 struct device *dev;
195 struct platform_device *pdev;
196
197 /* True if we allocated the device, false if it came from
198 * someplace else (like PCI). */
199 int dev_registered;
200
1da177e4
LT
201 /* Slave address, could be reported from DMI. */
202 unsigned char slave_addr;
203
204 /* Counters and things for the proc filesystem. */
205 spinlock_t count_lock;
206 unsigned long short_timeouts;
207 unsigned long long_timeouts;
208 unsigned long timeout_restarts;
209 unsigned long idles;
210 unsigned long interrupts;
211 unsigned long attentions;
212 unsigned long flag_fetches;
213 unsigned long hosed_count;
214 unsigned long complete_transactions;
215 unsigned long events;
216 unsigned long watchdog_pretimeouts;
217 unsigned long incoming_messages;
a9a2c44f 218
e9a705a0 219 struct task_struct *thread;
b0defcdb
CM
220
221 struct list_head link;
1da177e4
LT
222};
223
a51f4a81
CM
224#define SI_MAX_PARMS 4
225
226static int force_kipmid[SI_MAX_PARMS];
227static int num_force_kipmid;
228
b361e27b
CM
229static int unload_when_empty = 1;
230
b0defcdb 231static int try_smi_init(struct smi_info *smi);
b361e27b 232static void cleanup_one_si(struct smi_info *to_clean);
b0defcdb 233
e041c683 234static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
ea94027b
CM
235static int register_xaction_notifier(struct notifier_block * nb)
236{
e041c683 237 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
ea94027b
CM
238}
239
1da177e4
LT
240static void deliver_recv_msg(struct smi_info *smi_info,
241 struct ipmi_smi_msg *msg)
242{
243 /* Deliver the message to the upper layer with the lock
244 released. */
245 spin_unlock(&(smi_info->si_lock));
246 ipmi_smi_msg_received(smi_info->intf, msg);
247 spin_lock(&(smi_info->si_lock));
248}
249
4d7cbac7 250static void return_hosed_msg(struct smi_info *smi_info, int cCode)
1da177e4
LT
251{
252 struct ipmi_smi_msg *msg = smi_info->curr_msg;
253
4d7cbac7
CM
254 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
255 cCode = IPMI_ERR_UNSPECIFIED;
256 /* else use it as is */
257
1da177e4
LT
 258 /* Make it a response */
259 msg->rsp[0] = msg->data[0] | 4;
260 msg->rsp[1] = msg->data[1];
4d7cbac7 261 msg->rsp[2] = cCode;
1da177e4
LT
262 msg->rsp_size = 3;
263
264 smi_info->curr_msg = NULL;
265 deliver_recv_msg(smi_info, msg);
266}
267
268static enum si_sm_result start_next_msg(struct smi_info *smi_info)
269{
270 int rv;
271 struct list_head *entry = NULL;
272#ifdef DEBUG_TIMING
273 struct timeval t;
274#endif
275
 276 /* No need to save flags, we already have interrupts off and we
277 already hold the SMI lock. */
278 spin_lock(&(smi_info->msg_lock));
279
280 /* Pick the high priority queue first. */
b0defcdb 281 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
1da177e4 282 entry = smi_info->hp_xmit_msgs.next;
b0defcdb 283 } else if (!list_empty(&(smi_info->xmit_msgs))) {
1da177e4
LT
284 entry = smi_info->xmit_msgs.next;
285 }
286
b0defcdb 287 if (!entry) {
1da177e4
LT
288 smi_info->curr_msg = NULL;
289 rv = SI_SM_IDLE;
290 } else {
291 int err;
292
293 list_del(entry);
294 smi_info->curr_msg = list_entry(entry,
295 struct ipmi_smi_msg,
296 link);
297#ifdef DEBUG_TIMING
298 do_gettimeofday(&t);
299 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
300#endif
e041c683
AS
301 err = atomic_notifier_call_chain(&xaction_notifier_list,
302 0, smi_info);
ea94027b
CM
303 if (err & NOTIFY_STOP_MASK) {
304 rv = SI_SM_CALL_WITHOUT_DELAY;
305 goto out;
306 }
1da177e4
LT
307 err = smi_info->handlers->start_transaction(
308 smi_info->si_sm,
309 smi_info->curr_msg->data,
310 smi_info->curr_msg->data_size);
311 if (err) {
4d7cbac7 312 return_hosed_msg(smi_info, err);
1da177e4
LT
313 }
314
315 rv = SI_SM_CALL_WITHOUT_DELAY;
316 }
ea94027b 317 out:
1da177e4
LT
318 spin_unlock(&(smi_info->msg_lock));
319
320 return rv;
321}
322
323static void start_enable_irq(struct smi_info *smi_info)
324{
325 unsigned char msg[2];
326
327 /* If we are enabling interrupts, we have to tell the
328 BMC to use them. */
329 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
330 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
331
332 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
333 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
334}
335
336static void start_clear_flags(struct smi_info *smi_info)
337{
338 unsigned char msg[3];
339
340 /* Make sure the watchdog pre-timeout flag is not set at startup. */
341 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
342 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
343 msg[2] = WDT_PRE_TIMEOUT_INT;
344
345 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
346 smi_info->si_state = SI_CLEARING_FLAGS;
347}
348
 349/* When we have a situation where we run out of memory and cannot
350 allocate messages, we just leave them in the BMC and run the system
351 polled until we can allocate some memory. Once we have some
352 memory, we will re-enable the interrupt. */
353static inline void disable_si_irq(struct smi_info *smi_info)
354{
b0defcdb 355 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1da177e4
LT
356 disable_irq_nosync(smi_info->irq);
357 smi_info->interrupt_disabled = 1;
358 }
359}
360
361static inline void enable_si_irq(struct smi_info *smi_info)
362{
363 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
364 enable_irq(smi_info->irq);
365 smi_info->interrupt_disabled = 0;
366 }
367}
368
369static void handle_flags(struct smi_info *smi_info)
370{
3ae0e0f9 371 retry:
1da177e4
LT
372 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
373 /* Watchdog pre-timeout */
374 spin_lock(&smi_info->count_lock);
375 smi_info->watchdog_pretimeouts++;
376 spin_unlock(&smi_info->count_lock);
377
378 start_clear_flags(smi_info);
379 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
380 spin_unlock(&(smi_info->si_lock));
381 ipmi_smi_watchdog_pretimeout(smi_info->intf);
382 spin_lock(&(smi_info->si_lock));
383 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
384 /* Messages available. */
385 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 386 if (!smi_info->curr_msg) {
1da177e4
LT
387 disable_si_irq(smi_info);
388 smi_info->si_state = SI_NORMAL;
389 return;
390 }
391 enable_si_irq(smi_info);
392
393 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
394 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
395 smi_info->curr_msg->data_size = 2;
396
397 smi_info->handlers->start_transaction(
398 smi_info->si_sm,
399 smi_info->curr_msg->data,
400 smi_info->curr_msg->data_size);
401 smi_info->si_state = SI_GETTING_MESSAGES;
402 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
403 /* Events available. */
404 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 405 if (!smi_info->curr_msg) {
1da177e4
LT
406 disable_si_irq(smi_info);
407 smi_info->si_state = SI_NORMAL;
408 return;
409 }
410 enable_si_irq(smi_info);
411
412 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
413 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
414 smi_info->curr_msg->data_size = 2;
415
416 smi_info->handlers->start_transaction(
417 smi_info->si_sm,
418 smi_info->curr_msg->data,
419 smi_info->curr_msg->data_size);
420 smi_info->si_state = SI_GETTING_EVENTS;
4064d5ef
CM
421 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
422 smi_info->oem_data_avail_handler) {
423 if (smi_info->oem_data_avail_handler(smi_info))
424 goto retry;
1da177e4
LT
425 } else {
426 smi_info->si_state = SI_NORMAL;
427 }
428}
429
430static void handle_transaction_done(struct smi_info *smi_info)
431{
432 struct ipmi_smi_msg *msg;
433#ifdef DEBUG_TIMING
434 struct timeval t;
435
436 do_gettimeofday(&t);
437 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
438#endif
439 switch (smi_info->si_state) {
440 case SI_NORMAL:
b0defcdb 441 if (!smi_info->curr_msg)
1da177e4
LT
442 break;
443
444 smi_info->curr_msg->rsp_size
445 = smi_info->handlers->get_result(
446 smi_info->si_sm,
447 smi_info->curr_msg->rsp,
448 IPMI_MAX_MSG_LENGTH);
449
 450 /* Do this here because deliver_recv_msg() releases the
451 lock, and a new message can be put in during the
452 time the lock is released. */
453 msg = smi_info->curr_msg;
454 smi_info->curr_msg = NULL;
455 deliver_recv_msg(smi_info, msg);
456 break;
457
458 case SI_GETTING_FLAGS:
459 {
460 unsigned char msg[4];
461 unsigned int len;
462
463 /* We got the flags from the SMI, now handle them. */
464 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
465 if (msg[2] != 0) {
466 /* Error fetching flags, just give up for
467 now. */
468 smi_info->si_state = SI_NORMAL;
469 } else if (len < 4) {
470 /* Hmm, no flags. That's technically illegal, but
471 don't use uninitialized data. */
472 smi_info->si_state = SI_NORMAL;
473 } else {
474 smi_info->msg_flags = msg[3];
475 handle_flags(smi_info);
476 }
477 break;
478 }
479
480 case SI_CLEARING_FLAGS:
481 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
482 {
483 unsigned char msg[3];
484
485 /* We cleared the flags. */
486 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
487 if (msg[2] != 0) {
488 /* Error clearing flags */
489 printk(KERN_WARNING
490 "ipmi_si: Error clearing flags: %2.2x\n",
491 msg[2]);
492 }
493 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
494 start_enable_irq(smi_info);
495 else
496 smi_info->si_state = SI_NORMAL;
497 break;
498 }
499
500 case SI_GETTING_EVENTS:
501 {
502 smi_info->curr_msg->rsp_size
503 = smi_info->handlers->get_result(
504 smi_info->si_sm,
505 smi_info->curr_msg->rsp,
506 IPMI_MAX_MSG_LENGTH);
507
 508 /* Do this here because deliver_recv_msg() releases the
509 lock, and a new message can be put in during the
510 time the lock is released. */
511 msg = smi_info->curr_msg;
512 smi_info->curr_msg = NULL;
513 if (msg->rsp[2] != 0) {
514 /* Error getting event, probably done. */
515 msg->done(msg);
516
517 /* Take off the event flag. */
518 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
519 handle_flags(smi_info);
520 } else {
521 spin_lock(&smi_info->count_lock);
522 smi_info->events++;
523 spin_unlock(&smi_info->count_lock);
524
525 /* Do this before we deliver the message
526 because delivering the message releases the
527 lock and something else can mess with the
528 state. */
529 handle_flags(smi_info);
530
531 deliver_recv_msg(smi_info, msg);
532 }
533 break;
534 }
535
536 case SI_GETTING_MESSAGES:
537 {
538 smi_info->curr_msg->rsp_size
539 = smi_info->handlers->get_result(
540 smi_info->si_sm,
541 smi_info->curr_msg->rsp,
542 IPMI_MAX_MSG_LENGTH);
543
 544 /* Do this here because deliver_recv_msg() releases the
545 lock, and a new message can be put in during the
546 time the lock is released. */
547 msg = smi_info->curr_msg;
548 smi_info->curr_msg = NULL;
549 if (msg->rsp[2] != 0) {
 550 /* Error getting message, probably done. */
551 msg->done(msg);
552
553 /* Take off the msg flag. */
554 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
555 handle_flags(smi_info);
556 } else {
557 spin_lock(&smi_info->count_lock);
558 smi_info->incoming_messages++;
559 spin_unlock(&smi_info->count_lock);
560
561 /* Do this before we deliver the message
562 because delivering the message releases the
563 lock and something else can mess with the
564 state. */
565 handle_flags(smi_info);
566
567 deliver_recv_msg(smi_info, msg);
568 }
569 break;
570 }
571
572 case SI_ENABLE_INTERRUPTS1:
573 {
574 unsigned char msg[4];
575
576 /* We got the flags from the SMI, now handle them. */
577 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
578 if (msg[2] != 0) {
579 printk(KERN_WARNING
580 "ipmi_si: Could not enable interrupts"
581 ", failed get, using polled mode.\n");
582 smi_info->si_state = SI_NORMAL;
583 } else {
584 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
585 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
586 msg[2] = msg[3] | 1; /* enable msg queue int */
587 smi_info->handlers->start_transaction(
588 smi_info->si_sm, msg, 3);
589 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
590 }
591 break;
592 }
593
594 case SI_ENABLE_INTERRUPTS2:
595 {
596 unsigned char msg[4];
597
598 /* We got the flags from the SMI, now handle them. */
599 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
600 if (msg[2] != 0) {
601 printk(KERN_WARNING
602 "ipmi_si: Could not enable interrupts"
603 ", failed set, using polled mode.\n");
604 }
605 smi_info->si_state = SI_NORMAL;
606 break;
607 }
608 }
609}
610
611/* Called on timeouts and events. Timeouts should pass the elapsed
612 time, interrupts should pass in zero. */
613static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
614 int time)
615{
616 enum si_sm_result si_sm_result;
617
618 restart:
619 /* There used to be a loop here that waited a little while
620 (around 25us) before giving up. That turned out to be
621 pointless, the minimum delays I was seeing were in the 300us
622 range, which is far too long to wait in an interrupt. So
623 we just run until the state machine tells us something
624 happened or it needs a delay. */
625 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
626 time = 0;
627 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
628 {
629 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
630 }
631
632 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
633 {
634 spin_lock(&smi_info->count_lock);
635 smi_info->complete_transactions++;
636 spin_unlock(&smi_info->count_lock);
637
638 handle_transaction_done(smi_info);
639 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
640 }
641 else if (si_sm_result == SI_SM_HOSED)
642 {
643 spin_lock(&smi_info->count_lock);
644 smi_info->hosed_count++;
645 spin_unlock(&smi_info->count_lock);
646
 647 /* Do this before return_hosed_msg, because that
648 releases the lock. */
649 smi_info->si_state = SI_NORMAL;
650 if (smi_info->curr_msg != NULL) {
651 /* If we were handling a user message, format
652 a response to send to the upper layer to
653 tell it about the error. */
4d7cbac7 654 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
1da177e4
LT
655 }
656 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
657 }
658
659 /* We prefer handling attn over new messages. */
660 if (si_sm_result == SI_SM_ATTN)
661 {
662 unsigned char msg[2];
663
664 spin_lock(&smi_info->count_lock);
665 smi_info->attentions++;
666 spin_unlock(&smi_info->count_lock);
667
 668 /* Got an attn, send down a get message flags to see
669 what's causing it. It would be better to handle
670 this in the upper layer, but due to the way
671 interrupts work with the SMI, that's not really
672 possible. */
673 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
674 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
675
676 smi_info->handlers->start_transaction(
677 smi_info->si_sm, msg, 2);
678 smi_info->si_state = SI_GETTING_FLAGS;
679 goto restart;
680 }
681
682 /* If we are currently idle, try to start the next message. */
683 if (si_sm_result == SI_SM_IDLE) {
684 spin_lock(&smi_info->count_lock);
685 smi_info->idles++;
686 spin_unlock(&smi_info->count_lock);
687
688 si_sm_result = start_next_msg(smi_info);
689 if (si_sm_result != SI_SM_IDLE)
690 goto restart;
691 }
692
693 if ((si_sm_result == SI_SM_IDLE)
694 && (atomic_read(&smi_info->req_events)))
695 {
696 /* We are idle and the upper layer requested that I fetch
697 events, so do so. */
55162fb1 698 atomic_set(&smi_info->req_events, 0);
1da177e4 699
55162fb1
CM
700 smi_info->curr_msg = ipmi_alloc_smi_msg();
701 if (!smi_info->curr_msg)
702 goto out;
1da177e4 703
55162fb1
CM
704 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
705 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
706 smi_info->curr_msg->data_size = 2;
1da177e4
LT
707
708 smi_info->handlers->start_transaction(
55162fb1
CM
709 smi_info->si_sm,
710 smi_info->curr_msg->data,
711 smi_info->curr_msg->data_size);
712 smi_info->si_state = SI_GETTING_EVENTS;
1da177e4
LT
713 goto restart;
714 }
55162fb1 715 out:
1da177e4
LT
716 return si_sm_result;
717}
718
719static void sender(void *send_info,
720 struct ipmi_smi_msg *msg,
721 int priority)
722{
723 struct smi_info *smi_info = send_info;
724 enum si_sm_result result;
725 unsigned long flags;
726#ifdef DEBUG_TIMING
727 struct timeval t;
728#endif
729
b361e27b
CM
730 if (atomic_read(&smi_info->stop_operation)) {
731 msg->rsp[0] = msg->data[0] | 4;
732 msg->rsp[1] = msg->data[1];
733 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
734 msg->rsp_size = 3;
735 deliver_recv_msg(smi_info, msg);
736 return;
737 }
738
1da177e4
LT
739 spin_lock_irqsave(&(smi_info->msg_lock), flags);
740#ifdef DEBUG_TIMING
741 do_gettimeofday(&t);
742 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
743#endif
744
745 if (smi_info->run_to_completion) {
746 /* If we are running to completion, then throw it in
747 the list and run transactions until everything is
748 clear. Priority doesn't matter here. */
749 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
750
751 /* We have to release the msg lock and claim the smi
752 lock in this case, because of race conditions. */
753 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
754
755 spin_lock_irqsave(&(smi_info->si_lock), flags);
756 result = smi_event_handler(smi_info, 0);
757 while (result != SI_SM_IDLE) {
758 udelay(SI_SHORT_TIMEOUT_USEC);
759 result = smi_event_handler(smi_info,
760 SI_SHORT_TIMEOUT_USEC);
761 }
762 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
763 return;
764 } else {
765 if (priority > 0) {
766 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
767 } else {
768 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
769 }
770 }
771 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
772
773 spin_lock_irqsave(&(smi_info->si_lock), flags);
774 if ((smi_info->si_state == SI_NORMAL)
775 && (smi_info->curr_msg == NULL))
776 {
777 start_next_msg(smi_info);
1da177e4
LT
778 }
779 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
780}
781
782static void set_run_to_completion(void *send_info, int i_run_to_completion)
783{
784 struct smi_info *smi_info = send_info;
785 enum si_sm_result result;
786 unsigned long flags;
787
788 spin_lock_irqsave(&(smi_info->si_lock), flags);
789
790 smi_info->run_to_completion = i_run_to_completion;
791 if (i_run_to_completion) {
792 result = smi_event_handler(smi_info, 0);
793 while (result != SI_SM_IDLE) {
794 udelay(SI_SHORT_TIMEOUT_USEC);
795 result = smi_event_handler(smi_info,
796 SI_SHORT_TIMEOUT_USEC);
797 }
798 }
799
800 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
801}
802
a9a2c44f
CM
803static int ipmi_thread(void *data)
804{
805 struct smi_info *smi_info = data;
e9a705a0 806 unsigned long flags;
a9a2c44f
CM
807 enum si_sm_result smi_result;
808
a9a2c44f 809 set_user_nice(current, 19);
e9a705a0 810 while (!kthread_should_stop()) {
a9a2c44f 811 spin_lock_irqsave(&(smi_info->si_lock), flags);
8a3628d5 812 smi_result = smi_event_handler(smi_info, 0);
a9a2c44f 813 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
e9a705a0
MD
814 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
815 /* do nothing */
a9a2c44f 816 }
e9a705a0 817 else if (smi_result == SI_SM_CALL_WITH_DELAY)
33979734 818 schedule();
e9a705a0
MD
819 else
820 schedule_timeout_interruptible(1);
a9a2c44f 821 }
a9a2c44f
CM
822 return 0;
823}
824
825
1da177e4
LT
826static void poll(void *send_info)
827{
828 struct smi_info *smi_info = send_info;
829
15c62e10
CM
830 /*
831 * Make sure there is some delay in the poll loop so we can
832 * drive time forward and timeout things.
833 */
834 udelay(10);
835 smi_event_handler(smi_info, 10);
1da177e4
LT
836}
837
838static void request_events(void *send_info)
839{
840 struct smi_info *smi_info = send_info;
841
b361e27b
CM
842 if (atomic_read(&smi_info->stop_operation))
843 return;
844
1da177e4
LT
845 atomic_set(&smi_info->req_events, 1);
846}
847
0c8204b3 848static int initialized;
1da177e4 849
1da177e4
LT
850static void smi_timeout(unsigned long data)
851{
852 struct smi_info *smi_info = (struct smi_info *) data;
853 enum si_sm_result smi_result;
854 unsigned long flags;
855 unsigned long jiffies_now;
c4edff1c 856 long time_diff;
1da177e4
LT
857#ifdef DEBUG_TIMING
858 struct timeval t;
859#endif
860
a9a2c44f 861 if (atomic_read(&smi_info->stop_operation))
1da177e4 862 return;
1da177e4
LT
863
864 spin_lock_irqsave(&(smi_info->si_lock), flags);
865#ifdef DEBUG_TIMING
866 do_gettimeofday(&t);
867 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
868#endif
869 jiffies_now = jiffies;
c4edff1c 870 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1da177e4
LT
871 * SI_USEC_PER_JIFFY);
872 smi_result = smi_event_handler(smi_info, time_diff);
873
874 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
875
876 smi_info->last_timeout_jiffies = jiffies_now;
877
b0defcdb 878 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1da177e4
LT
879 /* Running with interrupts, only do long timeouts. */
880 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
881 spin_lock_irqsave(&smi_info->count_lock, flags);
882 smi_info->long_timeouts++;
883 spin_unlock_irqrestore(&smi_info->count_lock, flags);
884 goto do_add_timer;
885 }
886
887 /* If the state machine asks for a short delay, then shorten
888 the timer timeout. */
889 if (smi_result == SI_SM_CALL_WITH_DELAY) {
890 spin_lock_irqsave(&smi_info->count_lock, flags);
891 smi_info->short_timeouts++;
892 spin_unlock_irqrestore(&smi_info->count_lock, flags);
1da177e4 893 smi_info->si_timer.expires = jiffies + 1;
1da177e4
LT
894 } else {
895 spin_lock_irqsave(&smi_info->count_lock, flags);
896 smi_info->long_timeouts++;
897 spin_unlock_irqrestore(&smi_info->count_lock, flags);
898 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1da177e4
LT
899 }
900
901 do_add_timer:
902 add_timer(&(smi_info->si_timer));
903}
904
7d12e780 905static irqreturn_t si_irq_handler(int irq, void *data)
1da177e4
LT
906{
907 struct smi_info *smi_info = data;
908 unsigned long flags;
909#ifdef DEBUG_TIMING
910 struct timeval t;
911#endif
912
913 spin_lock_irqsave(&(smi_info->si_lock), flags);
914
915 spin_lock(&smi_info->count_lock);
916 smi_info->interrupts++;
917 spin_unlock(&smi_info->count_lock);
918
a9a2c44f 919 if (atomic_read(&smi_info->stop_operation))
1da177e4
LT
920 goto out;
921
922#ifdef DEBUG_TIMING
923 do_gettimeofday(&t);
924 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
925#endif
926 smi_event_handler(smi_info, 0);
927 out:
928 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
929 return IRQ_HANDLED;
930}
931
7d12e780 932static irqreturn_t si_bt_irq_handler(int irq, void *data)
9dbf68f9
CM
933{
934 struct smi_info *smi_info = data;
935 /* We need to clear the IRQ flag for the BT interface. */
936 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
937 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
938 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
7d12e780 939 return si_irq_handler(irq, data);
9dbf68f9
CM
940}
941
453823ba
CM
942static int smi_start_processing(void *send_info,
943 ipmi_smi_t intf)
944{
945 struct smi_info *new_smi = send_info;
a51f4a81 946 int enable = 0;
453823ba
CM
947
948 new_smi->intf = intf;
949
950 /* Set up the timer that drives the interface. */
951 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
952 new_smi->last_timeout_jiffies = jiffies;
953 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
954
a51f4a81
CM
955 /*
956 * Check if the user forcefully enabled the daemon.
957 */
958 if (new_smi->intf_num < num_force_kipmid)
959 enable = force_kipmid[new_smi->intf_num];
df3fe8de
CM
960 /*
961 * The BT interface is efficient enough to not need a thread,
962 * and there is no need for a thread if we have interrupts.
963 */
a51f4a81
CM
964 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
965 enable = 1;
966
967 if (enable) {
453823ba
CM
968 new_smi->thread = kthread_run(ipmi_thread, new_smi,
969 "kipmi%d", new_smi->intf_num);
970 if (IS_ERR(new_smi->thread)) {
971 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
972 " kernel thread due to error %ld, only using"
973 " timers to drive the interface\n",
974 PTR_ERR(new_smi->thread));
975 new_smi->thread = NULL;
976 }
977 }
978
979 return 0;
980}
9dbf68f9 981
b9675136
CM
982static void set_maintenance_mode(void *send_info, int enable)
983{
984 struct smi_info *smi_info = send_info;
985
986 if (!enable)
987 atomic_set(&smi_info->req_events, 0);
988}
989
1da177e4
LT
990static struct ipmi_smi_handlers handlers =
991{
992 .owner = THIS_MODULE,
453823ba 993 .start_processing = smi_start_processing,
1da177e4
LT
994 .sender = sender,
995 .request_events = request_events,
b9675136 996 .set_maintenance_mode = set_maintenance_mode,
1da177e4
LT
997 .set_run_to_completion = set_run_to_completion,
998 .poll = poll,
999};
1000
1001/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1002 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
1003
b0defcdb 1004static LIST_HEAD(smi_infos);
d6dfd131 1005static DEFINE_MUTEX(smi_infos_lock);
b0defcdb 1006static int smi_num; /* Used to sequence the SMIs */
1da177e4 1007
1da177e4
LT
1008#define DEFAULT_REGSPACING 1
1009
1010static int si_trydefaults = 1;
1011static char *si_type[SI_MAX_PARMS];
1012#define MAX_SI_TYPE_STR 30
1013static char si_type_str[MAX_SI_TYPE_STR];
1014static unsigned long addrs[SI_MAX_PARMS];
1015static int num_addrs;
1016static unsigned int ports[SI_MAX_PARMS];
1017static int num_ports;
1018static int irqs[SI_MAX_PARMS];
1019static int num_irqs;
1020static int regspacings[SI_MAX_PARMS];
0c8204b3 1021static int num_regspacings;
1da177e4 1022static int regsizes[SI_MAX_PARMS];
0c8204b3 1023static int num_regsizes;
1da177e4 1024static int regshifts[SI_MAX_PARMS];
0c8204b3 1025static int num_regshifts;
1da177e4 1026static int slave_addrs[SI_MAX_PARMS];
0c8204b3 1027static int num_slave_addrs;
1da177e4 1028
b361e27b
CM
1029#define IPMI_IO_ADDR_SPACE 0
1030#define IPMI_MEM_ADDR_SPACE 1
1d5636cc 1031static char *addr_space_to_str[] = { "i/o", "mem" };
b361e27b
CM
1032
1033static int hotmod_handler(const char *val, struct kernel_param *kp);
1034
1035module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1036MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1037 " Documentation/IPMI.txt in the kernel sources for the"
1038 " gory details.");
1da177e4
LT
1039
1040module_param_named(trydefaults, si_trydefaults, bool, 0);
1041MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1042 " default scan of the KCS and SMIC interface at the standard"
1043 " address");
1044module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1045MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1046 " interface separated by commas. The types are 'kcs',"
1047 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1048 " the first interface to kcs and the second to bt");
1049module_param_array(addrs, long, &num_addrs, 0);
1050MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1051 " addresses separated by commas. Only use if an interface"
1052 " is in memory. Otherwise, set it to zero or leave"
1053 " it blank.");
1054module_param_array(ports, int, &num_ports, 0);
1055MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1056 " addresses separated by commas. Only use if an interface"
1057 " is a port. Otherwise, set it to zero or leave"
1058 " it blank.");
1059module_param_array(irqs, int, &num_irqs, 0);
1060MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1061 " addresses separated by commas. Only use if an interface"
1062 " has an interrupt. Otherwise, set it to zero or leave"
1063 " it blank.");
1064module_param_array(regspacings, int, &num_regspacings, 0);
1065MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1066 " and each successive register used by the interface. For"
1067 " instance, if the start address is 0xca2 and the spacing"
1068 " is 2, then the second address is at 0xca4. Defaults"
1069 " to 1.");
1070module_param_array(regsizes, int, &num_regsizes, 0);
1071MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1072 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1073 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1074 " the 8-bit IPMI register has to be read from a larger"
1075 " register.");
1076module_param_array(regshifts, int, &num_regshifts, 0);
 1077 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1078 " IPMI register, in bits. For instance, if the data"
1079 " is read from a 32-bit word and the IPMI data is in"
1080 " bit 8-15, then the shift would be 8");
1081module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1082MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1083 " the controller. Normally this is 0x20, but can be"
1084 " overridden by this parm. This is an array indexed"
1085 " by interface number.");
a51f4a81
CM
1086module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1087MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1088 " disabled(0). Normally the IPMI driver auto-detects"
1089 " this, but the value may be overridden by this parm.");
b361e27b
CM
1090module_param(unload_when_empty, int, 0);
1091MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1092 " specified or found, default is 1. Setting to 0"
1093 " is useful for hot add of devices using hotmod.");
1da177e4
LT
1094
1095
b0defcdb 1096static void std_irq_cleanup(struct smi_info *info)
1da177e4 1097{
b0defcdb
CM
1098 if (info->si_type == SI_BT)
1099 /* Disable the interrupt in the BT interface. */
1100 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1101 free_irq(info->irq, info);
1da177e4 1102}
1da177e4
LT
1103
1104static int std_irq_setup(struct smi_info *info)
1105{
1106 int rv;
1107
b0defcdb 1108 if (!info->irq)
1da177e4
LT
1109 return 0;
1110
9dbf68f9
CM
1111 if (info->si_type == SI_BT) {
1112 rv = request_irq(info->irq,
1113 si_bt_irq_handler,
0f2ed4c6 1114 IRQF_DISABLED,
9dbf68f9
CM
1115 DEVICE_NAME,
1116 info);
b0defcdb 1117 if (!rv)
9dbf68f9
CM
1118 /* Enable the interrupt in the BT interface. */
1119 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1120 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1121 } else
1122 rv = request_irq(info->irq,
1123 si_irq_handler,
0f2ed4c6 1124 IRQF_DISABLED,
9dbf68f9
CM
1125 DEVICE_NAME,
1126 info);
1da177e4
LT
1127 if (rv) {
1128 printk(KERN_WARNING
1129 "ipmi_si: %s unable to claim interrupt %d,"
1130 " running polled\n",
1131 DEVICE_NAME, info->irq);
1132 info->irq = 0;
1133 } else {
b0defcdb 1134 info->irq_cleanup = std_irq_cleanup;
1da177e4
LT
1135 printk(" Using irq %d\n", info->irq);
1136 }
1137
1138 return rv;
1139}
1140
1da177e4
LT
1141static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1142{
b0defcdb 1143 unsigned int addr = io->addr_data;
1da177e4 1144
b0defcdb 1145 return inb(addr + (offset * io->regspacing));
1da177e4
LT
1146}
1147
1148static void port_outb(struct si_sm_io *io, unsigned int offset,
1149 unsigned char b)
1150{
b0defcdb 1151 unsigned int addr = io->addr_data;
1da177e4 1152
b0defcdb 1153 outb(b, addr + (offset * io->regspacing));
1da177e4
LT
1154}
1155
1156static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1157{
b0defcdb 1158 unsigned int addr = io->addr_data;
1da177e4 1159
b0defcdb 1160 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1161}
1162
1163static void port_outw(struct si_sm_io *io, unsigned int offset,
1164 unsigned char b)
1165{
b0defcdb 1166 unsigned int addr = io->addr_data;
1da177e4 1167
b0defcdb 1168 outw(b << io->regshift, addr + (offset * io->regspacing));
1da177e4
LT
1169}
1170
1171static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1172{
b0defcdb 1173 unsigned int addr = io->addr_data;
1da177e4 1174
b0defcdb 1175 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1176}
1177
1178static void port_outl(struct si_sm_io *io, unsigned int offset,
1179 unsigned char b)
1180{
b0defcdb 1181 unsigned int addr = io->addr_data;
1da177e4 1182
b0defcdb 1183 outl(b << io->regshift, addr+(offset * io->regspacing));
1da177e4
LT
1184}
1185
1186static void port_cleanup(struct smi_info *info)
1187{
b0defcdb 1188 unsigned int addr = info->io.addr_data;
d61a3ead 1189 int idx;
1da177e4 1190
b0defcdb 1191 if (addr) {
d61a3ead
CM
1192 for (idx = 0; idx < info->io_size; idx++) {
1193 release_region(addr + idx * info->io.regspacing,
1194 info->io.regsize);
1195 }
1da177e4 1196 }
1da177e4
LT
1197}
1198
1199static int port_setup(struct smi_info *info)
1200{
b0defcdb 1201 unsigned int addr = info->io.addr_data;
d61a3ead 1202 int idx;
1da177e4 1203
b0defcdb 1204 if (!addr)
1da177e4
LT
1205 return -ENODEV;
1206
1207 info->io_cleanup = port_cleanup;
1208
1209 /* Figure out the actual inb/inw/inl/etc routine to use based
1210 upon the register size. */
1211 switch (info->io.regsize) {
1212 case 1:
1213 info->io.inputb = port_inb;
1214 info->io.outputb = port_outb;
1215 break;
1216 case 2:
1217 info->io.inputb = port_inw;
1218 info->io.outputb = port_outw;
1219 break;
1220 case 4:
1221 info->io.inputb = port_inl;
1222 info->io.outputb = port_outl;
1223 break;
1224 default:
1225 printk("ipmi_si: Invalid register size: %d\n",
1226 info->io.regsize);
1227 return -EINVAL;
1228 }
1229
d61a3ead
CM
1230 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1231 * tables. This causes problems when trying to register the
1232 * entire I/O region. Therefore we must register each I/O
1233 * port separately.
1234 */
1235 for (idx = 0; idx < info->io_size; idx++) {
1236 if (request_region(addr + idx * info->io.regspacing,
1237 info->io.regsize, DEVICE_NAME) == NULL) {
1238 /* Undo allocations */
1239 while (idx--) {
1240 release_region(addr + idx * info->io.regspacing,
1241 info->io.regsize);
1242 }
1243 return -EIO;
1244 }
1245 }
1da177e4
LT
1246 return 0;
1247}
1248
546cfdf4 1249static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1250{
1251 return readb((io->addr)+(offset * io->regspacing));
1252}
1253
546cfdf4 1254static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1255 unsigned char b)
1256{
1257 writeb(b, (io->addr)+(offset * io->regspacing));
1258}
1259
546cfdf4 1260static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1261{
1262 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1263 & 0xff;
1da177e4
LT
1264}
1265
546cfdf4 1266static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1267 unsigned char b)
1268{
1269 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1270}
1271
546cfdf4 1272static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1273{
1274 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1275 & 0xff;
1da177e4
LT
1276}
1277
546cfdf4 1278static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1279 unsigned char b)
1280{
1281 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1282}
1283
1284#ifdef readq
1285static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1286{
1287 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1288 & 0xff;
1da177e4
LT
1289}
1290
1291static void mem_outq(struct si_sm_io *io, unsigned int offset,
1292 unsigned char b)
1293{
1294 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1295}
1296#endif
1297
1298static void mem_cleanup(struct smi_info *info)
1299{
b0defcdb 1300 unsigned long addr = info->io.addr_data;
1da177e4
LT
1301 int mapsize;
1302
1303 if (info->io.addr) {
1304 iounmap(info->io.addr);
1305
1306 mapsize = ((info->io_size * info->io.regspacing)
1307 - (info->io.regspacing - info->io.regsize));
1308
b0defcdb 1309 release_mem_region(addr, mapsize);
1da177e4 1310 }
1da177e4
LT
1311}
1312
1313static int mem_setup(struct smi_info *info)
1314{
b0defcdb 1315 unsigned long addr = info->io.addr_data;
1da177e4
LT
1316 int mapsize;
1317
b0defcdb 1318 if (!addr)
1da177e4
LT
1319 return -ENODEV;
1320
1321 info->io_cleanup = mem_cleanup;
1322
1323 /* Figure out the actual readb/readw/readl/etc routine to use based
1324 upon the register size. */
1325 switch (info->io.regsize) {
1326 case 1:
546cfdf4
AD
1327 info->io.inputb = intf_mem_inb;
1328 info->io.outputb = intf_mem_outb;
1da177e4
LT
1329 break;
1330 case 2:
546cfdf4
AD
1331 info->io.inputb = intf_mem_inw;
1332 info->io.outputb = intf_mem_outw;
1da177e4
LT
1333 break;
1334 case 4:
546cfdf4
AD
1335 info->io.inputb = intf_mem_inl;
1336 info->io.outputb = intf_mem_outl;
1da177e4
LT
1337 break;
1338#ifdef readq
1339 case 8:
1340 info->io.inputb = mem_inq;
1341 info->io.outputb = mem_outq;
1342 break;
1343#endif
1344 default:
1345 printk("ipmi_si: Invalid register size: %d\n",
1346 info->io.regsize);
1347 return -EINVAL;
1348 }
1349
1350 /* Calculate the total amount of memory to claim. This is an
1351 * unusual looking calculation, but it avoids claiming any
1352 * more memory than it has to. It will claim everything
1353 * between the first address to the end of the last full
1354 * register. */
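	/* Worked example (illustrative numbers): with io_size = 2,
	   regspacing = 4 and regsize = 1 this claims 2*4 - (4-1) = 5
	   bytes, i.e. everything from the first register up to the
	   last byte of the second (final) register. */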
1355 mapsize = ((info->io_size * info->io.regspacing)
1356 - (info->io.regspacing - info->io.regsize));
1357
b0defcdb 1358 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1da177e4
LT
1359 return -EIO;
1360
b0defcdb 1361 info->io.addr = ioremap(addr, mapsize);
1da177e4 1362 if (info->io.addr == NULL) {
b0defcdb 1363 release_mem_region(addr, mapsize);
1da177e4
LT
1364 return -EIO;
1365 }
1366 return 0;
1367}
1368
b361e27b
CM
1369/*
1370 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1371 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1372 * Options are:
1373 * rsp=<regspacing>
1374 * rsi=<regsize>
1375 * rsh=<regshift>
1376 * irq=<irq>
1377 * ipmb=<ipmb addr>
1378 */
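/*
 * Example (illustrative only; the exact sysfs path is an assumption based
 * on the module_param_call() registration below): adding a KCS interface
 * at I/O address 0xca2 with a register spacing of 4, and later removing it
 * again, could be done with:
 *
 *   echo "add,kcs,i/o,0xca2,rsp=4" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */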
1379enum hotmod_op { HM_ADD, HM_REMOVE };
1380struct hotmod_vals {
1381 char *name;
1382 int val;
1383};
1384static struct hotmod_vals hotmod_ops[] = {
1385 { "add", HM_ADD },
1386 { "remove", HM_REMOVE },
1387 { NULL }
1388};
1389static struct hotmod_vals hotmod_si[] = {
1390 { "kcs", SI_KCS },
1391 { "smic", SI_SMIC },
1392 { "bt", SI_BT },
1393 { NULL }
1394};
1395static struct hotmod_vals hotmod_as[] = {
1396 { "mem", IPMI_MEM_ADDR_SPACE },
1397 { "i/o", IPMI_IO_ADDR_SPACE },
1398 { NULL }
1399};
1d5636cc 1400
b361e27b
CM
1401static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1402{
1403 char *s;
1404 int i;
1405
1406 s = strchr(*curr, ',');
1407 if (!s) {
1408 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1409 return -EINVAL;
1410 }
1411 *s = '\0';
1412 s++;
 1413 for (i = 0; v[i].name; i++) {
1d5636cc 1414 if (strcmp(*curr, v[i].name) == 0) {
b361e27b
CM
1415 *val = v[i].val;
1416 *curr = s;
1417 return 0;
1418 }
1419 }
1420
1421 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1422 return -EINVAL;
1423}
1424
1d5636cc
CM
1425static int check_hotmod_int_op(const char *curr, const char *option,
1426 const char *name, int *val)
1427{
1428 char *n;
1429
1430 if (strcmp(curr, name) == 0) {
1431 if (!option) {
1432 printk(KERN_WARNING PFX
1433 "No option given for '%s'\n",
1434 curr);
1435 return -EINVAL;
1436 }
1437 *val = simple_strtoul(option, &n, 0);
1438 if ((*n != '\0') || (*option == '\0')) {
1439 printk(KERN_WARNING PFX
1440 "Bad option given for '%s'\n",
1441 curr);
1442 return -EINVAL;
1443 }
1444 return 1;
1445 }
1446 return 0;
1447}
1448
b361e27b
CM
1449static int hotmod_handler(const char *val, struct kernel_param *kp)
1450{
1451 char *str = kstrdup(val, GFP_KERNEL);
1d5636cc 1452 int rv;
b361e27b
CM
1453 char *next, *curr, *s, *n, *o;
1454 enum hotmod_op op;
1455 enum si_type si_type;
1456 int addr_space;
1457 unsigned long addr;
1458 int regspacing;
1459 int regsize;
1460 int regshift;
1461 int irq;
1462 int ipmb;
1463 int ival;
1d5636cc 1464 int len;
b361e27b
CM
1465 struct smi_info *info;
1466
1467 if (!str)
1468 return -ENOMEM;
1469
1470 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1d5636cc
CM
1471 len = strlen(str);
1472 ival = len - 1;
b361e27b
CM
1473 while ((ival >= 0) && isspace(str[ival])) {
1474 str[ival] = '\0';
1475 ival--;
1476 }
1477
1478 for (curr = str; curr; curr = next) {
1479 regspacing = 1;
1480 regsize = 1;
1481 regshift = 0;
1482 irq = 0;
1483 ipmb = 0x20;
1484
1485 next = strchr(curr, ':');
1486 if (next) {
1487 *next = '\0';
1488 next++;
1489 }
1490
1491 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1492 if (rv)
1493 break;
1494 op = ival;
1495
1496 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1497 if (rv)
1498 break;
1499 si_type = ival;
1500
1501 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1502 if (rv)
1503 break;
1504
1505 s = strchr(curr, ',');
1506 if (s) {
1507 *s = '\0';
1508 s++;
1509 }
1510 addr = simple_strtoul(curr, &n, 0);
1511 if ((*n != '\0') || (*curr == '\0')) {
1512 printk(KERN_WARNING PFX "Invalid hotmod address"
1513 " '%s'\n", curr);
1514 break;
1515 }
1516
1517 while (s) {
1518 curr = s;
1519 s = strchr(curr, ',');
1520 if (s) {
1521 *s = '\0';
1522 s++;
1523 }
1524 o = strchr(curr, '=');
1525 if (o) {
1526 *o = '\0';
1527 o++;
1528 }
1d5636cc
CM
1529 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1530 if (rv < 0)
b361e27b 1531 goto out;
1d5636cc
CM
1532 else if (rv)
1533 continue;
1534 rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1535 if (rv < 0)
1536 goto out;
1537 else if (rv)
1538 continue;
1539 rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1540 if (rv < 0)
1541 goto out;
1542 else if (rv)
1543 continue;
1544 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1545 if (rv < 0)
1546 goto out;
1547 else if (rv)
1548 continue;
1549 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1550 if (rv < 0)
1551 goto out;
1552 else if (rv)
1553 continue;
1554
1555 rv = -EINVAL;
1556 printk(KERN_WARNING PFX
1557 "Invalid hotmod option '%s'\n",
1558 curr);
1559 goto out;
b361e27b
CM
1560 }
1561
1562 if (op == HM_ADD) {
1563 info = kzalloc(sizeof(*info), GFP_KERNEL);
1564 if (!info) {
1565 rv = -ENOMEM;
1566 goto out;
1567 }
1568
1569 info->addr_source = "hotmod";
1570 info->si_type = si_type;
1571 info->io.addr_data = addr;
1572 info->io.addr_type = addr_space;
1573 if (addr_space == IPMI_MEM_ADDR_SPACE)
1574 info->io_setup = mem_setup;
1575 else
1576 info->io_setup = port_setup;
1577
1578 info->io.addr = NULL;
1579 info->io.regspacing = regspacing;
1580 if (!info->io.regspacing)
1581 info->io.regspacing = DEFAULT_REGSPACING;
1582 info->io.regsize = regsize;
1583 if (!info->io.regsize)
1584 info->io.regsize = DEFAULT_REGSPACING;
1585 info->io.regshift = regshift;
1586 info->irq = irq;
1587 if (info->irq)
1588 info->irq_setup = std_irq_setup;
1589 info->slave_addr = ipmb;
1590
1591 try_smi_init(info);
1592 } else {
1593 /* remove */
1594 struct smi_info *e, *tmp_e;
1595
1596 mutex_lock(&smi_infos_lock);
1597 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1598 if (e->io.addr_type != addr_space)
1599 continue;
1600 if (e->si_type != si_type)
1601 continue;
1602 if (e->io.addr_data == addr)
1603 cleanup_one_si(e);
1604 }
1605 mutex_unlock(&smi_infos_lock);
1606 }
1607 }
1d5636cc 1608 rv = len;
b361e27b
CM
1609 out:
1610 kfree(str);
1611 return rv;
1612}
b0defcdb
CM
1613
1614static __devinit void hardcode_find_bmc(void)
1da177e4 1615{
b0defcdb 1616 int i;
1da177e4
LT
1617 struct smi_info *info;
1618
b0defcdb
CM
1619 for (i = 0; i < SI_MAX_PARMS; i++) {
1620 if (!ports[i] && !addrs[i])
1621 continue;
1da177e4 1622
b0defcdb
CM
1623 info = kzalloc(sizeof(*info), GFP_KERNEL);
1624 if (!info)
1625 return;
1da177e4 1626
b0defcdb 1627 info->addr_source = "hardcoded";
1da177e4 1628
1d5636cc 1629 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
b0defcdb 1630 info->si_type = SI_KCS;
1d5636cc 1631 } else if (strcmp(si_type[i], "smic") == 0) {
b0defcdb 1632 info->si_type = SI_SMIC;
1d5636cc 1633 } else if (strcmp(si_type[i], "bt") == 0) {
b0defcdb
CM
1634 info->si_type = SI_BT;
1635 } else {
1636 printk(KERN_WARNING
1637 "ipmi_si: Interface type specified "
1638 "for interface %d, was invalid: %s\n",
1639 i, si_type[i]);
1640 kfree(info);
1641 continue;
1642 }
1da177e4 1643
b0defcdb
CM
1644 if (ports[i]) {
1645 /* An I/O port */
1646 info->io_setup = port_setup;
1647 info->io.addr_data = ports[i];
1648 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1649 } else if (addrs[i]) {
1650 /* A memory port */
1651 info->io_setup = mem_setup;
1652 info->io.addr_data = addrs[i];
1653 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1654 } else {
1655 printk(KERN_WARNING
1656 "ipmi_si: Interface type specified "
1657 "for interface %d, "
1658 "but port and address were not set or "
1659 "set to zero.\n", i);
1660 kfree(info);
1661 continue;
1662 }
1da177e4 1663
b0defcdb
CM
1664 info->io.addr = NULL;
1665 info->io.regspacing = regspacings[i];
1666 if (!info->io.regspacing)
1667 info->io.regspacing = DEFAULT_REGSPACING;
1668 info->io.regsize = regsizes[i];
1669 if (!info->io.regsize)
1670 info->io.regsize = DEFAULT_REGSPACING;
1671 info->io.regshift = regshifts[i];
1672 info->irq = irqs[i];
1673 if (info->irq)
1674 info->irq_setup = std_irq_setup;
1da177e4 1675
b0defcdb
CM
1676 try_smi_init(info);
1677 }
1678}
1da177e4 1679
8466361a 1680#ifdef CONFIG_ACPI
1da177e4
LT
1681
1682#include <linux/acpi.h>
1683
1684/* Once we get an ACPI failure, we don't try any more, because we go
1685 through the tables sequentially. Once we don't find a table, there
1686 are no more. */
0c8204b3 1687static int acpi_failure;
1da177e4
LT
1688
1689/* For GPE-type interrupts. */
1690static u32 ipmi_acpi_gpe(void *context)
1691{
1692 struct smi_info *smi_info = context;
1693 unsigned long flags;
1694#ifdef DEBUG_TIMING
1695 struct timeval t;
1696#endif
1697
1698 spin_lock_irqsave(&(smi_info->si_lock), flags);
1699
1700 spin_lock(&smi_info->count_lock);
1701 smi_info->interrupts++;
1702 spin_unlock(&smi_info->count_lock);
1703
a9a2c44f 1704 if (atomic_read(&smi_info->stop_operation))
1da177e4
LT
1705 goto out;
1706
1707#ifdef DEBUG_TIMING
1708 do_gettimeofday(&t);
1709 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1710#endif
1711 smi_event_handler(smi_info, 0);
1712 out:
1713 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1714
1715 return ACPI_INTERRUPT_HANDLED;
1716}
1717
b0defcdb
CM
1718static void acpi_gpe_irq_cleanup(struct smi_info *info)
1719{
1720 if (!info->irq)
1721 return;
1722
1723 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1724}
1725
1da177e4
LT
1726static int acpi_gpe_irq_setup(struct smi_info *info)
1727{
1728 acpi_status status;
1729
b0defcdb 1730 if (!info->irq)
1da177e4
LT
1731 return 0;
1732
1733 /* FIXME - is level triggered right? */
1734 status = acpi_install_gpe_handler(NULL,
1735 info->irq,
1736 ACPI_GPE_LEVEL_TRIGGERED,
1737 &ipmi_acpi_gpe,
1738 info);
1739 if (status != AE_OK) {
1740 printk(KERN_WARNING
1741 "ipmi_si: %s unable to claim ACPI GPE %d,"
1742 " running polled\n",
1743 DEVICE_NAME, info->irq);
1744 info->irq = 0;
1745 return -EINVAL;
1746 } else {
b0defcdb 1747 info->irq_cleanup = acpi_gpe_irq_cleanup;
1da177e4
LT
1748 printk(" Using ACPI GPE %d\n", info->irq);
1749 return 0;
1750 }
1751}
1752
1da177e4
LT
1753/*
1754 * Defined at
1755 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1756 */
1757struct SPMITable {
1758 s8 Signature[4];
1759 u32 Length;
1760 u8 Revision;
1761 u8 Checksum;
1762 s8 OEMID[6];
1763 s8 OEMTableID[8];
1764 s8 OEMRevision[4];
1765 s8 CreatorID[4];
1766 s8 CreatorRevision[4];
1767 u8 InterfaceType;
1768 u8 IPMIlegacy;
1769 s16 SpecificationRevision;
1770
1771 /*
1772 * Bit 0 - SCI interrupt supported
1773 * Bit 1 - I/O APIC/SAPIC
1774 */
1775 u8 InterruptType;
1776
1777 /* If bit 0 of InterruptType is set, then this is the SCI
1778 interrupt in the GPEx_STS register. */
1779 u8 GPE;
1780
1781 s16 Reserved;
1782
1783 /* If bit 1 of InterruptType is set, then this is the I/O
1784 APIC/SAPIC interrupt. */
1785 u32 GlobalSystemInterrupt;
1786
1787 /* The actual register address. */
1788 struct acpi_generic_address addr;
1789
1790 u8 UID[4];
1791
1792 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1793};
1794
b0defcdb 1795static __devinit int try_init_acpi(struct SPMITable *spmi)
1da177e4
LT
1796{
1797 struct smi_info *info;
1da177e4
LT
1798 u8 addr_space;
1799
1da177e4
LT
1800 if (spmi->IPMIlegacy != 1) {
1801 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1802 return -ENODEV;
1803 }
1804
15a58ed1 1805 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1da177e4
LT
1806 addr_space = IPMI_MEM_ADDR_SPACE;
1807 else
1808 addr_space = IPMI_IO_ADDR_SPACE;
b0defcdb
CM
1809
1810 info = kzalloc(sizeof(*info), GFP_KERNEL);
1811 if (!info) {
1812 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1813 return -ENOMEM;
1814 }
1815
1816 info->addr_source = "ACPI";
1da177e4 1817
1da177e4
LT
1818 /* Figure out the interface type. */
1819 switch (spmi->InterfaceType)
1820 {
1821 case 1: /* KCS */
b0defcdb 1822 info->si_type = SI_KCS;
1da177e4 1823 break;
1da177e4 1824 case 2: /* SMIC */
b0defcdb 1825 info->si_type = SI_SMIC;
1da177e4 1826 break;
1da177e4 1827 case 3: /* BT */
b0defcdb 1828 info->si_type = SI_BT;
1da177e4 1829 break;
1da177e4
LT
1830 default:
1831 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1832 spmi->InterfaceType);
b0defcdb 1833 kfree(info);
1da177e4
LT
1834 return -EIO;
1835 }
1836
1da177e4
LT
1837 if (spmi->InterruptType & 1) {
1838 /* We've got a GPE interrupt. */
1839 info->irq = spmi->GPE;
1840 info->irq_setup = acpi_gpe_irq_setup;
1da177e4
LT
1841 } else if (spmi->InterruptType & 2) {
1842 /* We've got an APIC/SAPIC interrupt. */
1843 info->irq = spmi->GlobalSystemInterrupt;
1844 info->irq_setup = std_irq_setup;
1da177e4
LT
1845 } else {
1846 /* Use the default interrupt setting. */
1847 info->irq = 0;
1848 info->irq_setup = NULL;
1849 }
1850
15a58ed1 1851 if (spmi->addr.bit_width) {
35bc37a0 1852 /* A (hopefully) properly formed register bit width. */
15a58ed1 1853 info->io.regspacing = spmi->addr.bit_width / 8;
35bc37a0 1854 } else {
35bc37a0
CM
1855 info->io.regspacing = DEFAULT_REGSPACING;
1856 }
b0defcdb 1857 info->io.regsize = info->io.regspacing;
15a58ed1 1858 info->io.regshift = spmi->addr.bit_offset;
1da177e4 1859
15a58ed1 1860 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1da177e4 1861 info->io_setup = mem_setup;
b0defcdb 1862 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
15a58ed1 1863 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1da177e4 1864 info->io_setup = port_setup;
b0defcdb 1865 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1866 } else {
1867 kfree(info);
 1868 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1869 return -EIO;
1870 }
b0defcdb 1871 info->io.addr_data = spmi->addr.address;
1da177e4 1872
b0defcdb 1873 try_smi_init(info);
1da177e4 1874
1da177e4
LT
1875 return 0;
1876}
b0defcdb
CM
1877
1878static __devinit void acpi_find_bmc(void)
1879{
1880 acpi_status status;
1881 struct SPMITable *spmi;
1882 int i;
1883
1884 if (acpi_disabled)
1885 return;
1886
1887 if (acpi_failure)
1888 return;
1889
1890 for (i = 0; ; i++) {
15a58ed1
AS
1891 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1892 (struct acpi_table_header **)&spmi);
b0defcdb
CM
1893 if (status != AE_OK)
1894 return;
1895
1896 try_init_acpi(spmi);
1897 }
1898}
1da177e4
LT
1899#endif
1900
a9fad4cc 1901#ifdef CONFIG_DMI
b0defcdb 1902struct dmi_ipmi_data
1da177e4
LT
1903{
1904 u8 type;
1905 u8 addr_space;
1906 unsigned long base_addr;
1907 u8 irq;
1908 u8 offset;
1909 u8 slave_addr;
b0defcdb 1910};
1da177e4 1911
b0defcdb
CM
1912static int __devinit decode_dmi(struct dmi_header *dm,
1913 struct dmi_ipmi_data *dmi)
1da177e4 1914{
e8b33617 1915 u8 *data = (u8 *)dm;
1da177e4
LT
1916 unsigned long base_addr;
1917 u8 reg_spacing;
b224cd3a 1918 u8 len = dm->length;
1da177e4 1919
b0defcdb 1920 dmi->type = data[4];
1da177e4
LT
1921
1922 memcpy(&base_addr, data+8, sizeof(unsigned long));
1923 if (len >= 0x11) {
1924 if (base_addr & 1) {
1925 /* I/O */
1926 base_addr &= 0xFFFE;
b0defcdb 1927 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1928 }
1929 else {
1930 /* Memory */
b0defcdb 1931 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1da177e4
LT
1932 }
1933 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1934 is odd. */
b0defcdb 1935 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1da177e4 1936
b0defcdb 1937 dmi->irq = data[0x11];
1da177e4
LT
1938
1939 /* The top two bits of byte 0x10 hold the register spacing. */
b224cd3a 1940 reg_spacing = (data[0x10] & 0xC0) >> 6;
1da177e4
LT
1941 switch(reg_spacing){
1942 case 0x00: /* Byte boundaries */
b0defcdb 1943 dmi->offset = 1;
1da177e4
LT
1944 break;
1945 case 0x01: /* 32-bit boundaries */
b0defcdb 1946 dmi->offset = 4;
1da177e4
LT
1947 break;
1948 case 0x02: /* 16-byte boundaries */
b0defcdb 1949 dmi->offset = 16;
1da177e4
LT
1950 break;
1951 default:
1952 /* Some other interface, just ignore it. */
1953 return -EIO;
1954 }
1955 } else {
1956 /* Old DMI spec. */
92068801
CM
1957 /* Note that technically, the lower bit of the base
1958 * address should be 1 if the address is I/O and 0 if
1959 * the address is in memory. So many systems get that
1960 * wrong (and all that I have seen are I/O) so we just
1961 * ignore that bit and assume I/O. Systems that use
1962 * memory should use the newer spec, anyway. */
b0defcdb
CM
1963 dmi->base_addr = base_addr & 0xfffe;
1964 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1965 dmi->offset = 1;
1da177e4
LT
1966 }
1967
b0defcdb 1968 dmi->slave_addr = data[6];
1da177e4 1969
b0defcdb 1970 return 0;
1da177e4
LT
1971}
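/*
 * A worked example of the decoding above (hypothetical SMBIOS type 38
 * record, not from a real machine): a base address field of 0x0CA3 has
 * the low bit set, so the interface lives in I/O space at 0x0CA2; if
 * bit 4 of byte 0x10 were also set, the odd lsb would be restored and
 * the address would become 0x0CA3.  Register spacing bits (byte 0x10,
 * bits 6-7) of 0x01 would select 32-bit boundaries, i.e. an offset of
 * 4 bytes between registers.
 */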
1972
b0defcdb 1973static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1da177e4 1974{
b0defcdb 1975 struct smi_info *info;
1da177e4 1976
b0defcdb
CM
1977 info = kzalloc(sizeof(*info), GFP_KERNEL);
1978 if (!info) {
1979 printk(KERN_ERR
1980 "ipmi_si: Could not allocate SI data\n");
1981 return;
1da177e4 1982 }
1da177e4 1983
b0defcdb 1984 info->addr_source = "SMBIOS";
1da177e4 1985
e8b33617 1986 switch (ipmi_data->type) {
b0defcdb
CM
1987 case 0x01: /* KCS */
1988 info->si_type = SI_KCS;
1989 break;
1990 case 0x02: /* SMIC */
1991 info->si_type = SI_SMIC;
1992 break;
1993 case 0x03: /* BT */
1994 info->si_type = SI_BT;
1995 break;
1996 default:
1997 return;
1da177e4 1998 }
1da177e4 1999
b0defcdb
CM
2000 switch (ipmi_data->addr_space) {
2001 case IPMI_MEM_ADDR_SPACE:
1da177e4 2002 info->io_setup = mem_setup;
b0defcdb
CM
2003 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2004 break;
2005
2006 case IPMI_IO_ADDR_SPACE:
1da177e4 2007 info->io_setup = port_setup;
b0defcdb
CM
2008 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2009 break;
2010
2011 default:
1da177e4 2012 kfree(info);
b0defcdb
CM
2013 printk(KERN_WARNING
2014 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2015 ipmi_data->addr_space);
2016 return;
1da177e4 2017 }
b0defcdb 2018 info->io.addr_data = ipmi_data->base_addr;
1da177e4 2019
b0defcdb
CM
2020 info->io.regspacing = ipmi_data->offset;
2021 if (!info->io.regspacing)
1da177e4
LT
2022 info->io.regspacing = DEFAULT_REGSPACING;
2023 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2024 info->io.regshift = 0;
1da177e4
LT
2025
2026 info->slave_addr = ipmi_data->slave_addr;
2027
b0defcdb
CM
2028 info->irq = ipmi_data->irq;
2029 if (info->irq)
2030 info->irq_setup = std_irq_setup;
1da177e4 2031
b0defcdb
CM
2032 try_smi_init(info);
2033}
1da177e4 2034
b0defcdb
CM
2035static void __devinit dmi_find_bmc(void)
2036{
2037 struct dmi_device *dev = NULL;
2038 struct dmi_ipmi_data data;
2039 int rv;
2040
2041 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
397f4ebf 2042 memset(&data, 0, sizeof(data));
b0defcdb
CM
2043 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2044 if (!rv)
2045 try_init_dmi(&data);
2046 }
1da177e4 2047}
a9fad4cc 2048#endif /* CONFIG_DMI */
1da177e4
LT
2049
2050#ifdef CONFIG_PCI
2051
b0defcdb
CM
2052#define PCI_ERMC_CLASSCODE 0x0C0700
2053#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2054#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2055#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2056#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2057#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
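/*
 * The class code above follows the PCI convention for IPMI
 * controllers: base class 0x0c (serial bus), sub-class 0x07 (IPMI),
 * with the low programming-interface byte selecting SMIC (0), KCS (1)
 * or BT (2).  ipmi_pci_probe() masks that byte out of pdev->class to
 * pick the matching state machine.
 */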
2058
1da177e4
LT
2059#define PCI_HP_VENDOR_ID 0x103C
2060#define PCI_MMC_DEVICE_ID 0x121A
2061#define PCI_MMC_ADDR_CW 0x10
2062
b0defcdb
CM
2063static void ipmi_pci_cleanup(struct smi_info *info)
2064{
2065 struct pci_dev *pdev = info->addr_source_data;
2066
2067 pci_disable_device(pdev);
2068}
1da177e4 2069
b0defcdb
CM
2070static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2071 const struct pci_device_id *ent)
1da177e4 2072{
b0defcdb
CM
2073 int rv;
2074 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2075 struct smi_info *info;
2076 int first_reg_offset = 0;
1da177e4 2077
b0defcdb
CM
2078 info = kzalloc(sizeof(*info), GFP_KERNEL);
2079 if (!info)
1cd441f9 2080 return -ENOMEM;
1da177e4 2081
b0defcdb 2082 info->addr_source = "PCI";
1da177e4 2083
b0defcdb
CM
2084 switch (class_type) {
2085 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2086 info->si_type = SI_SMIC;
2087 break;
1da177e4 2088
b0defcdb
CM
2089 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2090 info->si_type = SI_KCS;
2091 break;
2092
2093 case PCI_ERMC_CLASSCODE_TYPE_BT:
2094 info->si_type = SI_BT;
2095 break;
2096
2097 default:
2098 kfree(info);
2099 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2100 pci_name(pdev), class_type);
1cd441f9 2101 return -ENODEV;
1da177e4
LT
2102 }
2103
b0defcdb
CM
2104 rv = pci_enable_device(pdev);
2105 if (rv) {
2106 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2107 pci_name(pdev));
2108 kfree(info);
2109 return rv;
1da177e4
LT
2110 }
2111
b0defcdb
CM
2112 info->addr_source_cleanup = ipmi_pci_cleanup;
2113 info->addr_source_data = pdev;
1da177e4 2114
b0defcdb
CM
2115 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2116 first_reg_offset = 1;
1da177e4 2117
b0defcdb
CM
2118 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2119 info->io_setup = port_setup;
2120 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2121 } else {
2122 info->io_setup = mem_setup;
2123 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1da177e4 2124 }
b0defcdb 2125 info->io.addr_data = pci_resource_start(pdev, 0);
1da177e4 2126
b0defcdb 2127 info->io.regspacing = DEFAULT_REGSPACING;
1da177e4 2128 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2129 info->io.regshift = 0;
1da177e4 2130
b0defcdb
CM
2131 info->irq = pdev->irq;
2132 if (info->irq)
2133 info->irq_setup = std_irq_setup;
1da177e4 2134
50c812b2
CM
2135 info->dev = &pdev->dev;
2136
b0defcdb
CM
2137 return try_smi_init(info);
2138}
1da177e4 2139
b0defcdb
CM
2140static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2141{
2142}
1da177e4 2143
b0defcdb
CM
2144#ifdef CONFIG_PM
2145static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2146{
1da177e4
LT
2147 return 0;
2148}
1da177e4 2149
b0defcdb 2150static int ipmi_pci_resume(struct pci_dev *pdev)
1da177e4 2151{
b0defcdb
CM
2152 return 0;
2153}
1da177e4 2154#endif
1da177e4 2155
b0defcdb
CM
2156static struct pci_device_id ipmi_pci_devices[] = {
2157 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
d13adb60 2158 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }
b0defcdb
CM
2159};
2160MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2161
2162static struct pci_driver ipmi_pci_driver = {
2163 .name = DEVICE_NAME,
2164 .id_table = ipmi_pci_devices,
2165 .probe = ipmi_pci_probe,
2166 .remove = __devexit_p(ipmi_pci_remove),
2167#ifdef CONFIG_PM
2168 .suspend = ipmi_pci_suspend,
2169 .resume = ipmi_pci_resume,
2170#endif
2171};
2172#endif /* CONFIG_PCI */
1da177e4
LT
2173
2174
2175static int try_get_dev_id(struct smi_info *smi_info)
2176{
50c812b2
CM
2177 unsigned char msg[2];
2178 unsigned char *resp;
2179 unsigned long resp_len;
2180 enum si_sm_result smi_result;
2181 int rv = 0;
1da177e4
LT
2182
2183 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
b0defcdb 2184 if (!resp)
1da177e4
LT
2185 return -ENOMEM;
2186
2187 /* Do a Get Device ID command, since it comes back with some
2188 useful info. */
2189 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2190 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2191 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2192
2193 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2194 for (;;)
2195 {
c3e7e791
CM
2196 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2197 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
da4cd8df 2198 schedule_timeout_uninterruptible(1);
1da177e4
LT
2199 smi_result = smi_info->handlers->event(
2200 smi_info->si_sm, 100);
2201 }
2202 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2203 {
2204 smi_result = smi_info->handlers->event(
2205 smi_info->si_sm, 0);
2206 }
2207 else
2208 break;
2209 }
2210 if (smi_result == SI_SM_HOSED) {
2211 /* We couldn't get the state machine to run, so whatever's at
2212 the port is probably not an IPMI SMI interface. */
2213 rv = -ENODEV;
2214 goto out;
2215 }
2216
2217 /* Otherwise, we got some data. */
2218 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2219 resp, IPMI_MAX_MSG_LENGTH);
50c812b2 2220 if (resp_len < 14) {
1da177e4
LT
2221 /* That's odd, it should be longer. */
2222 rv = -EINVAL;
2223 goto out;
2224 }
2225
2226 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2227 /* That's odd, it shouldn't be able to fail. */
2228 rv = -EINVAL;
2229 goto out;
2230 }
2231
2232 /* Record info from the get device id, in case we need it. */
50c812b2 2233 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1da177e4
LT
2234
2235 out:
2236 kfree(resp);
2237 return rv;
2238}
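/*
 * The response checked above is laid out as the state machine returns
 * it: resp[0] is the network function/LUN byte, resp[1] echoes the
 * command (IPMI_GET_DEVICE_ID_CMD), resp[2] is the completion code,
 * and resp[3..] carries the device id payload handed to
 * ipmi_demangle_device_id().  A complete payload is at least 11 bytes,
 * which is why anything shorter than 14 bytes total is rejected.
 */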
2239
2240static int type_file_read_proc(char *page, char **start, off_t off,
2241 int count, int *eof, void *data)
2242{
1da177e4
LT
2243 struct smi_info *smi = data;
2244
b361e27b 2245 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
1da177e4
LT
2246}
2247
2248static int stat_file_read_proc(char *page, char **start, off_t off,
2249 int count, int *eof, void *data)
2250{
2251 char *out = (char *) page;
2252 struct smi_info *smi = data;
2253
2254 out += sprintf(out, "interrupts_enabled: %d\n",
b0defcdb 2255 smi->irq && !smi->interrupt_disabled);
1da177e4
LT
2256 out += sprintf(out, "short_timeouts: %ld\n",
2257 smi->short_timeouts);
2258 out += sprintf(out, "long_timeouts: %ld\n",
2259 smi->long_timeouts);
2260 out += sprintf(out, "timeout_restarts: %ld\n",
2261 smi->timeout_restarts);
2262 out += sprintf(out, "idles: %ld\n",
2263 smi->idles);
2264 out += sprintf(out, "interrupts: %ld\n",
2265 smi->interrupts);
2266 out += sprintf(out, "attentions: %ld\n",
2267 smi->attentions);
2268 out += sprintf(out, "flag_fetches: %ld\n",
2269 smi->flag_fetches);
2270 out += sprintf(out, "hosed_count: %ld\n",
2271 smi->hosed_count);
2272 out += sprintf(out, "complete_transactions: %ld\n",
2273 smi->complete_transactions);
2274 out += sprintf(out, "events: %ld\n",
2275 smi->events);
2276 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
2277 smi->watchdog_pretimeouts);
2278 out += sprintf(out, "incoming_messages: %ld\n",
2279 smi->incoming_messages);
2280
b361e27b
CM
2281 return out - page;
2282}
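/*
 * A sample of what the si_stats proc file produced above might look
 * like (counter values invented for illustration):
 *
 *   interrupts_enabled: 1
 *   short_timeouts: 12
 *   long_timeouts: 4
 *   ...
 *   incoming_messages: 37
 *
 * one "name: value" line per counter, in the order of the sprintf()
 * calls.
 */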
2283
2284static int param_read_proc(char *page, char **start, off_t off,
2285 int count, int *eof, void *data)
2286{
2287 struct smi_info *smi = data;
2288
2289 return sprintf(page,
2290 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2291 si_to_str[smi->si_type],
2292 addr_space_to_str[smi->io.addr_type],
2293 smi->io.addr_data,
2294 smi->io.regspacing,
2295 smi->io.regsize,
2296 smi->io.regshift,
2297 smi->irq,
2298 smi->slave_addr);
1da177e4
LT
2299}
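/*
 * The params proc file is a single comma-separated line; assuming the
 * usual si_to_str/addr_space_to_str strings ("kcs"/"smic"/"bt" and
 * "i/o"/"mem"), a KCS interface at the default port with slave address
 * 0x20 (printed in decimal) would read something like:
 *
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 */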
2300
3ae0e0f9
CM
2301/*
2302 * oem_data_avail_to_receive_msg_avail
2303 * @info - smi_info structure with msg_flags set
2304 *
2305 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2306 * Returns 1 indicating need to re-run handle_flags().
2307 */
2308static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2309{
e8b33617
CM
2310 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2311 RECEIVE_MSG_AVAIL);
3ae0e0f9
CM
2312 return 1;
2313}
2314
2315/*
2316 * setup_dell_poweredge_oem_data_handler
2317 * @info - smi_info.device_id must be populated
2318 *
2319 * Systems that match, but have firmware version < 1.40 may assert
2320 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2321 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2322 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2323 * as RECEIVE_MSG_AVAIL instead.
2324 *
2325 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2326 * assert the OEM[012] bits, and if it did, the driver would have to
2327 * change to handle that properly, we don't actually check for the
2328 * firmware version.
2329 * Device ID = 0x20 BMC on PowerEdge 8G servers
2330 * Device Revision = 0x80
2331 * Firmware Revision1 = 0x01 BMC version 1.40
2332 * Firmware Revision2 = 0x40 BCD encoded
2333 * IPMI Version = 0x51 IPMI 1.5
2334 * Manufacturer ID = A2 02 00 Dell IANA
2335 *
d5a2b89a
CM
2336 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 2337 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2338 *
3ae0e0f9
CM
2339 */
2340#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2341#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2342#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
50c812b2 2343#define DELL_IANA_MFR_ID 0x0002a2
3ae0e0f9
CM
2344static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2345{
2346 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2347 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
d5a2b89a
CM
2348 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2349 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
50c812b2 2350 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
d5a2b89a
CM
2351 smi_info->oem_data_avail_handler =
2352 oem_data_avail_to_receive_msg_avail;
2353 }
2354 else if (ipmi_version_major(id) < 1 ||
2355 (ipmi_version_major(id) == 1 &&
2356 ipmi_version_minor(id) < 5)) {
2357 smi_info->oem_data_avail_handler =
2358 oem_data_avail_to_receive_msg_avail;
2359 }
3ae0e0f9
CM
2360 }
2361}
2362
ea94027b
CM
2363#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2364static void return_hosed_msg_badsize(struct smi_info *smi_info)
2365{
2366 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2367
 2368 /* Make it a response */
2369 msg->rsp[0] = msg->data[0] | 4;
2370 msg->rsp[1] = msg->data[1];
2371 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2372 msg->rsp_size = 3;
2373 smi_info->curr_msg = NULL;
2374 deliver_recv_msg(smi_info, msg);
2375}
2376
2377/*
2378 * dell_poweredge_bt_xaction_handler
2379 * @info - smi_info.device_id must be populated
2380 *
2381 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2382 * not respond to a Get SDR command if the length of the data
2383 * requested is exactly 0x3A, which leads to command timeouts and no
2384 * data returned. This intercepts such commands, and causes userspace
2385 * callers to try again with a different-sized buffer, which succeeds.
2386 */
2387
2388#define STORAGE_NETFN 0x0A
2389#define STORAGE_CMD_GET_SDR 0x23
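/*
 * In a Get SDR request as built by the caller, data[0] is the netfn
 * (STORAGE_NETFN) shifted left two bits with the LUN, data[1] is the
 * command, and data[7] is the count of bytes to read; the handler
 * below only intercepts requests whose byte count is exactly 0x3A.
 */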
2390static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2391 unsigned long unused,
2392 void *in)
2393{
2394 struct smi_info *smi_info = in;
2395 unsigned char *data = smi_info->curr_msg->data;
2396 unsigned int size = smi_info->curr_msg->data_size;
2397 if (size >= 8 &&
2398 (data[0]>>2) == STORAGE_NETFN &&
2399 data[1] == STORAGE_CMD_GET_SDR &&
2400 data[7] == 0x3A) {
2401 return_hosed_msg_badsize(smi_info);
2402 return NOTIFY_STOP;
2403 }
2404 return NOTIFY_DONE;
2405}
2406
2407static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2408 .notifier_call = dell_poweredge_bt_xaction_handler,
2409};
2410
2411/*
2412 * setup_dell_poweredge_bt_xaction_handler
2413 * @info - smi_info.device_id must be filled in already
2414 *
 2415 * Registers the Dell PowerEdge BT transaction notifier
 2416 * when we know this workaround is needed.
2417 */
2418static void
2419setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2420{
2421 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2422 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
ea94027b
CM
2423 smi_info->si_type == SI_BT)
2424 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2425}
2426
3ae0e0f9
CM
2427/*
2428 * setup_oem_data_handler
2429 * @info - smi_info.device_id must be filled in already
2430 *
 2431 * Fills in smi_info.oem_data_avail_handler
2432 * when we know what function to use there.
2433 */
2434
2435static void setup_oem_data_handler(struct smi_info *smi_info)
2436{
2437 setup_dell_poweredge_oem_data_handler(smi_info);
2438}
2439
ea94027b
CM
2440static void setup_xaction_handlers(struct smi_info *smi_info)
2441{
2442 setup_dell_poweredge_bt_xaction_handler(smi_info);
2443}
2444
a9a2c44f
CM
2445static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2446{
453823ba
CM
2447 if (smi_info->intf) {
2448 /* The timer and thread are only running if the
2449 interface has been started up and registered. */
2450 if (smi_info->thread != NULL)
2451 kthread_stop(smi_info->thread);
2452 del_timer_sync(&smi_info->si_timer);
2453 }
a9a2c44f
CM
2454}
2455
7420884c 2456static __devinitdata struct ipmi_default_vals
b0defcdb
CM
2457{
2458 int type;
2459 int port;
7420884c 2460} ipmi_defaults[] =
b0defcdb
CM
2461{
2462 { .type = SI_KCS, .port = 0xca2 },
2463 { .type = SI_SMIC, .port = 0xca9 },
2464 { .type = SI_BT, .port = 0xe4 },
2465 { .port = 0 }
2466};
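/*
 * These are the legacy "well known" addresses from the IPMI
 * specification (KCS at 0xca2, SMIC at 0xca9, BT at 0xe4).  They are
 * only tried when no interface was found by any other means and
 * si_trydefaults is set; the zero port entry terminates the list.
 */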
2467
2468static __devinit void default_find_bmc(void)
2469{
2470 struct smi_info *info;
2471 int i;
2472
2473 for (i = 0; ; i++) {
2474 if (!ipmi_defaults[i].port)
2475 break;
2476
2477 info = kzalloc(sizeof(*info), GFP_KERNEL);
2478 if (!info)
2479 return;
2480
2481 info->addr_source = NULL;
2482
2483 info->si_type = ipmi_defaults[i].type;
2484 info->io_setup = port_setup;
2485 info->io.addr_data = ipmi_defaults[i].port;
2486 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2487
2488 info->io.addr = NULL;
2489 info->io.regspacing = DEFAULT_REGSPACING;
2490 info->io.regsize = DEFAULT_REGSPACING;
2491 info->io.regshift = 0;
2492
2493 if (try_smi_init(info) == 0) {
2494 /* Found one... */
2495 printk(KERN_INFO "ipmi_si: Found default %s state"
2496 " machine at %s address 0x%lx\n",
2497 si_to_str[info->si_type],
2498 addr_space_to_str[info->io.addr_type],
2499 info->io.addr_data);
2500 return;
2501 }
2502 }
2503}
2504
2505static int is_new_interface(struct smi_info *info)
1da177e4 2506{
b0defcdb 2507 struct smi_info *e;
1da177e4 2508
b0defcdb
CM
2509 list_for_each_entry(e, &smi_infos, link) {
2510 if (e->io.addr_type != info->io.addr_type)
2511 continue;
2512 if (e->io.addr_data == info->io.addr_data)
2513 return 0;
2514 }
1da177e4 2515
b0defcdb
CM
2516 return 1;
2517}
1da177e4 2518
b0defcdb
CM
2519static int try_smi_init(struct smi_info *new_smi)
2520{
2521 int rv;
2522
2523 if (new_smi->addr_source) {
2524 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2525 " machine at %s address 0x%lx, slave address 0x%x,"
2526 " irq %d\n",
2527 new_smi->addr_source,
2528 si_to_str[new_smi->si_type],
2529 addr_space_to_str[new_smi->io.addr_type],
2530 new_smi->io.addr_data,
2531 new_smi->slave_addr, new_smi->irq);
2532 }
2533
d6dfd131 2534 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2535 if (!is_new_interface(new_smi)) {
2536 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2537 rv = -EBUSY;
2538 goto out_err;
2539 }
1da177e4
LT
2540
2541 /* So we know not to free it unless we have allocated one. */
2542 new_smi->intf = NULL;
2543 new_smi->si_sm = NULL;
2544 new_smi->handlers = NULL;
2545
b0defcdb
CM
2546 switch (new_smi->si_type) {
2547 case SI_KCS:
1da177e4 2548 new_smi->handlers = &kcs_smi_handlers;
b0defcdb
CM
2549 break;
2550
2551 case SI_SMIC:
1da177e4 2552 new_smi->handlers = &smic_smi_handlers;
b0defcdb
CM
2553 break;
2554
2555 case SI_BT:
1da177e4 2556 new_smi->handlers = &bt_smi_handlers;
b0defcdb
CM
2557 break;
2558
2559 default:
1da177e4
LT
2560 /* No support for anything else yet. */
2561 rv = -EIO;
2562 goto out_err;
2563 }
2564
2565 /* Allocate the state machine's data and initialize it. */
2566 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
b0defcdb 2567 if (!new_smi->si_sm) {
1da177e4
LT
2568 printk(" Could not allocate state machine memory\n");
2569 rv = -ENOMEM;
2570 goto out_err;
2571 }
2572 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2573 &new_smi->io);
2574
2575 /* Now that we know the I/O size, we can set up the I/O. */
2576 rv = new_smi->io_setup(new_smi);
2577 if (rv) {
2578 printk(" Could not set up I/O space\n");
2579 goto out_err;
2580 }
2581
2582 spin_lock_init(&(new_smi->si_lock));
2583 spin_lock_init(&(new_smi->msg_lock));
2584 spin_lock_init(&(new_smi->count_lock));
2585
2586 /* Do low-level detection first. */
2587 if (new_smi->handlers->detect(new_smi->si_sm)) {
b0defcdb
CM
2588 if (new_smi->addr_source)
2589 printk(KERN_INFO "ipmi_si: Interface detection"
2590 " failed\n");
1da177e4
LT
2591 rv = -ENODEV;
2592 goto out_err;
2593 }
2594
2595 /* Attempt a get device id command. If it fails, we probably
b0defcdb 2596 don't have a BMC here. */
1da177e4 2597 rv = try_get_dev_id(new_smi);
b0defcdb
CM
2598 if (rv) {
2599 if (new_smi->addr_source)
2600 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2601 " at this location\n");
1da177e4 2602 goto out_err;
b0defcdb 2603 }
1da177e4 2604
3ae0e0f9 2605 setup_oem_data_handler(new_smi);
ea94027b 2606 setup_xaction_handlers(new_smi);
3ae0e0f9 2607
1da177e4 2608 /* Try to claim any interrupts. */
b0defcdb
CM
2609 if (new_smi->irq_setup)
2610 new_smi->irq_setup(new_smi);
1da177e4
LT
2611
2612 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2613 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2614 new_smi->curr_msg = NULL;
2615 atomic_set(&new_smi->req_events, 0);
2616 new_smi->run_to_completion = 0;
2617
2618 new_smi->interrupt_disabled = 0;
a9a2c44f 2619 atomic_set(&new_smi->stop_operation, 0);
b0defcdb
CM
2620 new_smi->intf_num = smi_num;
2621 smi_num++;
1da177e4
LT
2622
2623 /* Start clearing the flags before we enable interrupts or the
2624 timer to avoid racing with the timer. */
2625 start_clear_flags(new_smi);
2626 /* IRQ is defined to be set when non-zero. */
2627 if (new_smi->irq)
2628 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2629
50c812b2
CM
2630 if (!new_smi->dev) {
2631 /* If we don't already have a device from something
2632 * else (like PCI), then register a new one. */
2633 new_smi->pdev = platform_device_alloc("ipmi_si",
2634 new_smi->intf_num);
 2635 if (!new_smi->pdev) {
 rv = -ENOMEM;
2636 printk(KERN_ERR
2637 "ipmi_si_intf:"
2638 " Unable to allocate platform device\n");
453823ba 2639 goto out_err;
50c812b2
CM
2640 }
2641 new_smi->dev = &new_smi->pdev->dev;
2642 new_smi->dev->driver = &ipmi_driver;
2643
b48f5457 2644 rv = platform_device_add(new_smi->pdev);
50c812b2
CM
2645 if (rv) {
2646 printk(KERN_ERR
2647 "ipmi_si_intf:"
2648 " Unable to register system interface device:"
2649 " %d\n",
2650 rv);
453823ba 2651 goto out_err;
50c812b2
CM
2652 }
2653 new_smi->dev_registered = 1;
2654 }
2655
1da177e4
LT
2656 rv = ipmi_register_smi(&handlers,
2657 new_smi,
50c812b2
CM
2658 &new_smi->device_id,
2659 new_smi->dev,
759643b8 2660 "bmc",
453823ba 2661 new_smi->slave_addr);
1da177e4
LT
2662 if (rv) {
2663 printk(KERN_ERR
2664 "ipmi_si: Unable to register device: error %d\n",
2665 rv);
2666 goto out_err_stop_timer;
2667 }
2668
2669 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2670 type_file_read_proc, NULL,
2671 new_smi, THIS_MODULE);
2672 if (rv) {
2673 printk(KERN_ERR
2674 "ipmi_si: Unable to create proc entry: %d\n",
2675 rv);
2676 goto out_err_stop_timer;
2677 }
2678
2679 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2680 stat_file_read_proc, NULL,
2681 new_smi, THIS_MODULE);
2682 if (rv) {
2683 printk(KERN_ERR
2684 "ipmi_si: Unable to create proc entry: %d\n",
2685 rv);
2686 goto out_err_stop_timer;
2687 }
2688
b361e27b
CM
2689 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2690 param_read_proc, NULL,
2691 new_smi, THIS_MODULE);
2692 if (rv) {
2693 printk(KERN_ERR
2694 "ipmi_si: Unable to create proc entry: %d\n",
2695 rv);
2696 goto out_err_stop_timer;
2697 }
2698
b0defcdb
CM
2699 list_add_tail(&new_smi->link, &smi_infos);
2700
d6dfd131 2701 mutex_unlock(&smi_infos_lock);
1da177e4 2702
b0defcdb 2703 printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
1da177e4
LT
2704
2705 return 0;
2706
2707 out_err_stop_timer:
a9a2c44f
CM
2708 atomic_inc(&new_smi->stop_operation);
2709 wait_for_timer_and_thread(new_smi);
1da177e4
LT
2710
2711 out_err:
2712 if (new_smi->intf)
2713 ipmi_unregister_smi(new_smi->intf);
2714
b0defcdb
CM
2715 if (new_smi->irq_cleanup)
2716 new_smi->irq_cleanup(new_smi);
1da177e4
LT
2717
 2718 /* Wait until we know that we are out of any interrupt
 2719 handlers that might have been running before we free the
 2720 interrupt. */
fbd568a3 2721 synchronize_sched();
1da177e4
LT
2722
2723 if (new_smi->si_sm) {
2724 if (new_smi->handlers)
2725 new_smi->handlers->cleanup(new_smi->si_sm);
2726 kfree(new_smi->si_sm);
2727 }
b0defcdb
CM
2728 if (new_smi->addr_source_cleanup)
2729 new_smi->addr_source_cleanup(new_smi);
7767e126
PG
2730 if (new_smi->io_cleanup)
2731 new_smi->io_cleanup(new_smi);
1da177e4 2732
50c812b2
CM
2733 if (new_smi->dev_registered)
2734 platform_device_unregister(new_smi->pdev);
2735
2736 kfree(new_smi);
2737
d6dfd131 2738 mutex_unlock(&smi_infos_lock);
b0defcdb 2739
1da177e4
LT
2740 return rv;
2741}
2742
b0defcdb 2743static __devinit int init_ipmi_si(void)
1da177e4 2744{
1da177e4
LT
2745 int i;
2746 char *str;
50c812b2 2747 int rv;
1da177e4
LT
2748
2749 if (initialized)
2750 return 0;
2751 initialized = 1;
2752
50c812b2
CM
2753 /* Register the device drivers. */
2754 rv = driver_register(&ipmi_driver);
2755 if (rv) {
2756 printk(KERN_ERR
2757 "init_ipmi_si: Unable to register driver: %d\n",
2758 rv);
2759 return rv;
2760 }
2761
2762
1da177e4
LT
2763 /* Parse out the si_type string into its components. */
2764 str = si_type_str;
2765 if (*str != '\0') {
e8b33617 2766 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
1da177e4
LT
2767 si_type[i] = str;
2768 str = strchr(str, ',');
2769 if (str) {
2770 *str = '\0';
2771 str++;
2772 } else {
2773 break;
2774 }
2775 }
2776 }
2777
1fdd75bd 2778 printk(KERN_INFO "IPMI System Interface driver.\n");
1da177e4 2779
b0defcdb
CM
2780 hardcode_find_bmc();
2781
a9fad4cc 2782#ifdef CONFIG_DMI
b224cd3a 2783 dmi_find_bmc();
1da177e4
LT
2784#endif
2785
b0defcdb 2786#ifdef CONFIG_ACPI
1d5636cc 2787 acpi_find_bmc();
b0defcdb 2788#endif
1da177e4 2789
b0defcdb 2790#ifdef CONFIG_PCI
168b35a7
CM
2791 rv = pci_register_driver(&ipmi_pci_driver);
2792 if (rv){
2793 printk(KERN_ERR
2794 "init_ipmi_si: Unable to register PCI driver: %d\n",
2795 rv);
2796 }
b0defcdb
CM
2797#endif
2798
2799 if (si_trydefaults) {
d6dfd131 2800 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2801 if (list_empty(&smi_infos)) {
2802 /* No BMC was found, try defaults. */
d6dfd131 2803 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2804 default_find_bmc();
2805 } else {
d6dfd131 2806 mutex_unlock(&smi_infos_lock);
b0defcdb 2807 }
1da177e4
LT
2808 }
2809
d6dfd131 2810 mutex_lock(&smi_infos_lock);
b361e27b 2811 if (unload_when_empty && list_empty(&smi_infos)) {
d6dfd131 2812 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2813#ifdef CONFIG_PCI
2814 pci_unregister_driver(&ipmi_pci_driver);
2815#endif
55ebcc38 2816 driver_unregister(&ipmi_driver);
1da177e4
LT
 2817 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2818 return -ENODEV;
b0defcdb 2819 } else {
d6dfd131 2820 mutex_unlock(&smi_infos_lock);
b0defcdb 2821 return 0;
1da177e4 2822 }
1da177e4
LT
2823}
2824module_init(init_ipmi_si);
2825
b361e27b 2826static void cleanup_one_si(struct smi_info *to_clean)
1da177e4
LT
2827{
2828 int rv;
2829 unsigned long flags;
2830
b0defcdb 2831 if (!to_clean)
1da177e4
LT
2832 return;
2833
b0defcdb
CM
2834 list_del(&to_clean->link);
2835
1da177e4
LT
2836 /* Tell the timer and interrupt handlers that we are shutting
2837 down. */
2838 spin_lock_irqsave(&(to_clean->si_lock), flags);
2839 spin_lock(&(to_clean->msg_lock));
2840
a9a2c44f 2841 atomic_inc(&to_clean->stop_operation);
b0defcdb
CM
2842
2843 if (to_clean->irq_cleanup)
2844 to_clean->irq_cleanup(to_clean);
1da177e4
LT
2845
2846 spin_unlock(&(to_clean->msg_lock));
2847 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2848
 2849 /* Wait until we know that we are out of any interrupt
 2850 handlers that might have been running before we free the
 2851 interrupt. */
fbd568a3 2852 synchronize_sched();
1da177e4 2853
a9a2c44f 2854 wait_for_timer_and_thread(to_clean);
1da177e4
LT
2855
2856 /* Interrupts and timeouts are stopped, now make sure the
2857 interface is in a clean state. */
e8b33617 2858 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1da177e4 2859 poll(to_clean);
da4cd8df 2860 schedule_timeout_uninterruptible(1);
1da177e4
LT
2861 }
2862
2863 rv = ipmi_unregister_smi(to_clean->intf);
2864 if (rv) {
2865 printk(KERN_ERR
2866 "ipmi_si: Unable to unregister device: errno=%d\n",
2867 rv);
2868 }
2869
2870 to_clean->handlers->cleanup(to_clean->si_sm);
2871
2872 kfree(to_clean->si_sm);
2873
b0defcdb
CM
2874 if (to_clean->addr_source_cleanup)
2875 to_clean->addr_source_cleanup(to_clean);
7767e126
PG
2876 if (to_clean->io_cleanup)
2877 to_clean->io_cleanup(to_clean);
50c812b2
CM
2878
2879 if (to_clean->dev_registered)
2880 platform_device_unregister(to_clean->pdev);
2881
2882 kfree(to_clean);
1da177e4
LT
2883}
2884
2885static __exit void cleanup_ipmi_si(void)
2886{
b0defcdb 2887 struct smi_info *e, *tmp_e;
1da177e4 2888
b0defcdb 2889 if (!initialized)
1da177e4
LT
2890 return;
2891
b0defcdb
CM
2892#ifdef CONFIG_PCI
2893 pci_unregister_driver(&ipmi_pci_driver);
2894#endif
2895
d6dfd131 2896 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2897 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2898 cleanup_one_si(e);
d6dfd131 2899 mutex_unlock(&smi_infos_lock);
50c812b2
CM
2900
2901 driver_unregister(&ipmi_driver);
1da177e4
LT
2902}
2903module_exit(cleanup_ipmi_si);
2904
2905MODULE_LICENSE("GPL");
1fdd75bd
CM
2906MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2907MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");