author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /drivers/char/ipmi
tags      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/Kconfig            |   67
-rw-r--r--  drivers/char/ipmi/Makefile           |   15
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c       |  513
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c     |  582
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c      |  500
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  | 3174
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c    |  549
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c     | 2359
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h       |  120
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c     |  599
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c    | 1068
11 files changed, 9546 insertions(+), 0 deletions(-)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 000000000000..a6dcb2918157
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,67 @@
+#
+# IPMI device configuration
+#
+
+menu "IPMI"
+config IPMI_HANDLER
+ tristate 'IPMI top-level message handler'
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
+
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
+
+ See <file:Documentation/IPMI.txt> for more details on the driver.
+
+ If unsure, say N.
+
+config IPMI_PANIC_EVENT
+ bool 'Generate a panic event to all BMCs on a panic'
+ depends on IPMI_HANDLER
+ help
+ When a panic occurs, this will cause the IPMI message handler to
+ generate an IPMI event describing the panic to each interface
+ registered with the message handler.
+
+config IPMI_PANIC_STRING
+ bool 'Generate OEM events containing the panic string'
+ depends on IPMI_PANIC_EVENT
+ help
+ When a panic occurs, this will cause the IPMI message handler to
+ generate IPMI OEM type f0 events holding the IPMB address of the
+ panic generator (byte 4 of the event), a sequence number for the
+ string (byte 5 of the event) and part of the string (the rest of the
+ event). Bytes 1, 2, and 3 are the normal usage for an OEM event.
+ You can fetch these events and use the sequence numbers to piece the
+ string together.
+
+config IPMI_DEVICE_INTERFACE
+ tristate 'Device interface for IPMI'
+ depends on IPMI_HANDLER
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
+
+config IPMI_SI
+ tristate 'IPMI System Interface handler'
+ depends on IPMI_HANDLER
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT).
+	  If you are using IPMI, you should probably say "y" here.
+
+config IPMI_WATCHDOG
+ tristate 'IPMI Watchdog Timer'
+ depends on IPMI_HANDLER
+ help
+ This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+ tristate 'IPMI Poweroff'
+ depends on IPMI_HANDLER
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
+
+endmenu
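As a concrete illustration, a .config fragment that enables everything above as modules would look like this (a sketch; the symbol names come from the entries above, the =m choices are illustrative, and the panic options are bools that can only be y or unset):

    CONFIG_IPMI_HANDLER=m
    CONFIG_IPMI_DEVICE_INTERFACE=m
    CONFIG_IPMI_SI=m
    CONFIG_IPMI_WATCHDOG=m
    CONFIG_IPMI_POWEROFF=m
    # CONFIG_IPMI_PANIC_EVENT is not set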
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 000000000000..553f0a408eda
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+
+ipmi_si.o: $(ipmi_si-objs)
+ $(LD) -r -o $@ $(ipmi_si-objs)
+
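ipmi_si is a composite module: kbuild gathers the objects listed in ipmi_si-objs, and the explicit $(LD) -r rule performs the partial (relocatable) link that merges the interface driver with its three state machines. With a configured tree, this directory alone can be rebuilt with the standard kbuild invocation (illustrative, nothing specific to this Makefile):

    make drivers/char/ipmi/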
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 000000000000..225b330115bb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,513 @@
+/*
+ * ipmi_bt_sm.c
+ *
+ * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ * of the driver architecture at http://sourceforge.net/project/openipmi
+ *
+ * Author: Rocky Craig <first.last@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define IPMI_BT_VERSION "v33"
+
+static int bt_debug = 0x00; /* Production value 0, see following flags */
+
+#define BT_DEBUG_ENABLE 1
+#define BT_DEBUG_MSG 2
+#define BT_DEBUG_STATES 4
+
+/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ buffer (with a documented message of 160 bytes) so go for the max.
+ Since the Open IPMI architecture is single-message oriented at this
+ stage, the queue depth of BT is of no concern. */
+
+#define BT_NORMAL_TIMEOUT	2000000	/* 2 seconds, in microseconds */
+#define BT_RETRY_LIMIT 2
+#define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */
+
+enum bt_states {
+ BT_STATE_IDLE,
+ BT_STATE_XACTION_START,
+ BT_STATE_WRITE_BYTES,
+ BT_STATE_WRITE_END,
+ BT_STATE_WRITE_CONSUME,
+ BT_STATE_B2H_WAIT,
+ BT_STATE_READ_END,
+ BT_STATE_RESET1, /* These must come last */
+ BT_STATE_RESET2,
+ BT_STATE_RESET3,
+ BT_STATE_RESTART,
+ BT_STATE_HOSED
+};
+
+struct si_sm_data {
+ enum bt_states state;
+ enum bt_states last_state; /* assist printing and resets */
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ int write_count;
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ int read_count;
+ int truncated;
+ long timeout;
+ unsigned int error_retries; /* end of "common" fields */
+ int nonzero_status; /* hung BMCs stay all 0 */
+};
+
+#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR 0x02
+#define BT_H2B_ATN 0x04
+#define BT_B2H_ATN 0x08
+#define BT_SMS_ATN 0x10
+#define BT_OEM0 0x20
+#define BT_H_BUSY 0x40
+#define BT_B_BUSY 0x80
+
+/* Some bits are toggled on each write: write once to set it, once
+ more to clear it; writing a zero does nothing. To absolutely
+ clear it, check its state and write if set. This avoids the "get
+ current then use as mask" scheme to modify one bit. Note that the
+ variable "bt" is hardcoded into these macros. */
+
+#define BT_STATUS bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
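+
+/* For example, to force H_BUSY clear using the check-then-write idiom
+   described above (a sketch; reset_flags() below does exactly this):
+
+	if (BT_STATUS & BT_H_BUSY)
+		BT_CONTROL(BT_H_BUSY);    (toggles it back to 0)
+*/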
+
+/* Convenience routines for debugging. These are not multi-open safe!
+ Note the macros have hardcoded variables in them. */
+
+static char *state2txt(unsigned char state)
+{
+ switch (state) {
+ case BT_STATE_IDLE: return("IDLE");
+ case BT_STATE_XACTION_START: return("XACTION");
+ case BT_STATE_WRITE_BYTES: return("WR_BYTES");
+ case BT_STATE_WRITE_END: return("WR_END");
+ case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
+ case BT_STATE_B2H_WAIT: return("B2H_WAIT");
+ case BT_STATE_READ_END: return("RD_END");
+ case BT_STATE_RESET1: return("RESET1");
+ case BT_STATE_RESET2: return("RESET2");
+ case BT_STATE_RESET3: return("RESET3");
+ case BT_STATE_RESTART: return("RESTART");
+ case BT_STATE_HOSED: return("HOSED");
+ }
+ return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status, char *buf)
+{
+ strcpy(buf, "[ ");
+ if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
+ if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
+ if (status & BT_OEM0) strcat(buf, "OEM0 ");
+ if (status & BT_SMS_ATN) strcat(buf, "SMS ");
+ if (status & BT_B2H_ATN) strcat(buf, "B2H ");
+ if (status & BT_H2B_ATN) strcat(buf, "H2B ");
+ strcat(buf, "]");
+ return buf;
+}
+#define STATUS2TXT(buf) status2txt(status, buf)
+
+/* This will be called from within this module on a hosed condition */
+#define FIRST_SEQ 0
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+ bt->state = BT_STATE_IDLE;
+ bt->last_state = BT_STATE_IDLE;
+ bt->seq = FIRST_SEQ;
+ bt->io = io;
+ bt->write_count = 0;
+ bt->read_count = 0;
+ bt->error_retries = 0;
+ bt->nonzero_status = 0;
+ bt->truncated = 0;
+ bt->timeout = BT_NORMAL_TIMEOUT;
+ return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+static int bt_start_transaction(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH)) return -1;
+
+ if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
+ return -2;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
+ printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
+ for (i = 0; i < size; i ++) printk (" %02x", data[i]);
+ printk("\n");
+ }
+ bt->write_data[0] = size + 1; /* all data plus seq byte */
+ bt->write_data[1] = *data; /* NetFn/LUN */
+ bt->write_data[2] = bt->seq;
+ memcpy(bt->write_data + 3, data + 1, size - 1);
+ bt->write_count = size + 2;
+
+ bt->error_retries = 0;
+ bt->nonzero_status = 0;
+ bt->read_count = 0;
+ bt->truncated = 0;
+ bt->state = BT_STATE_XACTION_START;
+ bt->last_state = BT_STATE_IDLE;
+ bt->timeout = BT_NORMAL_TIMEOUT;
+ return 0;
+}
+
+/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ it calls this. Strip out the length and seq bytes. */
+
+static int bt_get_result(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i, msg_len;
+
+ msg_len = bt->read_count - 2; /* account for length & seq */
+ /* Always NetFn, Cmd, cCode */
+ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+ printk(KERN_WARNING "BT results: bad msg_len = %d\n", msg_len);
+ data[0] = bt->write_data[1] | 0x4; /* Kludge a response */
+ data[1] = bt->write_data[3];
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ msg_len = 3;
+ } else {
+ data[0] = bt->read_data[1];
+ data[1] = bt->read_data[3];
+ if (length < msg_len) bt->truncated = 1;
+ if (bt->truncated) { /* can be set in read_all_bytes() */
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ msg_len = 3;
+ } else memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk (KERN_WARNING "BT: res (raw)");
+ for (i = 0; i < msg_len; i++) printk(" %02x", data[i]);
+ printk ("\n");
+ }
+ }
+ bt->read_count = 0; /* paranoia */
+ return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST 0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+ if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY);
+ if (BT_STATUS & BT_B_BUSY) BT_CONTROL(BT_B_BUSY);
+ BT_CONTROL(BT_CLR_WR_PTR);
+ BT_CONTROL(BT_SMS_ATN);
+ BT_INTMASK_W(BT_BMC_HWRST);
+#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION
+ if (BT_STATUS & BT_B2H_ATN) {
+ int i;
+ BT_CONTROL(BT_H_BUSY);
+ BT_CONTROL(BT_B2H_ATN);
+ BT_CONTROL(BT_CLR_RD_PTR);
+ for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) BMC2HOST;
+ BT_CONTROL(BT_H_BUSY);
+ }
+#endif
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+ int i;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+ bt->write_count, bt->seq);
+ for (i = 0; i < bt->write_count; i++)
+ printk (" %02x", bt->write_data[i]);
+ printk ("\n");
+ }
+ for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+ unsigned char i;
+
+ bt->read_data[0] = BMC2HOST;
+ bt->read_count = bt->read_data[0];
+ if (bt_debug & BT_DEBUG_MSG)
+ printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);
+
+ /* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
+ following the length byte. */
+ if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+ if (bt_debug & BT_DEBUG_MSG)
+ printk("bad length %d\n", bt->read_count);
+ bt->truncated = 1;
+ return 1; /* let next XACTION START clean it up */
+ }
+ for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST;
+ bt->read_count++; /* account for the length byte */
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ for (i = 0; i < bt->read_count; i++)
+ printk (" %02x", bt->read_data[i]);
+ printk ("\n");
+ }
+ if (bt->seq != bt->write_data[2]) /* idiot check */
+ printk(KERN_WARNING "BT: internal error: sequence mismatch\n");
+
+ /* per the spec, the (NetFn, Seq, Cmd) tuples should match */
+ if ((bt->read_data[3] == bt->write_data[3]) && /* Cmd */
+ (bt->read_data[2] == bt->write_data[2]) && /* Sequence */
+ ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+ return 1;
+
+ if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "BT: bad packet: "
+ "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+ bt->write_data[1], bt->write_data[2], bt->write_data[3],
+ bt->read_data[1], bt->read_data[2], bt->read_data[3]);
+ return 0;
+}
+
+/* Modifies bt->state appropriately, need to get into the bt_event() switch */
+
+static void error_recovery(struct si_sm_data *bt, char *reason)
+{
+ unsigned char status;
+ char buf[40]; /* For getting status */
+
+ bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
+
+ status = BT_STATUS;
+ printk(KERN_WARNING "BT: %s in %s %s ", reason, STATE2TXT,
+ STATUS2TXT(buf));
+
+ (bt->error_retries)++;
+ if (bt->error_retries > BT_RETRY_LIMIT) {
+ printk("retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
+ bt->state = BT_STATE_HOSED;
+ if (!bt->nonzero_status)
+ printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
+ else if (bt->seq == FIRST_SEQ + BT_RETRY_LIMIT) {
+ /* most likely during insmod */
+ printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+ bt->state = BT_STATE_RESET1;
+ }
+ return;
+ }
+
+ /* Sometimes the BMC queues get in an "off-by-one" state...*/
+ if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
+ printk("retry B2H_WAIT\n");
+ return;
+ }
+
+ printk("restart command\n");
+ bt->state = BT_STATE_RESTART;
+}
+
+/* Check the status and (possibly) advance the BT state machine. The
+ default return is SI_SM_CALL_WITH_DELAY. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+ unsigned char status;
+ char buf[40]; /* For getting status */
+ int i;
+
+ status = BT_STATUS;
+ bt->nonzero_status |= status;
+
+ if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
+ printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
+ STATE2TXT,
+ STATUS2TXT(buf),
+ bt->timeout,
+ time);
+ bt->last_state = bt->state;
+
+ if (bt->state == BT_STATE_HOSED) return SI_SM_HOSED;
+
+ if (bt->state != BT_STATE_IDLE) { /* do timeout test */
+
+ /* Certain states, on error conditions, can lock up a CPU
+ because they are effectively in an infinite loop with
+ CALL_WITHOUT_DELAY (right back here with time == 0).
+ Prevent infinite lockup by ALWAYS decrementing timeout. */
+
+ /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
+ (noticed in ipmi_smic_sm.c January 2004) */
+
+ if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) time = 100;
+ bt->timeout -= time;
+ if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
+ error_recovery(bt, "timed out");
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+ }
+
+ switch (bt->state) {
+
+ case BT_STATE_IDLE: /* check for asynchronous messages */
+ if (status & BT_SMS_ATN) {
+ BT_CONTROL(BT_SMS_ATN); /* clear it */
+ return SI_SM_ATTN;
+ }
+ return SI_SM_IDLE;
+
+ case BT_STATE_XACTION_START:
+ if (status & BT_H_BUSY) {
+ BT_CONTROL(BT_H_BUSY);
+ break;
+ }
+ if (status & BT_B2H_ATN) break;
+ bt->state = BT_STATE_WRITE_BYTES;
+ return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
+
+ case BT_STATE_WRITE_BYTES:
+ if (status & (BT_B_BUSY | BT_H2B_ATN)) break;
+ BT_CONTROL(BT_CLR_WR_PTR);
+ write_all_bytes(bt);
+ BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */
+ bt->state = BT_STATE_WRITE_CONSUME;
+ return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
+
+ case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
+ if (status & (BT_H2B_ATN | BT_B_BUSY)) break;
+ bt->state = BT_STATE_B2H_WAIT;
+ /* fall through with status */
+
+ /* Stay in BT_STATE_B2H_WAIT until a packet matches. However, spinning
+ hard here, constantly reading status, seems to hold off the
+ generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
+
+ case BT_STATE_B2H_WAIT:
+ if (!(status & BT_B2H_ATN)) break;
+
+ /* Assume ordered, uncached writes: no need to wait */
+ if (!(status & BT_H_BUSY)) BT_CONTROL(BT_H_BUSY); /* set */
+ BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */
+ BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */
+ i = read_all_bytes(bt);
+ BT_CONTROL(BT_H_BUSY); /* clear */
+ if (!i) break; /* Try this state again */
+ bt->state = BT_STATE_READ_END;
+ return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
+
+ case BT_STATE_READ_END:
+
+ /* I could wait on BT_H_BUSY to go clear for a truly clean
+ exit. However, this is already done in XACTION_START
+ and the (possible) extra loop/status/possible wait affects
+ performance. So, as long as it works, just ignore H_BUSY */
+
+#ifdef MAKE_THIS_TRUE_IF_NECESSARY
+
+ if (status & BT_H_BUSY) break;
+#endif
+ bt->seq++;
+ bt->state = BT_STATE_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+
+ case BT_STATE_RESET1:
+ reset_flags(bt);
+ bt->timeout = BT_RESET_DELAY;
+ bt->state = BT_STATE_RESET2;
+ break;
+
+ case BT_STATE_RESET2: /* Send a soft reset */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ HOST2BMC(3); /* number of bytes following */
+ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */
+ HOST2BMC(42); /* Sequence number */
+ HOST2BMC(3); /* Cmd == Soft reset */
+ BT_CONTROL(BT_H2B_ATN);
+ bt->state = BT_STATE_RESET3;
+ break;
+
+ case BT_STATE_RESET3:
+ if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY;
+ bt->state = BT_STATE_RESTART; /* printk in debug modes */
+ break;
+
+ case BT_STATE_RESTART: /* don't reset retries! */
+ bt->write_data[2] = ++bt->seq;
+ bt->read_count = 0;
+ bt->nonzero_status = 0;
+ bt->timeout = BT_NORMAL_TIMEOUT;
+ bt->state = BT_STATE_XACTION_START;
+ break;
+
+ default: /* HOSED is supposed to be caught much earlier */
+ error_recovery(bt, "internal logic error");
+ break;
+ }
+ return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
+ /* It's impossible for the BT status and interrupt registers to be
+ all 1's, (assuming a properly functioning, self-initialized BMC)
+ but that's what you get from reading a bogus address, so we
+ test that first. The calling routine uses negative logic. */
+
+ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1;
+ reset_flags(bt);
+ return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+struct si_sm_handlers bt_smi_handlers =
+{
+ .version = IPMI_BT_VERSION,
+ .init_data = bt_init_data,
+ .start_transaction = bt_start_transaction,
+ .get_result = bt_get_result,
+ .event = bt_event,
+ .detect = bt_detect,
+ .cleanup = bt_cleanup,
+ .size = bt_size,
+};
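To make the framing in bt_start_transaction() concrete: for a Get Device ID request (NetFn 0x06/App, LUN 0, command 0x01) the caller passes data[] = { 0x18, 0x01 } with size = 2, and the code above lays out the outgoing buffer as follows (a sketch; the sequence byte is whatever bt->seq holds at the time):

    write_data[0] = 0x03;  /* length = size + 1: NetFn/LUN, seq, cmd */
    write_data[1] = 0x18;  /* NetFn 0x06 << 2 | LUN 0 */
    write_data[2] = seq;   /* bt->seq, echoed back by the BMC */
    write_data[3] = 0x01;  /* cmd: Get Device ID */
    /* write_count = 4; write_all_bytes() pushes these via HOST2BMC() */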
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 000000000000..49d67f5384a2
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,582 @@
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/ipmi.h>
+#include <asm/semaphore.h>
+#include <linux/init.h>
+
+#define IPMI_DEVINTF_VERSION "v33"
+
+struct ipmi_file_private
+{
+ ipmi_user_t user;
+ spinlock_t recv_msg_lock;
+ struct list_head recv_msgs;
+ struct file *file;
+ struct fasync_struct *fasync_queue;
+ wait_queue_head_t wait;
+ struct semaphore recv_sem;
+ int default_retries;
+ unsigned int default_retry_time_ms;
+};
+
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ struct ipmi_file_private *priv = handler_data;
+ int was_empty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+
+ was_empty = list_empty(&(priv->recv_msgs));
+ list_add_tail(&(msg->link), &(priv->recv_msgs));
+
+ if (was_empty) {
+ wake_up_interruptible(&priv->wait);
+ kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+ }
+
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(file, &priv->wait, wait);
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+ if (! list_empty(&(priv->recv_msgs)))
+ mask |= (POLLIN | POLLRDNORM);
+
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int result;
+
+ result = fasync_helper(fd, file, on, &priv->fasync_queue);
+
+ return (result);
+}
+
+static struct ipmi_user_hndl ipmi_hndlrs =
+{
+ .ipmi_recv_hndl = file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+ int if_num = iminor(inode);
+ int rv;
+ struct ipmi_file_private *priv;
+
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->file = file;
+
+ rv = ipmi_create_user(if_num,
+ &ipmi_hndlrs,
+ priv,
+ &(priv->user));
+ if (rv) {
+ kfree(priv);
+ return rv;
+ }
+
+ file->private_data = priv;
+
+ spin_lock_init(&(priv->recv_msg_lock));
+ INIT_LIST_HEAD(&(priv->recv_msgs));
+ init_waitqueue_head(&priv->wait);
+ priv->fasync_queue = NULL;
+ sema_init(&(priv->recv_sem), 1);
+
+ /* Use the low-level defaults. */
+ priv->default_retries = -1;
+ priv->default_retry_time_ms = 0;
+
+ return 0;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int rv;
+
+ rv = ipmi_destroy_user(priv->user);
+ if (rv)
+ return rv;
+
+ ipmi_fasync (-1, file, 0);
+
+ /* FIXME - free the messages in the list. */
+ kfree(priv);
+
+ return 0;
+}
+
+static int handle_send_req(ipmi_user_t user,
+ struct ipmi_req *req,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv;
+ struct ipmi_addr addr;
+ struct kernel_ipmi_msg msg;
+
+ if (req->addr_len > sizeof(struct ipmi_addr))
+ return -EINVAL;
+
+ if (copy_from_user(&addr, req->addr, req->addr_len))
+ return -EFAULT;
+
+ msg.netfn = req->msg.netfn;
+ msg.cmd = req->msg.cmd;
+ msg.data_len = req->msg.data_len;
+ msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msg.data)
+ return -ENOMEM;
+
+ /* From here out we cannot return, we must jump to "out" for
+ error exits to free msgdata. */
+
+ rv = ipmi_validate_addr(&addr, req->addr_len);
+ if (rv)
+ goto out;
+
+ if (req->msg.data != NULL) {
+ if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+ rv = -EMSGSIZE;
+ goto out;
+ }
+
+ if (copy_from_user(msg.data,
+ req->msg.data,
+ req->msg.data_len))
+ {
+ rv = -EFAULT;
+ goto out;
+ }
+ } else {
+ msg.data_len = 0;
+ }
+
+ rv = ipmi_request_settime(user,
+ &addr,
+ req->msgid,
+ &msg,
+ NULL,
+ 0,
+ retries,
+ retry_time_ms);
+ out:
+ kfree(msg.data);
+ return rv;
+}
+
+static int ipmi_ioctl(struct inode *inode,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int rv = -EINVAL;
+ struct ipmi_file_private *priv = file->private_data;
+ void __user *arg = (void __user *)data;
+
+ switch (cmd)
+ {
+ case IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req,
+ priv->default_retries,
+ priv->default_retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req.req,
+ req.retries,
+ req.retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_RECEIVE_MSG:
+ case IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv rsp;
+ int addr_len;
+ struct list_head *entry;
+ struct ipmi_recv_msg *msg;
+ unsigned long flags;
+
+
+ rv = 0;
+ if (copy_from_user(&rsp, arg, sizeof(rsp))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ /* We claim a semaphore because we don't want two
+ users getting something from the queue at a time.
+ Since we have to release the spinlock before we can
+ copy the data to the user, it's possible another
+ user will grab something from the queue, too. Then
+ the messages might get out of order if something
+ fails and the message gets put back onto the
+ queue. This semaphore prevents that problem. */
+ down(&(priv->recv_sem));
+
+ /* Grab the message off the list. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ if (list_empty(&(priv->recv_msgs))) {
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+ rv = -EAGAIN;
+ goto recv_err;
+ }
+ entry = priv->recv_msgs.next;
+ msg = list_entry(entry, struct ipmi_recv_msg, link);
+ list_del(entry);
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+
+ addr_len = ipmi_addr_length(msg->addr.addr_type);
+ if (rsp.addr_len < addr_len)
+ {
+ rv = -EINVAL;
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp.addr_len = addr_len;
+
+ rsp.recv_type = msg->recv_type;
+ rsp.msgid = msg->msgid;
+ rsp.msg.netfn = msg->msg.netfn;
+ rsp.msg.cmd = msg->msg.cmd;
+
+ if (msg->msg.data_len > 0) {
+ if (rsp.msg.data_len < msg->msg.data_len) {
+ rv = -EMSGSIZE;
+ if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
+ msg->msg.data_len = rsp.msg.data_len;
+ } else {
+ goto recv_putback_on_err;
+ }
+ }
+
+ if (copy_to_user(rsp.msg.data,
+ msg->msg.data,
+ msg->msg.data_len))
+ {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp.msg.data_len = msg->msg.data_len;
+ } else {
+ rsp.msg.data_len = 0;
+ }
+
+ if (copy_to_user(arg, &rsp, sizeof(rsp))) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+
+ up(&(priv->recv_sem));
+ ipmi_free_recv_msg(msg);
+ break;
+
+ recv_putback_on_err:
+ /* If we got an error, put the message back onto
+ the head of the queue. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ list_add(entry, &(priv->recv_msgs));
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+ up(&(priv->recv_sem));
+ break;
+
+ recv_err:
+ up(&(priv->recv_sem));
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
+ break;
+ }
+
+ case IPMICTL_SET_GETS_EVENTS_CMD:
+ {
+ int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_gets_events(priv->user, val);
+ break;
+ }
+
+ case IPMICTL_SET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ ipmi_set_my_address(priv->user, val);
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+
+ val = ipmi_get_my_address(priv->user);
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_SET_MY_LUN_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ ipmi_set_my_LUN(priv->user, val);
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_MY_LUN_CMD:
+ {
+ unsigned int val;
+
+ val = ipmi_get_my_LUN(priv->user);
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+ case IPMICTL_SET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ if (copy_from_user(&parms, arg, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ priv->default_retries = parms.retries;
+ priv->default_retry_time_ms = parms.retry_time_ms;
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ parms.retries = priv->default_retries;
+ parms.retry_time_ms = priv->default_retry_time_ms;
+
+ if (copy_to_user(arg, &parms, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = 0;
+ break;
+ }
+ }
+
+ return rv;
+}
+
+
+static struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = ipmi_ioctl,
+ .open = ipmi_open,
+ .release = ipmi_release,
+ .fasync = ipmi_fasync,
+ .poll = ipmi_poll,
+};
+
+#define DEVICE_NAME "ipmidev"
+
+static int ipmi_major = 0;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
+ " default, or if you set it to zero, it will choose the next"
+ " available device. Setting it to -1 will disable the"
+ " interface. Other values will set the major device number"
+ " to that value.");
+
+static void ipmi_new_smi(int if_num)
+{
+ devfs_mk_cdev(MKDEV(ipmi_major, if_num),
+ S_IFCHR | S_IRUSR | S_IWUSR,
+ "ipmidev/%d", if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ devfs_remove("ipmidev/%d", if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone,
+};
+
+static __init int init_ipmi_devintf(void)
+{
+ int rv;
+
+ if (ipmi_major < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "ipmi device interface version "
+ IPMI_DEVINTF_VERSION "\n");
+
+ rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+ if (rv < 0) {
+ printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+ return rv;
+ }
+
+ if (ipmi_major == 0) {
+ ipmi_major = rv;
+ }
+
+ devfs_mk_dir(DEVICE_NAME);
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+ printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+ return rv;
+ }
+
+ return 0;
+}
+module_init(init_ipmi_devintf);
+
+static __exit void cleanup_ipmi(void)
+{
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ devfs_remove(DEVICE_NAME);
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
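A minimal user-space sketch of the ioctl interface above (the device path is hypothetical -- it depends on the devfs/udev setup -- and the structures come from the userland <linux/ipmi.h>); it sends Get Device ID to the local BMC and reads back the response:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ipmi.h>

    int main(void)
    {
            struct ipmi_system_interface_addr si = {
                    .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
                    .channel   = IPMI_BMC_CHANNEL,
                    .lun       = 0,
            };
            unsigned char data[IPMI_MAX_MSG_LENGTH];
            struct ipmi_addr raddr;
            struct ipmi_recv recv;
            struct ipmi_req req;
            int fd = open("/dev/ipmidev/0", O_RDWR);   /* hypothetical path */

            if (fd < 0)
                    return 1;

            memset(&req, 0, sizeof(req));
            req.addr         = (unsigned char *) &si;
            req.addr_len     = sizeof(si);
            req.msgid        = 1;            /* echoed back in the response */
            req.msg.netfn    = 0x06;         /* App */
            req.msg.cmd      = 0x01;         /* Get Device ID */
            req.msg.data     = NULL;
            req.msg.data_len = 0;
            if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
                    return 1;

            sleep(1);                        /* crude; poll()/select() work too */

            memset(&recv, 0, sizeof(recv));
            recv.addr         = (unsigned char *) &raddr;
            recv.addr_len     = sizeof(raddr);
            recv.msg.data     = data;
            recv.msg.data_len = sizeof(data);
            if (ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv) == 0)
                    printf("completion code 0x%02x\n", data[0]);
            close(fd);
            return 0;
    }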
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 000000000000..48cce24329be
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,500 @@
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim. If you have questions about the states, see
+ * that document.
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define IPMI_KCS_VERSION "v33"
+
+/* Set this if you want a printout of why the state machine was hosed
+ when it gets hosed. */
+#define DEBUG_HOSED_REASON
+
+/* Print the state machine state on entry every time. */
+#undef DEBUG_STATE
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+ KCS_IDLE, /* The KCS interface is currently
+ doing nothing. */
+ KCS_START_OP, /* We are starting an operation. The
+ data is in the output buffer, but
+ nothing has been done to the
+ interface yet. This was added to
+ the state machine in the spec to
+ wait for the initial IBF. */
+ KCS_WAIT_WRITE_START, /* We have written a write cmd to the
+ interface. */
+ KCS_WAIT_WRITE, /* We are writing bytes to the
+ interface. */
+ KCS_WAIT_WRITE_END, /* We have written the write end cmd
+ to the interface, and still need to
+ write the last byte. */
+ KCS_WAIT_READ, /* We are waiting to read data from
+ the interface. */
+ KCS_ERROR0, /* State to transition to the error
+ handler, this was added to the
+ state machine in the spec to be
+ sure IBF was there. */
+ KCS_ERROR1, /* First stage error handler, wait for
+ the interface to respond. */
+ KCS_ERROR2, /* The abort cmd has been written,
+ wait for the interface to
+ respond. */
+ KCS_ERROR3, /* We wrote some data to the
+ interface, wait for it to switch to
+ read mode. */
+ KCS_HOSED /* The hardware failed to follow the
+ state machine. */
+};
+
+#define MAX_KCS_READ_SIZE 80
+#define MAX_KCS_WRITE_SIZE 80
+
+/* Timeouts in microseconds. */
+#define IBF_RETRY_TIMEOUT 1000000
+#define OBF_RETRY_TIMEOUT 1000000
+#define MAX_ERROR_RETRIES 10
+
+struct si_sm_data
+{
+ enum kcs_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_KCS_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_KCS_READ_SIZE];
+ int read_pos;
+ int truncated;
+
+ unsigned int error_retries;
+ long ibf_timeout;
+ long obf_timeout;
+};
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
+{
+ kcs->state = KCS_IDLE;
+ kcs->io = io;
+ kcs->write_pos = 0;
+ kcs->write_count = 0;
+ kcs->orig_write_count = 0;
+ kcs->read_pos = 0;
+ kcs->error_retries = 0;
+ kcs->truncated = 0;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+ /* Reserve 2 I/O bytes. */
+ return 2;
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT 0x60
+#define KCS_WRITE_START 0x61
+#define KCS_WRITE_END 0x62
+#define KCS_READ_BYTE 0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE 0
+#define KCS_READ_STATE 1
+#define KCS_WRITE_STATE 2
+#define KCS_ERROR_STATE 3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+ write_data(kcs, kcs->write_data[kcs->write_pos]);
+ (kcs->write_pos)++;
+ (kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+ (kcs->error_retries)++;
+ if (kcs->error_retries > MAX_ERROR_RETRIES) {
+#ifdef DEBUG_HOSED_REASON
+ printk("ipmi_kcs_sm: kcs hosed: %s\n", reason);
+#endif
+ kcs->state = KCS_HOSED;
+ } else {
+ kcs->state = KCS_ERROR0;
+ }
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+ if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+ /* Throw the data away and mark it truncated. */
+ read_data(kcs);
+ kcs->truncated = 1;
+ } else {
+ kcs->read_data[kcs->read_pos] = read_data(kcs);
+ (kcs->read_pos)++;
+ }
+ write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (GET_STATUS_IBF(status)) {
+ kcs->ibf_timeout -= time;
+ if (kcs->ibf_timeout < 0) {
+ start_error_recovery(kcs, "IBF not ready in time");
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+ }
+ return 0;
+ }
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (! GET_STATUS_OBF(status)) {
+ kcs->obf_timeout -= time;
+ if (kcs->obf_timeout < 0) {
+ start_error_recovery(kcs, "OBF not ready in time");
+ return 1;
+ }
+ return 0;
+ }
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+ if (GET_STATUS_OBF(status))
+ read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+ kcs->write_count = kcs->orig_write_count;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_WAIT_WRITE_START;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int size)
+{
+ if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
+ return -1;
+ }
+
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ return -2;
+ }
+
+ kcs->error_retries = 0;
+ memcpy(kcs->write_data, data, size);
+ kcs->write_count = size;
+ kcs->orig_write_count = size;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_START_OP;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int length)
+{
+ if (length < kcs->read_pos) {
+ kcs->read_pos = length;
+ kcs->truncated = 1;
+ }
+
+ memcpy(data, kcs->read_data, kcs->read_pos);
+
+ if ((length >= 3) && (kcs->read_pos < 3)) {
+ /* Guarantee that we return at least 3 bytes, with an
+ error in the third byte if it is too short. */
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ kcs->read_pos = 3;
+ }
+ if (kcs->truncated) {
+ /* Report a truncated error. We might overwrite
+ another error, but that's too bad, the user needs
+ to know it was truncated. */
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ kcs->truncated = 0;
+ }
+
+ return kcs->read_pos;
+}
+
+/* This implements the state machine defined in the IPMI manual, see
+ that for details on how this works. Divide that flowchart into
+ sections delimited by "Wait for IBF" and this will become clear. */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+ unsigned char status;
+ unsigned char state;
+
+ status = read_status(kcs);
+
+#ifdef DEBUG_STATE
+ printk(" State = %d, %x\n", kcs->state, status);
+#endif
+ /* All states wait for ibf, so just do it here. */
+ if (!check_ibf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ /* Just about everything looks at the KCS state, so grab that, too. */
+ state = GET_STATUS_STATE(status);
+
+ switch (kcs->state) {
+ case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+ clear_obf(kcs, status);
+
+ if (GET_STATUS_ATN(status))
+ return SI_SM_ATTN;
+ else
+ return SI_SM_IDLE;
+
+ case KCS_START_OP:
+ if (state != KCS_IDLE) {
+ start_error_recovery(kcs,
+ "State machine not idle at start");
+ break;
+ }
+
+ clear_obf(kcs, status);
+ write_cmd(kcs, KCS_WRITE_START);
+ kcs->state = KCS_WAIT_WRITE_START;
+ break;
+
+ case KCS_WAIT_WRITE_START:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(
+ kcs,
+ "Not in write state at write start");
+ break;
+ }
+ read_data(kcs);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_WRITE;
+ }
+ break;
+
+ case KCS_WAIT_WRITE:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write");
+ break;
+ }
+ clear_obf(kcs, status);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ }
+ break;
+
+ case KCS_WAIT_WRITE_END:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write end");
+ break;
+ }
+ clear_obf(kcs, status);
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_READ;
+ break;
+
+ case KCS_WAIT_READ:
+ if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+ start_error_recovery(
+ kcs,
+ "Not in read or idle in read state");
+ break;
+ }
+
+ if (state == KCS_READ_STATE) {
+ if (! check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+ read_next_byte(kcs);
+ } else {
+ /* We don't implement this exactly like the state
+ machine in the spec. Some broken hardware
+ does not write the final dummy byte to the
+ read register. Thus obf will never go high
+ here. We just go straight to idle, and we
+ handle clearing out obf in idle state if it
+ happens to come in. */
+ clear_obf(kcs, status);
+ kcs->orig_write_count = 0;
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_ERROR0:
+ clear_obf(kcs, status);
+ write_cmd(kcs, KCS_GET_STATUS_ABORT);
+ kcs->state = KCS_ERROR1;
+ break;
+
+ case KCS_ERROR1:
+ clear_obf(kcs, status);
+ write_data(kcs, 0);
+ kcs->state = KCS_ERROR2;
+ break;
+
+ case KCS_ERROR2:
+ if (state != KCS_READ_STATE) {
+ start_error_recovery(kcs,
+ "Not in read state for error2");
+ break;
+ }
+ if (! check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ write_data(kcs, KCS_READ_BYTE);
+ kcs->state = KCS_ERROR3;
+ break;
+
+ case KCS_ERROR3:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "Not in idle state for error3");
+ break;
+ }
+
+ if (! check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ if (kcs->orig_write_count) {
+ restart_kcs_transaction(kcs);
+ } else {
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_HOSED:
+ break;
+ }
+
+ if (kcs->state == KCS_HOSED) {
+ init_kcs_data(kcs, kcs->io);
+ return SI_SM_HOSED;
+ }
+
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+ /* It's impossible for the KCS status register to be all 1's,
+ (assuming a properly functioning, self-initialized BMC)
+ but that's what you get from reading a bogus address, so we
+ test that first. */
+ if (read_status(kcs) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+struct si_sm_handlers kcs_smi_handlers =
+{
+ .version = IPMI_KCS_VERSION,
+ .init_data = init_kcs_data,
+ .start_transaction = start_kcs_transaction,
+ .get_result = get_kcs_result,
+ .event = kcs_event,
+ .detect = kcs_detect,
+ .cleanup = kcs_cleanup,
+ .size = kcs_size,
+};
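Both this state machine and the BT one above are consumed through the si_sm_handlers table; the caller (ipmi_si_intf.c) drives a request through it roughly like this (a simplified sketch of the calling convention -- "handlers", "sm" and the timing variable are illustrative, and the real driver interleaves this with interrupts and timers):

    unsigned char req[2] = { 0x18, 0x01 };     /* NetFn/LUN, cmd */
    unsigned char rsp[IPMI_MAX_MSG_LENGTH];
    enum si_sm_result res;

    handlers->start_transaction(sm, req, sizeof(req));
    do {
            res = handlers->event(sm, elapsed_microseconds);
    } while (res == SI_SM_CALL_WITH_DELAY ||
             res == SI_SM_CALL_WITHOUT_DELAY);

    if (res == SI_SM_TRANSACTION_COMPLETE)
            handlers->get_result(sm, rsp, sizeof(rsp));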
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 000000000000..a6606a1aced7
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,3174 @@
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#define PFX "IPMI message handler: "
+#define IPMI_MSGHANDLER_VERSION "v33"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+
+static int initialized = 0;
+
+static struct proc_dir_entry *proc_ipmi_root = NULL;
+
+#define MAX_EVENTS_IN_QUEUE 25
+
+/* Don't let a message sit in a queue forever, always time it with at least
+ the max message timer. This is in milliseconds. */
+#define MAX_MSG_TIMEOUT 60000
+
+struct ipmi_user
+{
+ struct list_head link;
+
+ /* The upper layer that handles receive messages. */
+ struct ipmi_user_hndl *handler;
+ void *handler_data;
+
+ /* The interface this user is bound to. */
+ ipmi_smi_t intf;
+
+ /* Does this interface receive IPMI events? */
+ int gets_events;
+};
+
+struct cmd_rcvr
+{
+ struct list_head link;
+
+ ipmi_user_t user;
+ unsigned char netfn;
+ unsigned char cmd;
+};
+
+struct seq_table
+{
+ unsigned int inuse : 1;
+ unsigned int broadcast : 1;
+
+ unsigned long timeout;
+ unsigned long orig_timeout;
+ unsigned int retries_left;
+
+ /* To verify on an incoming send message response that this is
+ the message that the response is for, we keep a sequence id
+ and increment it every time we send a message. */
+ long seqid;
+
+ /* This is held so we can properly respond to the message on a
+ timeout, and it is used to hold the temporary data for
+ retransmission, too. */
+ struct ipmi_recv_msg *recv_msg;
+};
+
+/* Store the information in a msgid (long) to allow us to find a
+ sequence table entry from the msgid. */
+#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+ do { \
+ seq = ((msgid >> 26) & 0x3f); \
+ seqid = (msgid & 0x3fffff); \
+ } while(0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
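+
+/* Example: seq 0x12 packed with seqid 5 yields msgid 0x48000005;
+   unpacking recovers seq = 0x12 and seqid = 5. NEXT_SEQID keeps
+   seqid within 22 bits, so the wider mask in STORE_SEQ_IN_MSGID
+   never comes into play. */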
+
+struct ipmi_channel
+{
+ unsigned char medium;
+ unsigned char protocol;
+};
+
+struct ipmi_proc_entry
+{
+ char *name;
+ struct ipmi_proc_entry *next;
+};
+
+#define IPMI_IPMB_NUM_SEQ 64
+#define IPMI_MAX_CHANNELS 8
+struct ipmi_smi
+{
+ /* What interface number are we? */
+ int intf_num;
+
+ /* The list of upper layers that are using me. We read-lock
+ this when delivering messages to the upper layer to keep
+ the user from going away while we are processing the
+ message. This means that you cannot add or delete a user
+ from the receive callback. */
+ rwlock_t users_lock;
+ struct list_head users;
+
+ /* Used for wake ups at startup. */
+ wait_queue_head_t waitq;
+
+ /* The IPMI version of the BMC on the other end. */
+ unsigned char version_major;
+ unsigned char version_minor;
+
+ /* This is the lower-layer's sender routine. */
+ struct ipmi_smi_handlers *handlers;
+ void *send_info;
+
+ /* A list of proc entries for this interface. This does not
+ need a lock, only one thread creates it and only one thread
+ destroys it. */
+ struct ipmi_proc_entry *proc_entries;
+
+ /* A table of sequence numbers for this interface. We use the
+ sequence numbers for IPMB messages that go out of the
+ interface to match them up with their responses. A routine
+ is called periodically to time the items in this list. */
+ spinlock_t seq_lock;
+ struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+ int curr_seq;
+
+ /* Messages that were delayed for some reason (out of memory,
+ for instance), will go in here to be processed later in a
+ periodic timer interrupt. */
+ spinlock_t waiting_msgs_lock;
+ struct list_head waiting_msgs;
+
+ /* The list of command receivers that are registered for commands
+ on this interface. */
+ rwlock_t cmd_rcvr_lock;
+ struct list_head cmd_rcvrs;
+
+	/* Events that were queued because no one was there to receive
+ them. */
+ spinlock_t events_lock; /* For dealing with event stuff. */
+ struct list_head waiting_events;
+ unsigned int waiting_events_count; /* How many events in queue? */
+
+ /* This will be non-null if someone registers to receive all
+ IPMI commands (this is for interface emulation). There
+	   may not be anything in the cmd_rcvrs list above when
+ this is registered. */
+ ipmi_user_t all_cmd_rcvr;
+
+ /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
+ but may be changed by the user. */
+ unsigned char my_address;
+
+ /* My LUN. This should generally stay the SMS LUN, but just in
+ case... */
+ unsigned char my_lun;
+
+ /* The event receiver for my BMC, only really used at panic
+ shutdown as a place to store this. */
+ unsigned char event_receiver;
+ unsigned char event_receiver_lun;
+ unsigned char local_sel_device;
+ unsigned char local_event_generator;
+
+ /* A cheap hack, if this is non-null and a message to an
+ interface comes in with a NULL user, call this routine with
+ it. Note that the message will still be freed by the
+ caller. This only works on the system interface. */
+ void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg);
+
+ /* When we are scanning the channels for an SMI, this will
+ tell which channel we are scanning. */
+ int curr_channel;
+
+ /* Channel information */
+ struct ipmi_channel channels[IPMI_MAX_CHANNELS];
+
+ /* Proc FS stuff. */
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+ spinlock_t counter_lock; /* For making counters atomic. */
+
+ /* Commands we got that were invalid. */
+ unsigned int sent_invalid_commands;
+
+ /* Commands we sent to the MC. */
+ unsigned int sent_local_commands;
+ /* Responses from the MC that were delivered to a user. */
+ unsigned int handled_local_responses;
+ /* Responses from the MC that were not delivered to a user. */
+ unsigned int unhandled_local_responses;
+
+ /* Commands we sent out to the IPMB bus. */
+ unsigned int sent_ipmb_commands;
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ unsigned int sent_ipmb_command_errs;
+ /* Each retransmit increments this count. */
+ unsigned int retransmitted_ipmb_commands;
+ /* When a message times out (runs out of retransmits) this is
+ incremented. */
+ unsigned int timed_out_ipmb_commands;
+
+ /* This is like above, but for broadcasts. Broadcasts are
+ *not* included in the above count (they are expected to
+ time out). */
+ unsigned int timed_out_ipmb_broadcasts;
+
+ /* Responses I have sent to the IPMB bus. */
+ unsigned int sent_ipmb_responses;
+
+ /* The response was delivered to the user. */
+ unsigned int handled_ipmb_responses;
+ /* The response had invalid data in it. */
+ unsigned int invalid_ipmb_responses;
+ /* The response didn't have anyone waiting for it. */
+ unsigned int unhandled_ipmb_responses;
+
+	/* Commands we sent out on the LAN interface. */
+ unsigned int sent_lan_commands;
+	/* Commands sent on the LAN that had errors on the SEND CMD */
+ unsigned int sent_lan_command_errs;
+ /* Each retransmit increments this count. */
+ unsigned int retransmitted_lan_commands;
+ /* When a message times out (runs out of retransmits) this is
+ incremented. */
+ unsigned int timed_out_lan_commands;
+
+	/* Responses I have sent on the LAN interface. */
+ unsigned int sent_lan_responses;
+
+ /* The response was delivered to the user. */
+ unsigned int handled_lan_responses;
+ /* The response had invalid data in it. */
+ unsigned int invalid_lan_responses;
+ /* The response didn't have anyone waiting for it. */
+ unsigned int unhandled_lan_responses;
+
+ /* The command was delivered to the user. */
+ unsigned int handled_commands;
+ /* The command had invalid data in it. */
+ unsigned int invalid_commands;
+ /* The command didn't have anyone waiting for it. */
+ unsigned int unhandled_commands;
+
+ /* Invalid data in an event. */
+ unsigned int invalid_events;
+ /* Events that were received with the proper format. */
+ unsigned int events;
+};
+
+#define MAX_IPMI_INTERFACES 4
+static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
+
+/* Used to keep interfaces from going away while operations are
+   in progress on them. Take the read lock if you are not modifying
+   the interfaces, the write lock if you are. */
+static DECLARE_RWSEM(interfaces_sem);
+
+/* Directly protects the ipmi_interfaces data structure. This is
+ claimed in the timer interrupt. */
+static DEFINE_SPINLOCK(interfaces_lock);
+
+/* List of watchers that want to know when smi's are added and
+ deleted. */
+static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
+static DECLARE_RWSEM(smi_watchers_sem);
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+ int i;
+
+ down_read(&interfaces_sem);
+ down_write(&smi_watchers_sem);
+ list_add(&(watcher->link), &smi_watchers);
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ if (ipmi_interfaces[i] != NULL) {
+ watcher->new_smi(i);
+ }
+ }
+ up_write(&smi_watchers_sem);
+ up_read(&interfaces_sem);
+ return 0;
+}
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+ down_write(&smi_watchers_sem);
+ list_del(&(watcher->link));
+ up_write(&smi_watchers_sem);
+ return 0;
+}
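+
+/* Illustrative sketch (not part of this file): a client module would
+ typically register a watcher along these lines; the callback names
+ "my_new_smi" and "my_smi_gone" are hypothetical.
+
+ static void my_new_smi(int if_num)
+ {
+ printk(KERN_INFO "IPMI interface %d added\n", if_num);
+ }
+
+ static void my_smi_gone(int if_num)
+ {
+ printk(KERN_INFO "IPMI interface %d removed\n", if_num);
+ }
+
+ static struct ipmi_smi_watcher my_watcher = {
+ .owner    = THIS_MODULE,
+ .new_smi  = my_new_smi,
+ .smi_gone = my_smi_gone,
+ };
+
+ rv = ipmi_smi_watcher_register(&my_watcher);
+
+ Note that new_smi() is called immediately for every interface that
+ already exists when the watcher is registered. */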
+
+static void
+call_smi_watchers(int i)
+{
+ struct ipmi_smi_watcher *w;
+
+ down_read(&smi_watchers_sem);
+ list_for_each_entry(w, &smi_watchers, link) {
+ if (try_module_get(w->owner)) {
+ w->new_smi(i);
+ module_put(w->owner);
+ }
+ }
+ up_read(&smi_watchers_sem);
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+ if (addr1->addr_type != addr2->addr_type)
+ return 0;
+
+ if (addr1->channel != addr2->channel)
+ return 0;
+
+ if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr1
+ = (struct ipmi_system_interface_addr *) addr1;
+ struct ipmi_system_interface_addr *smi_addr2
+ = (struct ipmi_system_interface_addr *) addr2;
+ return (smi_addr1->lun == smi_addr2->lun);
+ }
+
+ if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ {
+ struct ipmi_ipmb_addr *ipmb_addr1
+ = (struct ipmi_ipmb_addr *) addr1;
+ struct ipmi_ipmb_addr *ipmb_addr2
+ = (struct ipmi_ipmb_addr *) addr2;
+
+ return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+ && (ipmb_addr1->lun == ipmb_addr2->lun));
+ }
+
+ if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
+ struct ipmi_lan_addr *lan_addr1
+ = (struct ipmi_lan_addr *) addr1;
+ struct ipmi_lan_addr *lan_addr2
+ = (struct ipmi_lan_addr *) addr2;
+
+ return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+ && (lan_addr1->local_SWID == lan_addr2->local_SWID)
+ && (lan_addr1->session_handle
+ == lan_addr2->session_handle)
+ && (lan_addr1->lun == lan_addr2->lun));
+ }
+
+ return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+ if (len < sizeof(struct ipmi_system_interface_addr)) {
+ return -EINVAL;
+ }
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ if (addr->channel != IPMI_BMC_CHANNEL)
+ return -EINVAL;
+ return 0;
+ }
+
+ if ((addr->channel == IPMI_BMC_CHANNEL)
+ || (addr->channel >= IPMI_NUM_CHANNELS)
+ || (addr->channel < 0))
+ return -EINVAL;
+
+ if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ {
+ if (len < sizeof(struct ipmi_ipmb_addr)) {
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ if (len < sizeof(struct ipmi_lan_addr)) {
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+ if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ return sizeof(struct ipmi_system_interface_addr);
+
+ if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ {
+ return sizeof(struct ipmi_ipmb_addr);
+ }
+
+ if (addr_type == IPMI_LAN_ADDR_TYPE)
+ return sizeof(struct ipmi_lan_addr);
+
+ return 0;
+}
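+
+/* Callers are expected to validate an address before trusting its
+ contents, along these lines (illustrative):
+
+ rv = ipmi_validate_addr(addr, len);
+ if (rv)
+ return rv;
+
+ ipmi_addr_length() above gives the length to pass for a given
+ address type. */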
+
+static void deliver_response(struct ipmi_recv_msg *msg)
+{
+ msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
+}
+
+/* Find the next sequence number not being used and add the given
+ message with the given timeout to the sequence table. This must be
+ called with the interface's seq_lock held. */
+static int intf_next_seq(ipmi_smi_t intf,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned long timeout,
+ int retries,
+ int broadcast,
+ unsigned char *seq,
+ long *seqid)
+{
+ int rv = 0;
+ unsigned int i;
+
+ for (i=intf->curr_seq;
+ (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+ i=(i+1)%IPMI_IPMB_NUM_SEQ)
+ {
+ if (! intf->seq_table[i].inuse)
+ break;
+ }
+
+ if (! intf->seq_table[i].inuse) {
+ intf->seq_table[i].recv_msg = recv_msg;
+
+ /* Start with the maximum timeout; when the send response
+ comes in we will start the real timer. */
+ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+ intf->seq_table[i].orig_timeout = timeout;
+ intf->seq_table[i].retries_left = retries;
+ intf->seq_table[i].broadcast = broadcast;
+ intf->seq_table[i].inuse = 1;
+ intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+ *seq = i;
+ *seqid = intf->seq_table[i].seqid;
+ intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ } else {
+ rv = -EAGAIN;
+ }
+
+ return rv;
+}
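+
+/* The (seq, seqid) pair allocated above travels in the msgid of the
+ outgoing message so that the send message response can locate its
+ sequence table entry again.  Roughly (STORE_SEQ_IN_MSGID and
+ GET_SEQ_FROM_MSGID are defined earlier in this file):
+
+ long msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+ ...
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ This is how intf_start_seq_timer() and intf_err_seq() below recover
+ the table entry from nothing but the msgid. */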
+
+/* Return the receive message for the given sequence number and
+ release the sequence number so it can be reused. Some other data
+ is passed in to be sure the message matches up correctly (to help
+ guard against messages coming in after their timeouts and the
+ sequence number being reused). */
+static int intf_find_seq(ipmi_smi_t intf,
+ unsigned char seq,
+ short channel,
+ unsigned char cmd,
+ unsigned char netfn,
+ struct ipmi_addr *addr,
+ struct ipmi_recv_msg **recv_msg)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+
+ if (seq >= IPMI_IPMB_NUM_SEQ)
+ return -EINVAL;
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ if (intf->seq_table[seq].inuse) {
+ struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+ if ((msg->addr.channel == channel)
+ && (msg->msg.cmd == cmd)
+ && (msg->msg.netfn == netfn)
+ && (ipmi_addr_equal(addr, &(msg->addr))))
+ {
+ *recv_msg = msg;
+ intf->seq_table[seq].inuse = 0;
+ rv = 0;
+ }
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ return rv;
+}
+
+
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(ipmi_smi_t intf,
+ long msgid)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ /* We do this verification because the user can be deleted
+ while a message is outstanding. */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid))
+ {
+ struct seq_table *ent = &(intf->seq_table[seq]);
+ ent->timeout = ent->orig_timeout;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(ipmi_smi_t intf,
+ long msgid,
+ unsigned int err)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+ struct ipmi_recv_msg *msg = NULL;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ /* We do this verification because the user can be deleted
+ while a message is outstanding. */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid))
+ {
+ struct seq_table *ent = &(intf->seq_table[seq]);
+
+ ent->inuse = 0;
+ msg = ent->recv_msg;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ if (msg) {
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = err;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_response(msg);
+ }
+
+ return rv;
+}
+
+
+int ipmi_create_user(unsigned int if_num,
+ struct ipmi_user_hndl *handler,
+ void *handler_data,
+ ipmi_user_t *user)
+{
+ unsigned long flags;
+ ipmi_user_t new_user;
+ int rv = 0;
+ ipmi_smi_t intf;
+
+ /* There is no module usecount here, because it's not
+ required. Since this can only be used by and called from
+ other modules, they will implicitly use this module, and
+ thus this can't be removed unless the other modules are
+ removed. */
+
+ if (handler == NULL)
+ return -EINVAL;
+
+ /* Make sure the driver is actually initialized; this handles
+ problems with initialization order. */
+ if (!initialized) {
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ /* The init code doesn't return an error if it was turned
+ off, but it won't initialize. Check that. */
+ if (!initialized)
+ return -ENODEV;
+ }
+
+ new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ if (! new_user)
+ return -ENOMEM;
+
+ down_read(&interfaces_sem);
+ if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
+ {
+ rv = -EINVAL;
+ goto out_unlock;
+ }
+
+ intf = ipmi_interfaces[if_num];
+
+ new_user->handler = handler;
+ new_user->handler_data = handler_data;
+ new_user->intf = intf;
+ new_user->gets_events = 0;
+
+ if (!try_module_get(intf->handlers->owner)) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (intf->handlers->inc_usecount) {
+ rv = intf->handlers->inc_usecount(intf->send_info);
+ if (rv) {
+ module_put(intf->handlers->owner);
+ goto out_unlock;
+ }
+ }
+
+ write_lock_irqsave(&intf->users_lock, flags);
+ list_add_tail(&new_user->link, &intf->users);
+ write_unlock_irqrestore(&intf->users_lock, flags);
+
+ out_unlock:
+ if (rv) {
+ kfree(new_user);
+ } else {
+ *user = new_user;
+ }
+
+ up_read(&interfaces_sem);
+ return rv;
+}
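+
+/* Illustrative sketch (not part of this file): a kernel client
+ supplies a handler and keeps the returned user around; the names
+ "my_recv_handler" and "my_data" are hypothetical.
+
+ static void my_recv_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+ {
+ ... examine msg->recv_type, msg->msg, etc. ...
+ ipmi_free_recv_msg(msg);
+ }
+
+ static struct ipmi_user_hndl my_hndl = {
+ .ipmi_recv_hndl = my_recv_handler,
+ };
+
+ ipmi_user_t user;
+ int rv = ipmi_create_user(0, &my_hndl, my_data, &user);
+
+ The handler owns the delivered message and must free it with
+ ipmi_free_recv_msg() when it is done. */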
+
+static int ipmi_destroy_user_nolock(ipmi_user_t user)
+{
+ int rv = -ENODEV;
+ ipmi_user_t t_user;
+ struct cmd_rcvr *rcvr, *rcvr2;
+ int i;
+ unsigned long flags;
+
+ /* Find the user and delete them from the list. */
+ list_for_each_entry(t_user, &(user->intf->users), link) {
+ if (t_user == user) {
+ list_del(&t_user->link);
+ rv = 0;
+ break;
+ }
+ }
+
+ if (rv) {
+ goto out_unlock;
+ }
+
+ /* Remove the user from the interfaces sequence table. */
+ spin_lock_irqsave(&(user->intf->seq_lock), flags);
+ for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
+ if (user->intf->seq_table[i].inuse
+ && (user->intf->seq_table[i].recv_msg->user == user))
+ {
+ user->intf->seq_table[i].inuse = 0;
+ }
+ }
+ spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
+
+ /* Remove the user from the command receiver's table. */
+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
+ list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) {
+ if (rcvr->user == user) {
+ list_del(&rcvr->link);
+ kfree(rcvr);
+ }
+ }
+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
+
+ kfree(user);
+
+ out_unlock:
+
+ return rv;
+}
+
+int ipmi_destroy_user(ipmi_user_t user)
+{
+ int rv;
+ ipmi_smi_t intf = user->intf;
+ unsigned long flags;
+
+ down_read(&interfaces_sem);
+ write_lock_irqsave(&intf->users_lock, flags);
+ rv = ipmi_destroy_user_nolock(user);
+ if (!rv) {
+ module_put(intf->handlers->owner);
+ if (intf->handlers->dec_usecount)
+ intf->handlers->dec_usecount(intf->send_info);
+ }
+
+ write_unlock_irqrestore(&intf->users_lock, flags);
+ up_read(&interfaces_sem);
+ return rv;
+}
+
+void ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor)
+{
+ *major = user->intf->version_major;
+ *minor = user->intf->version_minor;
+}
+
+void ipmi_set_my_address(ipmi_user_t user,
+ unsigned char address)
+{
+ user->intf->my_address = address;
+}
+
+unsigned char ipmi_get_my_address(ipmi_user_t user)
+{
+ return user->intf->my_address;
+}
+
+void ipmi_set_my_LUN(ipmi_user_t user,
+ unsigned char LUN)
+{
+ user->intf->my_lun = LUN & 0x3;
+}
+
+unsigned char ipmi_get_my_LUN(ipmi_user_t user)
+{
+ return user->intf->my_lun;
+}
+
+int ipmi_set_gets_events(ipmi_user_t user, int val)
+{
+ unsigned long flags;
+ struct ipmi_recv_msg *msg, *msg2;
+
+ read_lock(&(user->intf->users_lock));
+ spin_lock_irqsave(&(user->intf->events_lock), flags);
+ user->gets_events = val;
+
+ if (val) {
+ /* Deliver any queued events. */
+ list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) {
+ list_del(&msg->link);
+ msg->user = user;
+ deliver_response(msg);
+ }
+ }
+
+ spin_unlock_irqrestore(&(user->intf->events_lock), flags);
+ read_unlock(&(user->intf->users_lock));
+
+ return 0;
+}
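+
+/* Illustrative: a client that wants asynchronous events simply does
+
+ ipmi_set_gets_events(user, 1);
+
+ after which any queued events and all future events are delivered to
+ its handler with recv_type == IPMI_ASYNC_EVENT_RECV_TYPE. */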
+
+int ipmi_register_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd)
+{
+ struct cmd_rcvr *cmp;
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+
+
+ rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+ if (! rcvr)
+ return -ENOMEM;
+
+ read_lock(&(user->intf->users_lock));
+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
+ if (user->intf->all_cmd_rcvr != NULL) {
+ rv = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Make sure the command/netfn is not already registered. */
+ list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
+ if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
+ rv = -EBUSY;
+ break;
+ }
+ }
+
+ if (! rv) {
+ rcvr->cmd = cmd;
+ rcvr->netfn = netfn;
+ rcvr->user = user;
+ list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
+ }
+ out_unlock:
+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
+ read_unlock(&(user->intf->users_lock));
+
+ if (rv)
+ kfree(rcvr);
+
+ return rv;
+}
+
+int ipmi_unregister_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd)
+{
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ int rv = -ENOENT;
+
+ read_lock(&(user->intf->users_lock));
+ write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
+ /* Find the command/netfn and remove it if it is registered. */
+ list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
+ rv = 0;
+ list_del(&rcvr->link);
+ kfree(rcvr);
+ break;
+ }
+ }
+ write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
+ read_unlock(&(user->intf->users_lock));
+
+ return rv;
+}
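+
+/* Illustrative sketch (not part of this file): claiming a
+ hypothetical OEM command (netfn 0x2e, cmd 0x01) for the user:
+
+ rv = ipmi_register_for_cmd(user, 0x2e, 0x01);
+ ...
+ ipmi_unregister_for_cmd(user, 0x2e, 0x01);
+
+ Matching incoming commands are then delivered to the user's
+ handler with recv_type == IPMI_CMD_RECV_TYPE. */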
+
+void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
+{
+ user->intf->handlers->set_run_to_completion(user->intf->send_info,
+ val);
+}
+
+static unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+ unsigned char csum = 0;
+
+ for (; size > 0; size--, data++)
+ csum += *data;
+
+ return -csum;
+}
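+
+/* The checksum is the two's complement of the 8-bit sum, so adding it
+ back in makes the total zero mod 256.  For example, over the two
+ bytes 0x20 0x18: sum = 0x38, checksum = 0x100 - 0x38 = 0xc8, and
+ 0x20 + 0x18 + 0xc8 = 0x100 = 0 (mod 256), which is how a receiver
+ verifies the header and the message body. */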
+
+static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_ipmb_addr *ipmb_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ int broadcast,
+ unsigned char source_address,
+ unsigned char source_lun)
+{
+ int i = broadcast;
+
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = ipmb_addr->channel;
+ if (broadcast)
+ smi_msg->data[3] = 0;
+ smi_msg->data[i+3] = ipmb_addr->slave_addr;
+ smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+ smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
+ smi_msg->data[i+6] = source_address;
+ smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[i+8] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[i+9]), msg->data,
+ msg->data_len);
+ smi_msg->data_size = msg->data_len + 9;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[i+smi_msg->data_size]
+ = ipmb_checksum(&(smi_msg->data[i+6]),
+ smi_msg->data_size-6);
+
+ /* Add on the checksum size and the offset from the
+ broadcast. */
+ smi_msg->data_size += 1 + i;
+
+ smi_msg->msgid = msgid;
+}
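+
+/* For reference, the SEND_MSG framing built above looks like this
+ (with the broadcast offset i == 0):
+
+ data[0]  netfn/LUN of the SEND_MSG command itself
+ data[1]  IPMI_SEND_MSG_CMD
+ data[2]  channel
+ data[3]  target slave address
+ data[4]  (netfn << 2) | target LUN
+ data[5]  checksum over data[3..4]
+ data[6]  source address
+ data[7]  (ipmb_seq << 2) | source LUN
+ data[8]  cmd
+ data[9+] message data, followed by a trailing checksum over
+ data[6] through the end of the data. */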
+
+static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_lan_addr *lan_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ unsigned char source_lun)
+{
+ /* Format the LAN header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = lan_addr->channel;
+ smi_msg->data[3] = lan_addr->session_handle;
+ smi_msg->data[4] = lan_addr->remote_SWID;
+ smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+ smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
+ smi_msg->data[7] = lan_addr->local_SWID;
+ smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[9] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[10]), msg->data,
+ msg->data_len);
+ smi_msg->data_size = msg->data_len + 10;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[smi_msg->data_size]
+ = ipmb_checksum(&(smi_msg->data[7]),
+ smi_msg->data_size-7);
+
+ /* Add on the checksum size. */
+ smi_msg->data_size += 1;
+
+ smi_msg->msgid = msgid;
+}
+
+/* Separate from ipmi_request so that the user does not have to be
+ supplied in certain circumstances (mainly at panic time). If
+ messages are supplied, they will be freed, even if an error
+ occurs. */
+static inline int i_ipmi_request(ipmi_user_t user,
+ ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv = 0;
+ struct ipmi_smi_msg *smi_msg;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+
+
+ if (supplied_recv) {
+ recv_msg = supplied_recv;
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (recv_msg == NULL) {
+ return -ENOMEM;
+ }
+ }
+ recv_msg->user_msg_data = user_msg_data;
+
+ if (supplied_smi) {
+ smi_msg = (struct ipmi_smi_msg *) supplied_smi;
+ } else {
+ smi_msg = ipmi_alloc_smi_msg();
+ if (smi_msg == NULL) {
+ ipmi_free_recv_msg(recv_msg);
+ return -ENOMEM;
+ }
+ }
+
+ recv_msg->user = user;
+ recv_msg->msgid = msgid;
+ /* Store the message to send in the receive message so timeout
+ responses can get the proper response data. */
+ recv_msg->msg = *msg;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr;
+
+ if (msg->netfn & 1) {
+ /* Responses are not allowed to the SMI. */
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ smi_addr = (struct ipmi_system_interface_addr *) addr;
+ if (smi_addr->lun > 3) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+ if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_SEND_MSG_CMD)
+ || (msg->cmd == IPMI_GET_MSG_CMD)
+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
+ {
+ /* We don't let the user do these, since we manage
+ the sequence numbers. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+ smi_msg->data[1] = msg->cmd;
+ smi_msg->msgid = msgid;
+ smi_msg->user_data = recv_msg;
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 2;
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_local_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ {
+ struct ipmi_ipmb_addr *ipmb_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ int broadcast = 0;
+
+ if (addr->channel >= IPMI_NUM_CHANNELS) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_IPMB)
+ {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (retries < 0) {
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
+ retries = 0; /* Don't retry broadcasts. */
+ else
+ retries = 4;
+ }
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
+ /* Broadcasts add a zero at the beginning of the
+ message, but are otherwise the same as an IPMB
+ address. */
+ addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ broadcast = 1;
+ }
+
+ /* Default to 1 second retries. */
+ if (retry_time_ms == 0)
+ retry_time_ms = 1000;
+
+ /* 9 for the header and 1 for the checksum, plus
+ possibly one for the broadcast. */
+ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+ if (ipmb_addr->lun > 3) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /* It's a response, so use the user's sequence
+ from msgid. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_ipmb_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+ msgid, broadcast,
+ source_address, source_lun);
+
+ /* Save the receive message so we can use it
+ to deliver the response. */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+
+ spin_lock(&intf->counter_lock);
+ intf->sent_ipmb_commands++;
+ spin_unlock(&intf->counter_lock);
+
+ /* Allocate a sequence number, using the supplied
+ timeout and retry count. */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ broadcast,
+ &ipmb_seq,
+ &seqid);
+ if (rv) {
+ /* We have probably used up all the sequence
+ numbers, so abort. */
+ spin_unlock_irqrestore(&(intf->seq_lock),
+ flags);
+ goto out_err;
+ }
+
+ /* Store the sequence number in the message,
+ so that when the send message response
+ comes back we can start the timer. */
+ format_ipmb_msg(smi_msg, msg, ipmb_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, broadcast,
+ source_address, source_lun);
+
+ /* Copy the message into the recv message data, so we
+ can retransmit it later if necessary. */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /* We don't unlock until here, because we need
+ to copy the completed message into the
+ recv_msg before we release the lock.
+ Otherwise, race conditions may bite us. I
+ know that's pretty paranoid, but I prefer
+ to be correct. */
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+ }
+ } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ struct ipmi_lan_addr *lan_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+
+ if (addr->channel >= IPMI_NUM_CHANNELS) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if ((intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_8023LAN)
+ && (intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_ASYNC))
+ {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (retries < 0)
+ retries = 4;
+
+ /* Default to 1 second retries. */
+ if (retry_time_ms == 0)
+ retry_time_ms = 1000;
+
+ /* 11 for the header and 1 for the checksum. */
+ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ lan_addr = (struct ipmi_lan_addr *) addr;
+ if (lan_addr->lun > 3) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /* It's a response, so use the user's sequence
+ from msgid. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_lan_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ format_lan_msg(smi_msg, msg, lan_addr, msgid,
+ msgid, source_lun);
+
+ /* Save the receive message so we can use it
+ to deliver the response. */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+
+ spin_lock(&intf->counter_lock);
+ intf->sent_lan_commands++;
+ spin_unlock(&intf->counter_lock);
+
+ /* Allocate a sequence number, using the supplied
+ timeout and retry count. */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ 0,
+ &ipmb_seq,
+ &seqid);
+ if (rv) {
+ /* We have probably used up all the sequence
+ numbers, so abort. */
+ spin_unlock_irqrestore(&(intf->seq_lock),
+ flags);
+ goto out_err;
+ }
+
+ /* Store the sequence number in the message,
+ so that when the send message response
+ comes back we can start the timer. */
+ format_lan_msg(smi_msg, msg, lan_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, source_lun);
+
+ /* Copy the message into the recv message data, so we
+ can retransmit it later if necessary. */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /* We don't unlock until here, because we need
+ to copy the completed message into the
+ recv_msg before we release the lock.
+ Otherwise, race conditions may bite us. I
+ know that's pretty paranoid, but I prefer
+ to be correct. */
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+ }
+ } else {
+ /* Unknown address type. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->sent_invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ for (m=0; m<smi_msg->data_size; m++)
+ printk(" %2.2x", smi_msg->data[m]);
+ printk("\n");
+ }
+#endif
+ intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+ return 0;
+
+ out_err:
+ ipmi_free_smi_msg(smi_msg);
+ ipmi_free_recv_msg(recv_msg);
+ return rv;
+}
+
+int ipmi_request_settime(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ int priority,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ return i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ NULL, NULL,
+ priority,
+ user->intf->my_address,
+ user->intf->my_lun,
+ retries,
+ retry_time_ms);
+}
+
+int ipmi_request_supply_msgs(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority)
+{
+ return i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ supplied_smi,
+ supplied_recv,
+ priority,
+ user->intf->my_address,
+ user->intf->my_lun,
+ -1, 0);
+}
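+
+/* Illustrative sketch (not part of this file): sending a Get Device
+ ID command (netfn App, cmd 0x01) to the local BMC; the response
+ arrives asynchronously through the user's receive handler.
+
+ struct ipmi_system_interface_addr si = {
+ .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+ .channel   = IPMI_BMC_CHANNEL,
+ .lun       = 0,
+ };
+ struct kernel_ipmi_msg msg = {
+ .netfn    = IPMI_NETFN_APP_REQUEST,
+ .cmd      = 0x01, /* Get Device ID */
+ .data     = NULL,
+ .data_len = 0,
+ };
+
+ rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
+ 0, &msg, NULL, 0, -1, 0);
+
+ Passing retries == -1 and retry_time_ms == 0 asks i_ipmi_request()
+ above to apply its defaults. */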
+
+static int ipmb_file_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = (char *) page;
+ ipmi_smi_t intf = data;
+
+ return sprintf(out, "%x\n", intf->my_address);
+}
+
+static int version_file_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = (char *) page;
+ ipmi_smi_t intf = data;
+
+ return sprintf(out, "%d.%d\n",
+ intf->version_major, intf->version_minor);
+}
+
+static int stat_file_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = (char *) page;
+ ipmi_smi_t intf = data;
+
+ out += sprintf(out, "sent_invalid_commands: %d\n",
+ intf->sent_invalid_commands);
+ out += sprintf(out, "sent_local_commands: %d\n",
+ intf->sent_local_commands);
+ out += sprintf(out, "handled_local_responses: %d\n",
+ intf->handled_local_responses);
+ out += sprintf(out, "unhandled_local_responses: %d\n",
+ intf->unhandled_local_responses);
+ out += sprintf(out, "sent_ipmb_commands: %d\n",
+ intf->sent_ipmb_commands);
+ out += sprintf(out, "sent_ipmb_command_errs: %d\n",
+ intf->sent_ipmb_command_errs);
+ out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
+ intf->retransmitted_ipmb_commands);
+ out += sprintf(out, "timed_out_ipmb_commands: %d\n",
+ intf->timed_out_ipmb_commands);
+ out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
+ intf->timed_out_ipmb_broadcasts);
+ out += sprintf(out, "sent_ipmb_responses: %d\n",
+ intf->sent_ipmb_responses);
+ out += sprintf(out, "handled_ipmb_responses: %d\n",
+ intf->handled_ipmb_responses);
+ out += sprintf(out, "invalid_ipmb_responses: %d\n",
+ intf->invalid_ipmb_responses);
+ out += sprintf(out, "unhandled_ipmb_responses: %d\n",
+ intf->unhandled_ipmb_responses);
+ out += sprintf(out, "sent_lan_commands: %d\n",
+ intf->sent_lan_commands);
+ out += sprintf(out, "sent_lan_command_errs: %d\n",
+ intf->sent_lan_command_errs);
+ out += sprintf(out, "retransmitted_lan_commands: %d\n",
+ intf->retransmitted_lan_commands);
+ out += sprintf(out, "timed_out_lan_commands: %d\n",
+ intf->timed_out_lan_commands);
+ out += sprintf(out, "sent_lan_responses: %d\n",
+ intf->sent_lan_responses);
+ out += sprintf(out, "handled_lan_responses: %d\n",
+ intf->handled_lan_responses);
+ out += sprintf(out, "invalid_lan_responses: %d\n",
+ intf->invalid_lan_responses);
+ out += sprintf(out, "unhandled_lan_responses: %d\n",
+ intf->unhandled_lan_responses);
+ out += sprintf(out, "handled_commands: %d\n",
+ intf->handled_commands);
+ out += sprintf(out, "invalid_commands: %d\n",
+ intf->invalid_commands);
+ out += sprintf(out, "unhandled_commands: %d\n",
+ intf->unhandled_commands);
+ out += sprintf(out, "invalid_events: %d\n",
+ intf->invalid_events);
+ out += sprintf(out, "events: %d\n",
+ intf->events);
+
+ return (out - ((char *) page));
+}
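+
+/* Reading the stats file from userland yields one "name: value" pair
+ per line, e.g. (values are illustrative):
+
+ $ cat /proc/ipmi/0/stats
+ sent_invalid_commands: 0
+ sent_local_commands: 266
+ handled_local_responses: 266
+ ...
+*/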
+
+int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
+ read_proc_t *read_proc, write_proc_t *write_proc,
+ void *data, struct module *owner)
+{
+ struct proc_dir_entry *file;
+ int rv = 0;
+ struct ipmi_proc_entry *entry;
+
+ /* Create a list element. */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+ if (!entry->name) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ strcpy(entry->name, name);
+
+ file = create_proc_entry(name, 0, smi->proc_dir);
+ if (!file) {
+ kfree(entry->name);
+ kfree(entry);
+ rv = -ENOMEM;
+ } else {
+ file->nlink = 1;
+ file->data = data;
+ file->read_proc = read_proc;
+ file->write_proc = write_proc;
+ file->owner = owner;
+
+ /* Stick it on the list. */
+ entry->next = smi->proc_entries;
+ smi->proc_entries = entry;
+ }
+
+ return rv;
+}
+
+static int add_proc_entries(ipmi_smi_t smi, int num)
+{
+ int rv = 0;
+
+ sprintf(smi->proc_dir_name, "%d", num);
+ smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
+ if (!smi->proc_dir)
+ rv = -ENOMEM;
+ else {
+ smi->proc_dir->owner = THIS_MODULE;
+ }
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "stats",
+ stat_file_read_proc, NULL,
+ smi, THIS_MODULE);
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "ipmb",
+ ipmb_file_read_proc, NULL,
+ smi, THIS_MODULE);
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "version",
+ version_file_read_proc, NULL,
+ smi, THIS_MODULE);
+
+ return rv;
+}
+
+static void remove_proc_entries(ipmi_smi_t smi)
+{
+ struct ipmi_proc_entry *entry;
+
+ while (smi->proc_entries) {
+ entry = smi->proc_entries;
+ smi->proc_entries = entry->next;
+
+ remove_proc_entry(entry->name, smi->proc_dir);
+ kfree(entry->name);
+ kfree(entry);
+ }
+ remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
+}
+
+static int
+send_channel_info_cmd(ipmi_smi_t intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[1];
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+ msg.data = data;
+ msg.data_len = 1;
+ data[0] = chan;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ NULL,
+ NULL,
+ NULL,
+ 0,
+ intf->my_address,
+ intf->my_lun,
+ -1, 0);
+}
+
+static void
+channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
+{
+ int rv = 0;
+ int chan;
+
+ if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
+ && (msg->rsp[1] == IPMI_GET_CHANNEL_INFO_CMD))
+ {
+ /* It's the one we want */
+ if (msg->rsp[2] != 0) {
+ /* Got an error from the channel, just go on. */
+
+ if (msg->rsp[2] == IPMI_INVALID_COMMAND_ERR) {
+ /* If the MC does not support this
+ command, that is legal. We just
+ assume it has one IPMB at channel
+ zero. */
+ intf->channels[0].medium
+ = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->channels[0].protocol
+ = IPMI_CHANNEL_PROTOCOL_IPMB;
+ rv = -ENOSYS;
+
+ intf->curr_channel = IPMI_MAX_CHANNELS;
+ wake_up(&intf->waitq);
+ goto out;
+ }
+ goto next_channel;
+ }
+ if (msg->rsp_size < 6) {
+ /* Message not big enough, just go on. */
+ goto next_channel;
+ }
+ chan = intf->curr_channel;
+ intf->channels[chan].medium = msg->rsp[4] & 0x7f;
+ intf->channels[chan].protocol = msg->rsp[5] & 0x1f;
+
+ next_channel:
+ intf->curr_channel++;
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS)
+ wake_up(&intf->waitq);
+ else
+ rv = send_channel_info_cmd(intf, intf->curr_channel);
+
+ if (rv) {
+ /* Got an error somehow, just give up. */
+ intf->curr_channel = IPMI_MAX_CHANNELS;
+ wake_up(&intf->waitq);
+
+ printk(KERN_WARNING PFX
+ "Error sending channel information: %d\n",
+ rv);
+ }
+ }
+ out:
+ return;
+}
+
+int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ unsigned char version_major,
+ unsigned char version_minor,
+ unsigned char slave_addr,
+ ipmi_smi_t *intf)
+{
+ int i, j;
+ int rv;
+ ipmi_smi_t new_intf;
+ unsigned long flags;
+
+
+ /* Make sure the driver is actually initialized; this handles
+ problems with initialization order. */
+ if (!initialized) {
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+ /* The init code doesn't return an error if it was turned
+ off, but it won't initialize. Check that. */
+ if (!initialized)
+ return -ENODEV;
+ }
+
+ new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
+ if (!new_intf)
+ return -ENOMEM;
+ memset(new_intf, 0, sizeof(*new_intf));
+
+ new_intf->proc_dir = NULL;
+
+ rv = -ENOMEM;
+
+ down_write(&interfaces_sem);
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ if (ipmi_interfaces[i] == NULL) {
+ new_intf->intf_num = i;
+ new_intf->version_major = version_major;
+ new_intf->version_minor = version_minor;
+ if (slave_addr == 0)
+ new_intf->my_address = IPMI_BMC_SLAVE_ADDR;
+ else
+ new_intf->my_address = slave_addr;
+ new_intf->my_lun = 2; /* the SMS LUN. */
+ rwlock_init(&(new_intf->users_lock));
+ INIT_LIST_HEAD(&(new_intf->users));
+ new_intf->handlers = handlers;
+ new_intf->send_info = send_info;
+ spin_lock_init(&(new_intf->seq_lock));
+ for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
+ new_intf->seq_table[j].inuse = 0;
+ new_intf->seq_table[j].seqid = 0;
+ }
+ new_intf->curr_seq = 0;
+ spin_lock_init(&(new_intf->waiting_msgs_lock));
+ INIT_LIST_HEAD(&(new_intf->waiting_msgs));
+ spin_lock_init(&(new_intf->events_lock));
+ INIT_LIST_HEAD(&(new_intf->waiting_events));
+ new_intf->waiting_events_count = 0;
+ rwlock_init(&(new_intf->cmd_rcvr_lock));
+ init_waitqueue_head(&new_intf->waitq);
+ INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
+ new_intf->all_cmd_rcvr = NULL;
+
+ spin_lock_init(&(new_intf->counter_lock));
+
+ spin_lock_irqsave(&interfaces_lock, flags);
+ ipmi_interfaces[i] = new_intf;
+ spin_unlock_irqrestore(&interfaces_lock, flags);
+
+ rv = 0;
+ *intf = new_intf;
+ break;
+ }
+ }
+
+ downgrade_write(&interfaces_sem);
+
+ if (rv == 0)
+ rv = add_proc_entries(*intf, i);
+
+ if (rv == 0) {
+ if ((version_major > 1)
+ || ((version_major == 1) && (version_minor >= 5)))
+ {
+ /* Start scanning the channels to see what is
+ available. */
+ (*intf)->null_user_handler = channel_handler;
+ (*intf)->curr_channel = 0;
+ rv = send_channel_info_cmd(*intf, 0);
+ if (rv)
+ goto out;
+
+ /* Wait for the channel info to be read. */
+ up_read(&interfaces_sem);
+ wait_event((*intf)->waitq,
+ ((*intf)->curr_channel>=IPMI_MAX_CHANNELS));
+ down_read(&interfaces_sem);
+
+ if (ipmi_interfaces[i] != new_intf)
+ /* Well, it went away. Just return. */
+ goto out;
+ } else {
+ /* Assume a single IPMB channel at zero. */
+ (*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ (*intf)->channels[0].protocol
+ = IPMI_CHANNEL_PROTOCOL_IPMB;
+ }
+
+ /* Call all the watcher interfaces to tell
+ them that a new interface is available. */
+ call_smi_watchers(i);
+ }
+
+ out:
+ up_read(&interfaces_sem);
+
+ if (rv) {
+ if (new_intf->proc_dir)
+ remove_proc_entries(new_intf);
+ kfree(new_intf);
+ }
+
+ return rv;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+ struct ipmi_recv_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+}
+
+static void free_cmd_rcvr_list(struct list_head *q)
+{
+ struct cmd_rcvr *rcvr, *rcvr2;
+
+ list_for_each_entry_safe(rcvr, rcvr2, q, link) {
+ list_del(&rcvr->link);
+ kfree(rcvr);
+ }
+}
+
+static void clean_up_interface_data(ipmi_smi_t intf)
+{
+ int i;
+
+ free_recv_msg_list(&(intf->waiting_msgs));
+ free_recv_msg_list(&(intf->waiting_events));
+ free_cmd_rcvr_list(&(intf->cmd_rcvrs));
+
+ for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
+ if ((intf->seq_table[i].inuse)
+ && (intf->seq_table[i].recv_msg))
+ {
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+ }
+}
+
+int ipmi_unregister_smi(ipmi_smi_t intf)
+{
+ int rv = -ENODEV;
+ int i;
+ struct ipmi_smi_watcher *w;
+ unsigned long flags;
+
+ down_write(&interfaces_sem);
+ if (list_empty(&(intf->users))) {
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ if (ipmi_interfaces[i] == intf) {
+ remove_proc_entries(intf);
+ spin_lock_irqsave(&interfaces_lock, flags);
+ ipmi_interfaces[i] = NULL;
+ clean_up_interface_data(intf);
+ spin_unlock_irqrestore(&interfaces_lock,flags);
+ kfree(intf);
+ rv = 0;
+ goto out_call_watcher;
+ }
+ }
+ } else {
+ rv = -EBUSY;
+ }
+ up_write(&interfaces_sem);
+
+ return rv;
+
+ out_call_watcher:
+ downgrade_write(&interfaces_sem);
+
+ /* Call all the watcher interfaces to tell them that
+ an interface is gone. */
+ down_read(&smi_watchers_sem);
+ list_for_each_entry(w, &smi_watchers, link) {
+ w->smi_gone(i);
+ }
+ up_read(&smi_watchers_sem);
+ up_read(&interfaces_sem);
+ return 0;
+}
+
+static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+
+
+ /* This is 11, not 10, because the response must contain a
+ * completion code. */
+ if (msg->rsp_size < 11) {
+ /* Message not big enough, just ignore it. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->invalid_ipmb_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.slave_addr = msg->rsp[6];
+ ipmb_addr.channel = msg->rsp[3] & 0x0f;
+ ipmb_addr.lun = msg->rsp[7] & 3;
+
+ /* It's a response from a remote entity. Look up the sequence
+ number and handle the response. */
+ if (intf_find_seq(intf,
+ msg->rsp[7] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[8],
+ (msg->rsp[4] >> 2) & (~1),
+ (struct ipmi_addr *) &(ipmb_addr),
+ &recv_msg))
+ {
+ /* We were unable to find the sequence number,
+ so just nuke the message. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->unhandled_ipmb_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[9]),
+ msg->rsp_size - 9);
+ /* The other fields matched, so no need to set them, except
+ for netfn, which needs to be the response that was
+ returned, not the request value. */
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_ipmb_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ deliver_response(recv_msg);
+
+ return 0;
+}
+
+static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ ipmi_user_t user = NULL;
+ struct ipmi_ipmb_addr *ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+
+ if (msg->rsp_size < 10) {
+ /* Message not big enough, just ignore it. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[4] >> 2;
+ cmd = msg->rsp[8];
+
+ read_lock(&(intf->cmd_rcvr_lock));
+
+ if (intf->all_cmd_rcvr) {
+ user = intf->all_cmd_rcvr;
+ } else {
+ /* Find the command/netfn. */
+ list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
+ user = rcvr->user;
+ break;
+ }
+ }
+ }
+ read_unlock(&(intf->cmd_rcvr_lock));
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->unhandled_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = msg->rsp[3];
+ msg->data[3] = msg->rsp[6];
+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+ msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
+ msg->data[6] = intf->my_address;
+ /* rqseq/lun */
+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+ msg->data[8] = msg->rsp[8]; /* cmd */
+ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
+ msg->data_size = 11;
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ printk("Invalid command:");
+ for (m=0; m<msg->data_size; m++)
+ printk(" %2.2x", msg->data[m]);
+ printk("\n");
+ }
+#endif
+ intf->handlers->sender(intf->send_info, msg, 0);
+
+ rv = -1; /* We used the message, so return the value that
+ causes it to not be freed or queued. */
+ } else {
+ /* Deliver the message to the user. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (! recv_msg) {
+ /* We couldn't allocate memory for the
+ message, so requeue it for handling
+ later. */
+ rv = 1;
+ } else {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+ /* Extract the rest of the message information
+ from the IPMB header. */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /* We chop off 10, not 9 bytes because the checksum
+ at the end also needs to be removed. */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[9]),
+ msg->rsp_size - 10);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_lan_addr lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+
+
+ /* This is 13, not 12, because the response must contain a
+ * completion code. */
+ if (msg->rsp_size < 13) {
+ /* Message not big enough, just ignore it. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->invalid_lan_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr.session_handle = msg->rsp[4];
+ lan_addr.remote_SWID = msg->rsp[8];
+ lan_addr.local_SWID = msg->rsp[5];
+ lan_addr.channel = msg->rsp[3] & 0x0f;
+ lan_addr.privilege = msg->rsp[3] >> 4;
+ lan_addr.lun = msg->rsp[9] & 3;
+
+ /* It's a response from a remote entity. Look up the sequence
+ number and handle the response. */
+ if (intf_find_seq(intf,
+ msg->rsp[9] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[10],
+ (msg->rsp[6] >> 2) & (~1),
+ (struct ipmi_addr *) &(lan_addr),
+ &recv_msg))
+ {
+ /* We were unable to find the sequence number,
+ so just nuke the message. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->unhandled_lan_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[11]),
+ msg->rsp_size - 11);
+ /* The other fields matched, so no need to set them, except
+ for netfn, which needs to be the response that was
+ returned, not the request value. */
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_lan_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ deliver_response(recv_msg);
+
+ return 0;
+}
+
+static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ ipmi_user_t user = NULL;
+ struct ipmi_lan_addr *lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+
+ if (msg->rsp_size < 12) {
+ /* Message not big enough, just ignore it. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->invalid_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[6] >> 2;
+ cmd = msg->rsp[10];
+
+ read_lock(&(intf->cmd_rcvr_lock));
+
+ if (intf->all_cmd_rcvr) {
+ user = intf->all_cmd_rcvr;
+ } else {
+ /* Find the command/netfn. */
+ list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
+ user = rcvr->user;
+ break;
+ }
+ }
+ }
+ read_unlock(&(intf->cmd_rcvr_lock));
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->unhandled_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+
+ rv = 0; /* Don't do anything with these messages, just
+ allow them to be freed. */
+ } else {
+ /* Deliver the message to the user. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_commands++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (! recv_msg) {
+ /* We couldn't allocate memory for the
+ message, so requeue it for handling
+ later. */
+ rv = 1;
+ } else {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
+
+ /* Extract the rest of the message information
+ from the LAN header. */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /* We chop off 12, not 11 bytes because the checksum
+ at the end also needs to be removed. */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[11]),
+ msg->rsp_size - 12);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg->msgid = 0;
+ smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
+/* This will be called with the intf->users_lock read-locked, so no need
+ to do that here. */
+static int handle_read_event_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg, *recv_msg2;
+ struct list_head msgs;
+ ipmi_user_t user;
+ int rv = 0;
+ int deliver_count = 0;
+ unsigned long flags;
+
+ if (msg->rsp_size < 19) {
+ /* Message is too small to be an IPMB event. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->invalid_events++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the event, just ignore it. */
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&(intf->events_lock), flags);
+
+ spin_lock(&intf->counter_lock);
+ intf->events++;
+ spin_unlock(&intf->counter_lock);
+
+ /* Allocate and fill in one message for every user that is getting
+ events. */
+ list_for_each_entry(user, &(intf->users), link) {
+ if (! user->gets_events)
+ continue;
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (! recv_msg) {
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_del(&recv_msg->link);
+ ipmi_free_recv_msg(recv_msg);
+ }
+ /* We couldn't allocate memory for the
+ message, so requeue it for handling
+ later. */
+ rv = 1;
+ goto out;
+ }
+
+ deliver_count++;
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ recv_msg->user = user;
+ list_add_tail(&(recv_msg->link), &msgs);
+ }
+
+ if (deliver_count) {
+ /* Now deliver all the messages. */
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_del(&recv_msg->link);
+ deliver_response(recv_msg);
+ }
+ } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+ /* No one to receive the message; put it in the queue
+ unless there are already too many things queued. */
+ recv_msg = ipmi_alloc_recv_msg();
+ if (! recv_msg) {
+ /* We couldn't allocate memory for the
+ message, so requeue it for handling
+ later. */
+ rv = 1;
+ goto out;
+ }
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ list_add_tail(&(recv_msg->link), &(intf->waiting_events));
+ } else {
+ /* There are too many things in the queue, discard this
+ message. */
+ printk(KERN_WARNING PFX "Event queue full, discarding an"
+ " incoming event\n");
+ }
+
+ out:
+ spin_unlock_irqrestore(&(intf->events_lock), flags);
+
+ return rv;
+}
+
+static int handle_bmc_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ int found = 0;
+ struct ipmi_user *user;
+ unsigned long flags;
+
+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+
+ /* Make sure the user still exists. */
+ list_for_each_entry(user, &(intf->users), link) {
+ if (user == recv_msg->user) {
+ /* Found it, so we can deliver it */
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Special handling for NULL users. */
+ if (!recv_msg->user && intf->null_user_handler) {
+ intf->null_user_handler(intf, msg);
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_local_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ } else {
+ /* The user for the message went away, so give up. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->unhandled_local_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ }
+ ipmi_free_recv_msg(recv_msg);
+ } else {
+ struct ipmi_system_interface_addr *smi_addr;
+
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ intf->handled_local_responses++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &(recv_msg->addr));
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[2]),
+ msg->rsp_size - 2);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 2;
+ deliver_response(recv_msg);
+ }
+
+ return 0;
+}
+
+/* Handle a new message. Return 1 if the message should be requeued,
+ 0 if the message should be freed, or -1 if the message should not
+ be freed or requeued. */
+static int handle_new_recv_msg(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ int requeue;
+ int chan;
+
+#ifdef DEBUG_MSGING
+ int m;
+ printk("Recv:");
+ for (m=0; m<msg->rsp_size; m++)
+ printk(" %2.2x", msg->rsp[m]);
+ printk("\n");
+#endif
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ printk(KERN_WARNING PFX "BMC returned too small a message"
+ " for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
+ || (msg->rsp[1] != msg->data[1])) /* Command */
+ {
+ /* The response is not even marginally correct. */
+ printk(KERN_WARNING PFX "BMC returned incorrect response,"
+ " expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
+
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ }
+
+ if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL))
+ {
+ /* It's a response to a response we sent. For this we
+ deliver a send message response to the user. */
+ struct ipmi_recv_msg *recv_msg = msg->user_data;
+
+ requeue = 0;
+ if (msg->rsp_size < 2)
+ /* Message is too small to be correct. */
+ goto out;
+
+ chan = msg->data[2] & 0x0f;
+ if (chan >= IPMI_MAX_CHANNELS)
+ /* Invalid channel number */
+ goto out;
+
+ if (recv_msg) {
+ recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = 1;
+ recv_msg->msg_data[0] = msg->rsp[2];
+ deliver_response(recv_msg);
+ }
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_GET_MSG_CMD))
+ {
+ /* It's from the receive queue. */
+ chan = msg->rsp[3] & 0xf;
+ if (chan >= IPMI_MAX_CHANNELS) {
+ /* Invalid channel number */
+ requeue = 0;
+ goto out;
+ }
+
+ switch (intf->channels[chan].medium) {
+ case IPMI_CHANNEL_MEDIUM_IPMB:
+ if (msg->rsp[4] & 0x04) {
+ /* It's a response, so find the
+ requesting message and send it up. */
+ requeue = handle_ipmb_get_msg_rsp(intf, msg);
+ } else {
+ /* It's a command to the SMS from some other
+ entity. Handle that. */
+ requeue = handle_ipmb_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ case IPMI_CHANNEL_MEDIUM_8023LAN:
+ case IPMI_CHANNEL_MEDIUM_ASYNC:
+ if (msg->rsp[6] & 0x04) {
+ /* It's a response, so find the
+ requesting message and send it up. */
+ requeue = handle_lan_get_msg_rsp(intf, msg);
+ } else {
+ /* It's a command to the SMS from some other
+ entity. Handle that. */
+ requeue = handle_lan_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ default:
+ /* We don't handle the channel type, so just
+ * free the message. */
+ requeue = 0;
+ }
+
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
+ {
+ /* It's an asynchronous event. */
+ requeue = handle_read_event_rsp(intf, msg);
+ } else {
+ /* It's a response from the local BMC. */
+ requeue = handle_bmc_rsp(intf, msg);
+ }
+
+ out:
+ return requeue;
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ unsigned long flags;
+ int rv;
+
+
+ /* Lock the user lock so the user can't go away while we are
+ working on it. */
+ read_lock(&(intf->users_lock));
+
+ if ((msg->data_size >= 2)
+ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+ && (msg->data[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data == NULL)) {
+ /* This is the local response to a command send; start
+ the timer for these. The user_data will not be
+ NULL if this is a response send, and we let
+ response sends just go through. */
+
+ /* Check for errors, if we get certain errors (ones
+ that mean basically we can try again later), we
+ ignore them and start the timer. Otherwise we
+ report the error immediately. */
+ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
+ {
+ int chan = msg->rsp[3] & 0xf;
+
+ /* Got an error sending the message, handle it. */
+ spin_lock_irqsave(&intf->counter_lock, flags);
+ if (chan >= IPMI_MAX_CHANNELS)
+ ; /* This shouldn't happen */
+ else if ((intf->channels[chan].medium
+ == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (intf->channels[chan].medium
+ == IPMI_CHANNEL_MEDIUM_ASYNC))
+ intf->sent_lan_command_errs++;
+ else
+ intf->sent_ipmb_command_errs++;
+ spin_unlock_irqrestore(&intf->counter_lock, flags);
+ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+ } else {
+ /* The message was sent, start the timer. */
+ intf_start_seq_timer(intf, msg->msgid);
+ }
+
+ ipmi_free_smi_msg(msg);
+ goto out_unlock;
+ }
+
+ /* To preserve message order, if the list is not empty, we
+ tack this message onto the end of the list. */
+ spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
+ if (!list_empty(&(intf->waiting_msgs))) {
+ list_add_tail(&(msg->link), &(intf->waiting_msgs));
+		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
+
+ rv = handle_new_recv_msg(intf, msg);
+ if (rv > 0) {
+ /* Could not handle the message now, just add it to a
+ list to handle later. */
+ spin_lock(&(intf->waiting_msgs_lock));
+ list_add_tail(&(msg->link), &(intf->waiting_msgs));
+ spin_unlock(&(intf->waiting_msgs_lock));
+ } else if (rv == 0) {
+ ipmi_free_smi_msg(msg);
+ }
+
+ out_unlock:
+ read_unlock(&(intf->users_lock));
+}
+
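+/* Called by the lower layer when a watchdog pretimeout occurs; pass
+   the notification on to every user that registered a pretimeout
+   handler. */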
+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
+{
+ ipmi_user_t user;
+
+ read_lock(&(intf->users_lock));
+ list_for_each_entry(user, &(intf->users), link) {
+ if (! user->handler->ipmi_watchdog_pretimeout)
+ continue;
+
+ user->handler->ipmi_watchdog_pretimeout(user->handler_data);
+ }
+ read_unlock(&(intf->users_lock));
+}
+
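+/* A request has used up all its retries; turn it into an error
+   response with a timeout completion code and deliver it to the
+   user. */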
+static void
+handle_msg_timeout(struct ipmi_recv_msg *msg)
+{
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_response(msg);
+}
+
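+/* Retransmit a request that timed out: copy the original data into
+   an SMI message (allocating one if the caller did not supply it)
+   and send it at normal priority. */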
+static void
+send_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+ struct ipmi_smi_msg *smi_msg,
+ unsigned char seq, long seqid)
+{
+ if (!smi_msg)
+ smi_msg = ipmi_alloc_smi_msg();
+ if (!smi_msg)
+		/* If we can't allocate the message, then just return;
+		   we get 4 retries, so this should be ok. */
+ return;
+
+ memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+ smi_msg->data_size = recv_msg->msg.data_len;
+ smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+ /* Send the new message. We send with a zero priority. It
+ timed out, I doubt time is that critical now, and high
+ priority messages are really only for messages to the local
+ MC, which don't get resent. */
+ intf->handlers->sender(intf->send_info, smi_msg, 0);
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ printk("Resend: ");
+ for (m=0; m<smi_msg->data_size; m++)
+ printk(" %2.2x", smi_msg->data[m]);
+ printk("\n");
+ }
+#endif
+}
+
+static void
+ipmi_timeout_handler(long timeout_period)
+{
+ ipmi_smi_t intf;
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
+ struct ipmi_smi_msg *smi_msg, *smi_msg2;
+ unsigned long flags;
+ int i, j;
+
+ INIT_LIST_HEAD(&timeouts);
+
+ spin_lock(&interfaces_lock);
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ intf = ipmi_interfaces[i];
+ if (intf == NULL)
+ continue;
+
+ read_lock(&(intf->users_lock));
+
+ /* See if any waiting messages need to be processed. */
+ spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
+ list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) {
+ if (! handle_new_recv_msg(intf, smi_msg)) {
+ list_del(&smi_msg->link);
+ ipmi_free_smi_msg(smi_msg);
+ } else {
+ /* To preserve message order, quit if we
+ can't handle a message. */
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
+
+ /* Go through the seq table and find any messages that
+ have timed out, putting them in the timeouts
+ list. */
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
+ struct seq_table *ent = &(intf->seq_table[j]);
+ if (!ent->inuse)
+ continue;
+
+ ent->timeout -= timeout_period;
+ if (ent->timeout > 0)
+ continue;
+
+ if (ent->retries_left == 0) {
+ /* The message has used all its retries. */
+ ent->inuse = 0;
+ msg = ent->recv_msg;
+ list_add_tail(&(msg->link), &timeouts);
+ spin_lock(&intf->counter_lock);
+ if (ent->broadcast)
+ intf->timed_out_ipmb_broadcasts++;
+ else if (ent->recv_msg->addr.addr_type
+ == IPMI_LAN_ADDR_TYPE)
+ intf->timed_out_lan_commands++;
+ else
+ intf->timed_out_ipmb_commands++;
+ spin_unlock(&intf->counter_lock);
+ } else {
+ /* More retries, send again. */
+
+ /* Start with the max timer, set to normal
+ timer after the message is sent. */
+ ent->timeout = MAX_MSG_TIMEOUT;
+ ent->retries_left--;
+ send_from_recv_msg(intf, ent->recv_msg, NULL,
+ j, ent->seqid);
+ spin_lock(&intf->counter_lock);
+ if (ent->recv_msg->addr.addr_type
+ == IPMI_LAN_ADDR_TYPE)
+ intf->retransmitted_lan_commands++;
+ else
+ intf->retransmitted_ipmb_commands++;
+ spin_unlock(&intf->counter_lock);
+ }
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ list_for_each_entry_safe(msg, msg2, &timeouts, link) {
+ handle_msg_timeout(msg);
+ }
+
+ read_unlock(&(intf->users_lock));
+ }
+ spin_unlock(&interfaces_lock);
+}
+
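+/* Ask the lower layer of every registered interface to fetch any
+   pending events. */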
+static void ipmi_request_event(void)
+{
+ ipmi_smi_t intf;
+ int i;
+
+ spin_lock(&interfaces_lock);
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ intf = ipmi_interfaces[i];
+ if (intf == NULL)
+ continue;
+
+ intf->handlers->request_events(intf->send_info);
+ }
+ spin_unlock(&interfaces_lock);
+}
+
+static struct timer_list ipmi_timer;
+
+/* Call every ~100 ms. */
+#define IPMI_TIMEOUT_TIME 100
+
+/* How many jiffies does it take to get to the timeout time? */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/* Request events from the queue every second (this is the number of
+ IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
+ future, IPMI will add a way to know immediately if an event is in
+ the queue and this silliness can go away. */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+
+static volatile int stop_operation = 0;
+static volatile int timer_stopped = 0;
+static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+
+static void ipmi_timeout(unsigned long data)
+{
+ if (stop_operation) {
+ timer_stopped = 1;
+ return;
+ }
+
+ ticks_to_req_ev--;
+ if (ticks_to_req_ev == 0) {
+ ipmi_request_event();
+ ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+
+ ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+
+ ipmi_timer.expires += IPMI_TIMEOUT_JIFFIES;
+ add_timer(&ipmi_timer);
+}
+
+
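+/* Track outstanding message allocations so that leaks can be
+   reported when the module is unloaded. */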
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+/* FIXME - convert these to slabs. */
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&smi_msg_inuse_count);
+ kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+ struct ipmi_smi_msg *rv;
+ rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->done = free_smi_msg;
+ rv->user_data = NULL;
+ atomic_inc(&smi_msg_inuse_count);
+ }
+ return rv;
+}
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&recv_msg_inuse_count);
+ kfree(msg);
+}
+
+struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+ struct ipmi_recv_msg *rv;
+
+ rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->done = free_recv_msg;
+ atomic_inc(&recv_msg_inuse_count);
+ }
+ return rv;
+}
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+
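+/* The messages used at panic time are supplied by the caller rather
+   than allocated (we may not be able to allocate memory in a panic),
+   so the done handlers have nothing to free. */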
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+}
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
+{
+ if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2))
+ && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD)
+ && (msg->rsp[2] == IPMI_CC_NO_ERROR))
+ {
+ /* A get event receiver command, save it. */
+ intf->event_receiver = msg->rsp[3];
+ intf->event_receiver_lun = msg->rsp[4] & 0x3;
+ }
+}
+
+static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
+{
+ if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
+ && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD)
+ && (msg->rsp[2] == IPMI_CC_NO_ERROR))
+ {
+		/* A get device id command, save whether we are an event
+		   receiver or generator. */
+ intf->local_sel_device = (msg->rsp[8] >> 2) & 1;
+ intf->local_event_generator = (msg->rsp[8] >> 5) & 1;
+ }
+}
+#endif
+
+static void send_panic_events(char *str)
+{
+ struct kernel_ipmi_msg msg;
+ ipmi_smi_t intf;
+ unsigned char data[16];
+ int i;
+ struct ipmi_system_interface_addr *si;
+ struct ipmi_addr addr;
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+
+ /* Fill in an event telling that we have failed. */
+ msg.netfn = 0x04; /* Sensor or Event. */
+ msg.cmd = 2; /* Platform event command. */
+ msg.data = data;
+ msg.data_len = 8;
+ data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
+ data[1] = 0x03; /* This is for IPMI 1.0. */
+ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+ /* Put a few breadcrumbs in. Hopefully later we can add more things
+ to make the panic events more useful. */
+ if (str) {
+ data[3] = str[0];
+ data[6] = str[1];
+ data[7] = str[2];
+ }
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+
+ /* For every registered interface, send the event. */
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ intf = ipmi_interfaces[i];
+ if (intf == NULL)
+ continue;
+
+ /* Send the event announcing the panic. */
+ intf->handlers->set_run_to_completion(intf->send_info, 1);
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->my_address,
+ intf->my_lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ }
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+	/* On every interface, dump a bunch of OEM events holding the
+	   panic string. */
+ if (!str)
+ return;
+
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ char *p = str;
+ struct ipmi_ipmb_addr *ipmb;
+ int j;
+
+ intf = ipmi_interfaces[i];
+ if (intf == NULL)
+ continue;
+
+ /* First job here is to figure out where to send the
+ OEM events. There's no way in IPMI to send OEM
+ events using an event send command, so we have to
+ find the SEL to put them in and stick them in
+ there. */
+
+ /* Get capabilities from the get device id. */
+ intf->local_sel_device = 0;
+ intf->local_event_generator = 0;
+ intf->event_receiver = 0;
+
+ /* Request the device info from the local MC. */
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = device_id_fetcher;
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->my_address,
+ intf->my_lun,
+ 0, 1); /* Don't retry, and don't wait. */
+
+ if (intf->local_event_generator) {
+ /* Request the event receiver from the local MC. */
+ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+ msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = event_receiver_fetcher;
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->my_address,
+ intf->my_lun,
+ 0, 1); /* no retry, and no wait. */
+ }
+ intf->null_user_handler = NULL;
+
+ /* Validate the event receiver. The low bit must not
+ be 1 (it must be a valid IPMB address), it cannot
+ be zero, and it must not be my address. */
+ if (((intf->event_receiver & 1) == 0)
+ && (intf->event_receiver != 0)
+ && (intf->event_receiver != intf->my_address))
+ {
+ /* The event receiver is valid, send an IPMB
+ message. */
+ ipmb = (struct ipmi_ipmb_addr *) &addr;
+ ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb->channel = 0; /* FIXME - is this right? */
+ ipmb->lun = intf->event_receiver_lun;
+ ipmb->slave_addr = intf->event_receiver;
+ } else if (intf->local_sel_device) {
+ /* The event receiver was not valid (or was
+			   me), but I am an SEL device, so just dump it
+ in my SEL. */
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+ } else
+			continue; /* Nowhere to send the event. */
+
+
+ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+ msg.data = data;
+ msg.data_len = 16;
+
+ j = 0;
+ while (*p) {
+ int size = strlen(p);
+
+ if (size > 11)
+ size = 11;
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0xf0; /* OEM event without timestamp. */
+ data[3] = intf->my_address;
+ data[4] = j++; /* sequence # */
+ /* Always give 11 bytes, so strncpy will fill
+ it with zeroes for me. */
+ strncpy(data+5, p, 11);
+ p += size;
+
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->my_address,
+ intf->my_lun,
+ 0, 1); /* no retry, and no wait. */
+ }
+ }
+#endif /* CONFIG_IPMI_PANIC_STRING */
+}
+#endif /* CONFIG_IPMI_PANIC_EVENT */
+
+static int has_paniced = 0;
+
+static int panic_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ int i;
+ ipmi_smi_t intf;
+
+ if (has_paniced)
+ return NOTIFY_DONE;
+ has_paniced = 1;
+
+ /* For every registered interface, set it to run to completion. */
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ intf = ipmi_interfaces[i];
+ if (intf == NULL)
+ continue;
+
+ intf->handlers->set_run_to_completion(intf->send_info, 1);
+ }
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+ send_panic_events(ptr);
+#endif
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+ .next = NULL,
+ .priority = 200 /* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+ int i;
+
+ if (initialized)
+ return 0;
+
+ printk(KERN_INFO "ipmi message handler version "
+ IPMI_MSGHANDLER_VERSION "\n");
+
+ for (i=0; i<MAX_IPMI_INTERFACES; i++) {
+ ipmi_interfaces[i] = NULL;
+ }
+
+ proc_ipmi_root = proc_mkdir("ipmi", NULL);
+ if (!proc_ipmi_root) {
+		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
+ return -ENOMEM;
+ }
+
+ proc_ipmi_root->owner = THIS_MODULE;
+
+ init_timer(&ipmi_timer);
+ ipmi_timer.data = 0;
+ ipmi_timer.function = ipmi_timeout;
+ ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
+ add_timer(&ipmi_timer);
+
+ notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ initialized = 1;
+
+ return 0;
+}
+
+static __init int ipmi_init_msghandler_mod(void)
+{
+ ipmi_init_msghandler();
+ return 0;
+}
+
+static __exit void cleanup_ipmi(void)
+{
+ int count;
+
+ if (!initialized)
+ return;
+
+ notifier_chain_unregister(&panic_notifier_list, &panic_block);
+
+ /* This can't be called if any interfaces exist, so no worry about
+ shutting down the interfaces. */
+
+ /* Tell the timer to stop, then wait for it to stop. This avoids
+ problems with race conditions removing the timer here. */
+ stop_operation = 1;
+ while (!timer_stopped) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ remove_proc_entry(proc_ipmi_root->name, &proc_root);
+
+ initialized = 0;
+
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ printk(KERN_WARNING PFX "SMI message count %d at exit\n",
+ count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ printk(KERN_WARNING PFX "recv message count %d at exit\n",
+ count);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(ipmi_create_user);
+EXPORT_SYMBOL(ipmi_destroy_user);
+EXPORT_SYMBOL(ipmi_get_version);
+EXPORT_SYMBOL(ipmi_request_settime);
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+EXPORT_SYMBOL(ipmi_register_smi);
+EXPORT_SYMBOL(ipmi_unregister_smi);
+EXPORT_SYMBOL(ipmi_register_for_cmd);
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+EXPORT_SYMBOL(ipmi_addr_length);
+EXPORT_SYMBOL(ipmi_validate_addr);
+EXPORT_SYMBOL(ipmi_set_gets_events);
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+EXPORT_SYMBOL(ipmi_set_my_address);
+EXPORT_SYMBOL(ipmi_get_my_address);
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
+EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 000000000000..cb5cdc6f14bf
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,549 @@
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ * Steven Dake <sdake@mvista.com>
+ * Corey Minyard <cminyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <asm/semaphore.h>
+#include <linux/kdev_t.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+#define PFX "IPMI poweroff: "
+#define IPMI_POWEROFF_VERSION "v33"
+
+/* Where do we insert our poweroff function? */
+extern void (*pm_power_off)(void);
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+
+/* We use our own messages for this operation, we don't let the system
+ allocate them, since we may be in a panic situation. The whole
+ thing is single-threaded, anyway, so multiple messages are not
+ required. */
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+}
+static struct ipmi_smi_msg halt_smi_msg =
+{
+ .done = dummy_smi_free
+};
+static struct ipmi_recv_msg halt_recv_msg =
+{
+ .done = dummy_recv_free
+};
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+ struct semaphore *sem = recv_msg->user_msg_data;
+
+ if (sem)
+ up(sem);
+}
+
+static struct ipmi_user_hndl ipmi_poweroff_handler =
+{
+ .ipmi_recv_hndl = receive_handler
+};
+
+
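+/* Send a message and sleep on a semaphore until the receive handler
+   posts it; the return value is the completion code from the
+   response. */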
+static int ipmi_request_wait_for_response(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+ struct semaphore sem;
+
+ sema_init (&sem, 0);
+
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &sem,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv)
+ return rv;
+
+ down (&sem);
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/* We are in run-to-completion mode, no semaphore is desired. */
+static int ipmi_request_in_rc_mode(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv)
+ return rv;
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA 0x2c
+#define IPMI_ATCA_SET_POWER_CMD 0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
+#define IPMI_PICMG_ID 0
+
+static int ipmi_atca_detect (ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ /*
+ * Use get address info to check and see if we are ATCA
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+ data[0] = IPMI_PICMG_ID;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_wait_for_response(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ return !rv;
+}
+
+static void ipmi_poweroff_atca (ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[4];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "Powering down via ATCA power command\n");
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+ data[0] = IPMI_PICMG_ID;
+ data[1] = 0; /* FRU id */
+ data[2] = 0; /* Power Level */
+ data[3] = 0; /* Don't change saved presets */
+ send_msg.data = data;
+ send_msg.data_len = sizeof (data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
+ " IPMI error 0x%x\n", rv);
+ goto out;
+ }
+
+ out:
+ return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1 0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE 0x84
+#define OEM_GRP_CMD_SET_POWER_STATE 0x82
+#define IPMI_NETFN_OEM_8 0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
+#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
+#define IPMI_NETFN_SENSOR_EVT 0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
+
+#define IPMI_CPI1_PRODUCT_ID 0x000157
+#define IPMI_CPI1_MANUFACTURER_ID 0x0108
+
+static int ipmi_cpi1_detect (ipmi_user_t user)
+{
+ return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+ && (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1 (ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+ int slot;
+ unsigned char hotswap_ipmb;
+ unsigned char aer_addr;
+ unsigned char aer_lun;
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
+
+ /*
+ * Get IPMI ipmb address
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ slot = halt_recv_msg.msg.data[1];
+ hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+ /*
+ * Get active event receiver
+ */
+ send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+ send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ aer_addr = halt_recv_msg.msg.data[1];
+ aer_lun = halt_recv_msg.msg.data[2];
+
+ /*
+ * Setup IPMB address target instead of local target
+ */
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.channel = 0;
+ ipmb_addr.slave_addr = aer_addr;
+ ipmb_addr.lun = aer_lun;
+
+ /*
+ * Send request hotswap control to remove blade from dpv
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+ send_msg.data = &hotswap_ipmb;
+ send_msg.data_len = 1;
+ ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &ipmb_addr,
+ &send_msg);
+
+ /*
+ * Set reset asserted
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Reset asserted state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Power down state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ out:
+ return;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST 0
+#define IPMI_CHASSIS_CONTROL_CMD 0x02
+
+static int ipmi_chassis_detect (ipmi_user_t user)
+{
+ /* Chassis support, use it. */
+ return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis (ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "Powering down via IPMI chassis control command\n");
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+ send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+ data[0] = 0; /* Power down */
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to send chassis powerdown message,"
+ " IPMI error 0x%x\n", rv);
+ goto out;
+ }
+
+ out:
+ return;
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+ char *platform_type;
+ int (*detect)(ipmi_user_t user);
+ void (*poweroff_func)(ipmi_user_t user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+ { .platform_type = "ATCA",
+ .detect = ipmi_atca_detect,
+ .poweroff_func = ipmi_poweroff_atca },
+ { .platform_type = "CPI1",
+ .detect = ipmi_cpi1_detect,
+ .poweroff_func = ipmi_poweroff_cpi1 },
+	/* Chassis should generally be last; other things should override
+ it. */
+ { .platform_type = "chassis",
+ .detect = ipmi_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
+ / sizeof(struct poweroff_function))
+
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function (void)
+{
+ if (!ready)
+ return;
+
+ /* Use run-to-completion mode, since interrupts may be off. */
+ ipmi_user_set_run_to_completion(ipmi_user, 1);
+ specific_poweroff_func(ipmi_user);
+ ipmi_user_set_run_to_completion(ipmi_user, 0);
+}
+
+/* Wait for an IPMI interface to be installed; the first one installed
+   will be grabbed by this code and used to perform the powerdown. */
+static void ipmi_po_new_smi(int if_num)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ int i;
+
+ if (ready)
+ return;
+
+ rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, &ipmi_user);
+ if (rv) {
+ printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
+ rv);
+ return;
+ }
+
+ /*
+	 * Do a get device id and store some results, since this is
+ * used by several functions.
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+ send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_wait_for_response(ipmi_user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
+ " IPMI error 0x%x\n", rv);
+ goto out_err;
+ }
+
+ if (halt_recv_msg.msg.data_len < 12) {
+		printk(KERN_ERR PFX "(chassis) IPMI get device id info too"
+		       " short, was %d bytes, needed %d bytes\n",
+ halt_recv_msg.msg.data_len, 12);
+ goto out_err;
+ }
+
+ mfg_id = (halt_recv_msg.msg.data[7]
+ | (halt_recv_msg.msg.data[8] << 8)
+ | (halt_recv_msg.msg.data[9] << 16));
+ prod_id = (halt_recv_msg.msg.data[10]
+ | (halt_recv_msg.msg.data[11] << 8));
+ capabilities = halt_recv_msg.msg.data[6];
+
+
+ /* Scan for a poweroff method */
+ for (i=0; i<NUM_PO_FUNCS; i++) {
+ if (poweroff_functions[i].detect(ipmi_user))
+ goto found;
+ }
+
+ out_err:
+ printk(KERN_ERR PFX "Unable to find a poweroff function that"
+ " will work, giving up\n");
+ ipmi_destroy_user(ipmi_user);
+ return;
+
+ found:
+ printk(KERN_INFO PFX "Found a %s style poweroff function\n",
+ poweroff_functions[i].platform_type);
+ specific_poweroff_func = poweroff_functions[i].poweroff_func;
+ old_poweroff_func = pm_power_off;
+ pm_power_off = ipmi_poweroff_function;
+ ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+	/* This can never be called, because once the poweroff driver is
+	   registered, the interface can't go away until the poweroff
+	   driver is unregistered. */
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_po_new_smi,
+ .smi_gone = ipmi_po_smi_gone
+};
+
+
+/*
+ * Startup and shutdown functions.
+ */
+static int ipmi_poweroff_init (void)
+{
+ int rv;
+
+	printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+ " IPMI Powerdown via sys_reboot version "
+ IPMI_POWEROFF_VERSION ".\n");
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
+
+ return rv;
+}
+
+#ifdef MODULE
+static __exit void ipmi_poweroff_cleanup(void)
+{
+ int rv;
+
+ ipmi_smi_watcher_unregister(&smi_watcher);
+
+ if (ready) {
+ rv = ipmi_destroy_user(ipmi_user);
+ if (rv)
+ printk(KERN_ERR PFX "could not cleanup the IPMI"
+ " user: 0x%x\n", rv);
+ pm_power_off = old_poweroff_func;
+ }
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 000000000000..29de259a981e
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,2359 @@
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <asm/irq.h>
+#ifdef CONFIG_HIGH_RES_TIMERS
+#include <linux/hrtime.h>
+# if defined(schedule_next_int)
+/* Old high-res timer code, do translations. */
+# define get_arch_cycles(a) quick_update_jiffies_sub(a)
+# define arch_cycles_per_jiffy cycles_per_jiffies
+# endif
+static inline void add_usec_to_timer(struct timer_list *t, long v)
+{
+ t->sub_expires += nsec_to_arch_cycle(v * 1000);
+ while (t->sub_expires >= arch_cycles_per_jiffy)
+ {
+ t->expires++;
+ t->sub_expires -= arch_cycles_per_jiffy;
+ }
+}
+#endif
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi_smi.h>
+#include <asm/io.h>
+#include "ipmi_si_sm.h"
+#include <linux/init.h>
+
+#define IPMI_SI_VERSION "v33"
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC 10000
+#define SI_USEC_PER_JIFFY (1000000/HZ)
+#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
+ short timeout */
+
+enum si_intf_state {
+ SI_NORMAL,
+ SI_GETTING_FLAGS,
+ SI_GETTING_EVENTS,
+ SI_CLEARING_FLAGS,
+ SI_CLEARING_FLAGS_THEN_SET_IRQ,
+ SI_GETTING_MESSAGES,
+ SI_ENABLE_INTERRUPTS1,
+ SI_ENABLE_INTERRUPTS2
+ /* FIXME - add watchdog stuff. */
+};
+
+enum si_type {
+ SI_KCS, SI_SMIC, SI_BT
+};
+
+struct smi_info
+{
+ ipmi_smi_t intf;
+ struct si_sm_data *si_sm;
+ struct si_sm_handlers *handlers;
+ enum si_type si_type;
+ spinlock_t si_lock;
+ spinlock_t msg_lock;
+ struct list_head xmit_msgs;
+ struct list_head hp_xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ enum si_intf_state si_state;
+
+ /* Used to handle the various types of I/O that can occur with
+ IPMI */
+ struct si_sm_io io;
+ int (*io_setup)(struct smi_info *info);
+ void (*io_cleanup)(struct smi_info *info);
+ int (*irq_setup)(struct smi_info *info);
+ void (*irq_cleanup)(struct smi_info *info);
+ unsigned int io_size;
+
+ /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ is set to hold the flags until we are done handling everything
+ from the flags. */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+ unsigned char msg_flags;
+
+ /* If set to true, this will request events the next time the
+ state machine is idle. */
+ atomic_t req_events;
+
+ /* If true, run the state machine to completion on every send
+ call. Generally used after a panic to make sure stuff goes
+ out. */
+ int run_to_completion;
+
+ /* The I/O port of an SI interface. */
+ int port;
+
+ /* The space between start addresses of the two ports. For
+ instance, if the first port is 0xca2 and the spacing is 4, then
+ the second port is 0xca6. */
+ unsigned int spacing;
+
+ /* zero if no irq; */
+ int irq;
+
+ /* The timer for this si. */
+ struct timer_list si_timer;
+
+ /* The time (in jiffies) the last timeout occurred at. */
+ unsigned long last_timeout_jiffies;
+
+ /* Used to gracefully stop the timer without race conditions. */
+ volatile int stop_operation;
+ volatile int timer_stopped;
+
+ /* The driver will disable interrupts when it gets into a
+ situation where it cannot handle messages due to lack of
+ memory. Once that situation clears up, it will re-enable
+ interrupts. */
+ int interrupt_disabled;
+
+ unsigned char ipmi_si_dev_rev;
+ unsigned char ipmi_si_fw_rev_major;
+ unsigned char ipmi_si_fw_rev_minor;
+ unsigned char ipmi_version_major;
+ unsigned char ipmi_version_minor;
+
+ /* Slave address, could be reported from DMI. */
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+ spinlock_t count_lock;
+ unsigned long short_timeouts;
+ unsigned long long_timeouts;
+ unsigned long timeout_restarts;
+ unsigned long idles;
+ unsigned long interrupts;
+ unsigned long attentions;
+ unsigned long flag_fetches;
+ unsigned long hosed_count;
+ unsigned long complete_transactions;
+ unsigned long events;
+ unsigned long watchdog_pretimeouts;
+ unsigned long incoming_messages;
+};
+
+static void si_restart_short_timer(struct smi_info *smi_info);
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+ struct ipmi_smi_msg *msg)
+{
+ /* Deliver the message to the upper layer with the lock
+ released. */
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+}
+
+static void return_hosed_msg(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+	/* Make it a response. */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp_size = 3;
+
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
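+/* Pull the next message off the transmit queues (high-priority queue
+   first) and start the transaction; returns SI_SM_IDLE if there is
+   nothing to send. */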
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+ int rv;
+ struct list_head *entry = NULL;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+	/* No need to save flags, we already have interrupts off and we
+ already hold the SMI lock. */
+ spin_lock(&(smi_info->msg_lock));
+
+ /* Pick the high priority queue first. */
+ if (! list_empty(&(smi_info->hp_xmit_msgs))) {
+ entry = smi_info->hp_xmit_msgs.next;
+ } else if (! list_empty(&(smi_info->xmit_msgs))) {
+ entry = smi_info->xmit_msgs.next;
+ }
+
+ if (!entry) {
+ smi_info->curr_msg = NULL;
+ rv = SI_SM_IDLE;
+ } else {
+ int err;
+
+ list_del(entry);
+ smi_info->curr_msg = list_entry(entry,
+ struct ipmi_smi_msg,
+ link);
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+ printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+ err = smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ if (err) {
+ return_hosed_msg(smi_info);
+ }
+
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ }
+ spin_unlock(&(smi_info->msg_lock));
+
+ return rv;
+}
+
+static void start_enable_irq(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ /* If we are enabling interrupts, we have to tell the
+ BMC to use them. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_ENABLE_INTERRUPTS1;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+/* When we have a situation where we run out of memory and cannot
+ allocate messages, we just leave them in the BMC and run the system
+ polled until we can allocate some memory. Once we have some
+ memory, we will re-enable the interrupt. */
+static inline void disable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ disable_irq_nosync(smi_info->irq);
+ smi_info->interrupt_disabled = 1;
+ }
+}
+
+static inline void enable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ enable_irq(smi_info->irq);
+ smi_info->interrupt_disabled = 0;
+ }
+}
+
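+/* Act on the flags from the last get-message-flags response, one
+   condition at a time: watchdog pretimeout first, then received
+   messages, then buffered events. */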
+static void handle_flags(struct smi_info *smi_info)
+{
+ if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ spin_lock(&smi_info->count_lock);
+ smi_info->watchdog_pretimeouts++;
+ spin_unlock(&smi_info->count_lock);
+
+ start_clear_flags(smi_info);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ spin_lock(&(smi_info->si_lock));
+ } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+ /* Messages available. */
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ disable_si_irq(smi_info);
+ smi_info->si_state = SI_NORMAL;
+ return;
+ }
+ enable_si_irq(smi_info);
+
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+ } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+ /* Events available. */
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ disable_si_irq(smi_info);
+ smi_info->si_state = SI_NORMAL;
+ return;
+ }
+ enable_si_irq(smi_info);
+
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+ } else {
+ smi_info->si_state = SI_NORMAL;
+ }
+}
+
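+/* A transaction has completed; what to do with the result depends on
+   which state issued the request. */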
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+
+ do_gettimeofday(&t);
+ printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+ switch (smi_info->si_state) {
+ case SI_NORMAL:
+ if (!smi_info->curr_msg)
+ break;
+
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+		/* Do this here because deliver_recv_msg() releases the
+ lock, and a new message can be put in during the
+ time the lock is released. */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+ break;
+
+ case SI_GETTING_FLAGS:
+ {
+ unsigned char msg[4];
+ unsigned int len;
+
+ /* We got the flags from the SMI, now handle them. */
+ len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ /* Error fetching flags, just give up for
+ now. */
+ smi_info->si_state = SI_NORMAL;
+ } else if (len < 4) {
+ /* Hmm, no flags. That's technically illegal, but
+ don't use uninitialized data. */
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ smi_info->msg_flags = msg[3];
+ handle_flags(smi_info);
+ }
+ break;
+ }
+
+ case SI_CLEARING_FLAGS:
+ case SI_CLEARING_FLAGS_THEN_SET_IRQ:
+ {
+ unsigned char msg[3];
+
+ /* We cleared the flags. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+ if (msg[2] != 0) {
+ /* Error clearing flags */
+ printk(KERN_WARNING
+ "ipmi_si: Error clearing flags: %2.2x\n",
+ msg[2]);
+ }
+ if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
+ start_enable_irq(smi_info);
+ else
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+
+ case SI_GETTING_EVENTS:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+		/* Do this here because deliver_recv_msg() releases the
+ lock, and a new message can be put in during the
+ time the lock is released. */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(smi_info);
+ } else {
+ spin_lock(&smi_info->count_lock);
+ smi_info->events++;
+ spin_unlock(&smi_info->count_lock);
+
+ /* Do this before we deliver the message
+ because delivering the message releases the
+ lock and something else can mess with the
+ state. */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_GETTING_MESSAGES:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+		/* Do this here because deliver_recv_msg() releases the
+ lock, and a new message can be put in during the
+ time the lock is released. */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(smi_info);
+ } else {
+ spin_lock(&smi_info->count_lock);
+ smi_info->incoming_messages++;
+ spin_unlock(&smi_info->count_lock);
+
+ /* Do this before we deliver the message
+ because delivering the message releases the
+ lock and something else can mess with the
+ state. */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_ENABLE_INTERRUPTS1:
+ {
+ unsigned char msg[4];
+
+		/* We got the current global enables from the BMC; now
+		   turn on the message queue interrupt enable. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ printk(KERN_WARNING
+ "ipmi_si: Could not enable interrupts"
+ ", failed get, using polled mode.\n");
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = msg[3] | 1; /* enable msg queue int */
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+ }
+ break;
+ }
+
+ case SI_ENABLE_INTERRUPTS2:
+ {
+ unsigned char msg[4];
+
+		/* We got the response to our set of the global enables. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ printk(KERN_WARNING
+ "ipmi_si: Could not enable interrupts"
+ ", failed set, using polled mode.\n");
+ }
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ }
+}
+
+/* Called on timeouts and events. Timeouts should pass the elapsed
+ time, interrupts should pass in zero. */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ int time)
+{
+ enum si_sm_result si_sm_result;
+
+ restart:
+ /* There used to be a loop here that waited a little while
+ (around 25us) before giving up. That turned out to be
+	   pointless; the minimum delays I was seeing were in the 300us
+ range, which is far too long to wait in an interrupt. So
+ we just run until the state machine tells us something
+ happened or it needs a delay. */
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+ time = 0;
+ while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+ {
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ }
+
+ if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
+ {
+ spin_lock(&smi_info->count_lock);
+ smi_info->complete_transactions++;
+ spin_unlock(&smi_info->count_lock);
+
+ handle_transaction_done(smi_info);
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ }
+ else if (si_sm_result == SI_SM_HOSED)
+ {
+ spin_lock(&smi_info->count_lock);
+ smi_info->hosed_count++;
+ spin_unlock(&smi_info->count_lock);
+
+		/* Do this before return_hosed_msg(), because that
+ releases the lock. */
+ smi_info->si_state = SI_NORMAL;
+ if (smi_info->curr_msg != NULL) {
+ /* If we were handling a user message, format
+ a response to send to the upper layer to
+ tell it about the error. */
+ return_hosed_msg(smi_info);
+ }
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ }
+
+ /* We prefer handling attn over new messages. */
+ if (si_sm_result == SI_SM_ATTN)
+ {
+ unsigned char msg[2];
+
+ spin_lock(&smi_info->count_lock);
+ smi_info->attentions++;
+ spin_unlock(&smi_info->count_lock);
+
+		/* Got an attn, send down a get message flags to see
+ what's causing it. It would be better to handle
+ this in the upper layer, but due to the way
+ interrupts work with the SMI, that's not really
+ possible. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+
+ /* If we are currently idle, try to start the next message. */
+ if (si_sm_result == SI_SM_IDLE) {
+ spin_lock(&smi_info->count_lock);
+ smi_info->idles++;
+ spin_unlock(&smi_info->count_lock);
+
+ si_sm_result = start_next_msg(smi_info);
+ if (si_sm_result != SI_SM_IDLE)
+ goto restart;
+ }
+
+ if ((si_sm_result == SI_SM_IDLE)
+ && (atomic_read(&smi_info->req_events)))
+ {
+ /* We are idle and the upper layer requested that I fetch
+ events, so do so. */
+ unsigned char msg[2];
+
+ spin_lock(&smi_info->count_lock);
+ smi_info->flag_fetches++;
+ spin_unlock(&smi_info->count_lock);
+
+ atomic_set(&smi_info->req_events, 0);
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+
+ return si_sm_result;
+}
+
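+/* Queue a message from the upper layer and, if the interface is
+   idle, start the transmission.  In run-to-completion mode the state
+   machine is instead driven synchronously until everything is
+   sent. */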
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg,
+ int priority)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->msg_lock), flags);
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+ printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+
+ if (smi_info->run_to_completion) {
+ /* If we are running to completion, then throw it in
+ the list and run transactions until everything is
+ clear. Priority doesn't matter here. */
+ list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
+
+ /* We have to release the msg lock and claim the smi
+ lock in this case, because of race conditions. */
+ spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info,
+ SI_SHORT_TIMEOUT_USEC);
+ }
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return;
+ } else {
+ if (priority > 0) {
+ list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
+ } else {
+ list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
+ }
+ }
+ spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ if ((smi_info->si_state == SI_NORMAL)
+ && (smi_info->curr_msg == NULL))
+ {
+ start_next_msg(smi_info);
+ si_restart_short_timer(smi_info);
+ }
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+static void set_run_to_completion(void *send_info, int i_run_to_completion)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ smi_info->run_to_completion = i_run_to_completion;
+ if (i_run_to_completion) {
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info,
+ SI_SHORT_TIMEOUT_USEC);
+ }
+ }
+
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+static void poll(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ smi_event_handler(smi_info, 0);
+}
+
+static void request_events(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ atomic_set(&smi_info->req_events, 1);
+}
+
+static int initialized = 0;
+
+/* Must be called with interrupts off and with the si_lock held. */
+static void si_restart_short_timer(struct smi_info *smi_info)
+{
+#if defined(CONFIG_HIGH_RES_TIMERS)
+ unsigned long flags;
+ unsigned long jiffies_now;
+
+ if (del_timer(&(smi_info->si_timer))) {
+ /* If we don't delete the timer, then it will go off
+ immediately, anyway. So we only process if we
+ actually delete the timer. */
+
+ /* We already have irqsave on, so no need for it
+ here. */
+ read_lock(&xtime_lock);
+ jiffies_now = jiffies;
+ smi_info->si_timer.expires = jiffies_now;
+		smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
+		read_unlock(&xtime_lock);
+
+ add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
+
+ add_timer(&(smi_info->si_timer));
+ spin_lock_irqsave(&smi_info->count_lock, flags);
+ smi_info->timeout_restarts++;
+ spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ }
+#endif
+}
+
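+/* The periodic timer function.  It computes the time since the last
+   run, feeds that to the state machine, and re-arms itself with a
+   short or long timeout depending on what the state machine asked
+   for (always long when running on interrupts). */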
+static void smi_timeout(unsigned long data)
+{
+ struct smi_info *smi_info = (struct smi_info *) data;
+ enum si_sm_result smi_result;
+ unsigned long flags;
+ unsigned long jiffies_now;
+ unsigned long time_diff;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ if (smi_info->stop_operation) {
+ smi_info->timer_stopped = 1;
+ return;
+ }
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+ printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+ jiffies_now = jiffies;
+ time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_result = smi_event_handler(smi_info, time_diff);
+
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+
+ smi_info->last_timeout_jiffies = jiffies_now;
+
+ if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
+ /* Running with interrupts, only do long timeouts. */
+ smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ spin_lock_irqsave(&smi_info->count_lock, flags);
+ smi_info->long_timeouts++;
+ spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ goto do_add_timer;
+ }
+
+ /* If the state machine asks for a short delay, then shorten
+ the timer timeout. */
+ if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ spin_lock_irqsave(&smi_info->count_lock, flags);
+ smi_info->short_timeouts++;
+ spin_unlock_irqrestore(&smi_info->count_lock, flags);
+#if defined(CONFIG_HIGH_RES_TIMERS)
+ read_lock(&xtime_lock);
+ smi_info->si_timer.expires = jiffies;
+ smi_info->si_timer.sub_expires
+ = get_arch_cycles(smi_info->si_timer.expires);
+ read_unlock(&xtime_lock);
+ add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
+#else
+ smi_info->si_timer.expires = jiffies + 1;
+#endif
+ } else {
+ spin_lock_irqsave(&smi_info->count_lock, flags);
+ smi_info->long_timeouts++;
+ spin_unlock_irqrestore(&smi_info->count_lock, flags);
+ smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+#if defined(CONFIG_HIGH_RES_TIMERS)
+ smi_info->si_timer.sub_expires = 0;
+#endif
+ }
+
+ do_add_timer:
+ add_timer(&(smi_info->si_timer));
+}
+
+static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ spin_lock(&smi_info->count_lock);
+ smi_info->interrupts++;
+ spin_unlock(&smi_info->count_lock);
+
+ if (smi_info->stop_operation)
+ goto out;
+
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+ printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+ smi_event_handler(smi_info, 0);
+ out:
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return IRQ_HANDLED;
+}
+
+static struct ipmi_smi_handlers handlers =
+{
+ .owner = THIS_MODULE,
+ .sender = sender,
+ .request_events = request_events,
+ .set_run_to_completion = set_run_to_completion,
+ .poll = poll,
+};
+
+/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS. */
+
+#define SI_MAX_PARMS 4
+#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
+static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
+{ NULL, NULL, NULL, NULL };
+
+#define DEVICE_NAME "ipmi_si"
+
+#define DEFAULT_KCS_IO_PORT 0xca2
+#define DEFAULT_SMIC_IO_PORT 0xca9
+#define DEFAULT_BT_IO_PORT 0xe4
+#define DEFAULT_REGSPACING 1
+
+static int si_trydefaults = 1;
+static char *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static int num_ports;
+static int irqs[SI_MAX_PARMS];
+static int num_irqs;
+static int regspacings[SI_MAX_PARMS];
+static int num_regspacings = 0;
+static int regsizes[SI_MAX_PARMS];
+static int num_regsizes = 0;
+static int regshifts[SI_MAX_PARMS];
+static int num_regshifts = 0;
+static int slave_addrs[SI_MAX_PARMS];
+static int num_slave_addrs = 0;
+
+
+module_param_named(trydefaults, si_trydefaults, bool, 0);
+MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
+		 " default scan for KCS, SMIC, and BT interfaces at their"
+		 " standard addresses");
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, the types"
+		 " separated by commas. The types are 'kcs',"
+		 " 'smic', and 'bt'. For example type=kcs,bt will set"
+		 " the first interface to kcs and the second to bt");
+module_param_array(addrs, long, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is in memory. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_array(ports, int, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is a port. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_array(irqs, int, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+		 " IRQ numbers separated by commas. Only use if an interface"
+		 " has an interrupt. Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_array(regspacings, int, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+ " and each successive register used by the interface. For"
+ " instance, if the start address is 0xca2 and the spacing"
+ " is 2, then the second address is at 0xca4. Defaults"
+ " to 1.");
+module_param_array(regsizes, int, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+ " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+		 " 16-bit, 32-bit, or 64-bit register. Use this if the"
+		 " 8-bit IPMI register has to be read from a larger"
+ " register.");
+module_param_array(regshifts, int, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+		 " IPMI register, in bits. For instance, if the data"
+		 " is read from a 32-bit word and the IPMI data is in"
+		 " bits 8-15, then the shift would be 8.");
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+ " the controller. Normally this is 0x20, but can be"
+ " overridden by this parm. This is an array indexed"
+ " by interface number.");
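+/* Example (illustrative values only): a machine with a KCS interface
+   at the standard port and a BT interface at 0xe4 might be loaded
+   with "modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 trydefaults=0". */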
+
+
+#define IPMI_MEM_ADDR_SPACE 1
+#define IPMI_IO_ADDR_SPACE 2
+
+#if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
+static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
+{
+ int i;
+
+ for (i = 0; i < SI_MAX_PARMS; ++i) {
+ /* Don't check our address. */
+ if (i == intf)
+ continue;
+ if (si_type[i] != NULL) {
+ if ((addr_space == IPMI_MEM_ADDR_SPACE &&
+ base_addr == addrs[i]) ||
+ (addr_space == IPMI_IO_ADDR_SPACE &&
+ base_addr == ports[i]))
+ return 0;
+ }
+ else
+ break;
+ }
+
+ return 1;
+}
+#endif
+
+static int std_irq_setup(struct smi_info *info)
+{
+ int rv;
+
+ if (!info->irq)
+ return 0;
+
+ rv = request_irq(info->irq,
+ si_irq_handler,
+ SA_INTERRUPT,
+ DEVICE_NAME,
+ info);
+ if (rv) {
+ printk(KERN_WARNING
+ "ipmi_si: %s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
+ info->irq = 0;
+ } else {
+ printk(" Using irq %d\n", info->irq);
+ }
+
+ return rv;
+}
+
+static void std_irq_cleanup(struct smi_info *info)
+{
+ if (!info->irq)
+ return;
+
+ free_irq(info->irq, info);
+}
+
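+/* Register N of an interface lives at base + N * regspacing.  For
+   regsize 2 or 4, the byte the state machine wants is recovered on
+   reads with (value >> regshift) & 0xff, and written back shifted
+   left by regshift. */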
+static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int *addr = io->info;
+
+ return inb((*addr)+(offset*io->regspacing));
+}
+
+static void port_outb(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int *addr = io->info;
+
+ outb(b, (*addr)+(offset * io->regspacing));
+}
+
+static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int *addr = io->info;
+
+ return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int *addr = io->info;
+
+ outw(b << io->regshift, (*addr)+(offset * io->regspacing));
+}
+
+static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int *addr = io->info;
+
+ return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int *addr = io->info;
+
+ outl(b << io->regshift, (*addr)+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct smi_info *info)
+{
+ unsigned int *addr = info->io.info;
+ int mapsize;
+
+ if (addr && (*addr)) {
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
+
+ release_region (*addr, mapsize);
+ }
+ kfree(info);
+}
+
+static int port_setup(struct smi_info *info)
+{
+ unsigned int *addr = info->io.info;
+ int mapsize;
+
+ if (!addr || (!*addr))
+ return -ENODEV;
+
+ info->io_cleanup = port_cleanup;
+
+ /* Figure out the actual inb/inw/inl/etc routine to use based
+ upon the register size. */
+ switch (info->io.regsize) {
+ case 1:
+ info->io.inputb = port_inb;
+ info->io.outputb = port_outb;
+ break;
+ case 2:
+ info->io.inputb = port_inw;
+ info->io.outputb = port_outw;
+ break;
+ case 4:
+ info->io.inputb = port_inl;
+ info->io.outputb = port_outl;
+ break;
+ default:
+ printk("ipmi_si: Invalid register size: %d\n",
+ info->io.regsize);
+ return -EINVAL;
+ }
+
+	/* Calculate the total amount of memory to claim. This is an
+	 * unusual looking calculation, but it avoids claiming any
+	 * more memory than it has to. It will claim everything
+	 * from the first address to the end of the last full
+	 * register. */
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
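+	/* For example, io_size = 2 with regspacing = 4 and regsize = 1
+	   claims (2 * 4) - (4 - 1) = 5 bytes: the first register, the
+	   three-byte gap, and the second register. */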
+
+ if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
+ return -EIO;
+ return 0;
+}
+
+static int try_init_port(int intf_num, struct smi_info **new_info)
+{
+ struct smi_info *info;
+
+ if (!ports[intf_num])
+ return -ENODEV;
+
+ if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
+ ports[intf_num]))
+ return -ENODEV;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(*info));
+
+ info->io_setup = port_setup;
+ info->io.info = &(ports[intf_num]);
+ info->io.addr = NULL;
+ info->io.regspacing = regspacings[intf_num];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsizes[intf_num];
+ if (!info->io.regsize)
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshifts[intf_num];
+ info->irq = 0;
+ info->irq_setup = NULL;
+ *new_info = info;
+
+ if (si_type[intf_num] == NULL)
+ si_type[intf_num] = "kcs";
+
+ printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
+ si_type[intf_num], ports[intf_num]);
+ return 0;
+}
+
+static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void mem_outb(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void mem_outw(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+	writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void mem_outl(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void mem_outq(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_cleanup(struct smi_info *info)
+{
+ unsigned long *addr = info->io.info;
+ int mapsize;
+
+ if (info->io.addr) {
+ iounmap(info->io.addr);
+
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
+
+ release_mem_region(*addr, mapsize);
+ }
+ kfree(info);
+}
+
+static int mem_setup(struct smi_info *info)
+{
+ unsigned long *addr = info->io.info;
+ int mapsize;
+
+ if (!addr || (!*addr))
+ return -ENODEV;
+
+ info->io_cleanup = mem_cleanup;
+
+ /* Figure out the actual readb/readw/readl/etc routine to use based
+ upon the register size. */
+ switch (info->io.regsize) {
+ case 1:
+ info->io.inputb = mem_inb;
+ info->io.outputb = mem_outb;
+ break;
+ case 2:
+ info->io.inputb = mem_inw;
+ info->io.outputb = mem_outw;
+ break;
+ case 4:
+ info->io.inputb = mem_inl;
+ info->io.outputb = mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ info->io.inputb = mem_inq;
+ info->io.outputb = mem_outq;
+ break;
+#endif
+ default:
+ printk("ipmi_si: Invalid register size: %d\n",
+ info->io.regsize);
+ return -EINVAL;
+ }
+
+	/* Calculate the total amount of memory to claim. This is an
+	 * unusual looking calculation, but it avoids claiming any
+	 * more memory than it has to. It will claim everything
+	 * from the first address to the end of the last full
+	 * register. */
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
+
+ if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
+ return -EIO;
+
+ info->io.addr = ioremap(*addr, mapsize);
+ if (info->io.addr == NULL) {
+ release_mem_region(*addr, mapsize);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int try_init_mem(int intf_num, struct smi_info **new_info)
+{
+ struct smi_info *info;
+
+ if (!addrs[intf_num])
+ return -ENODEV;
+
+ if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
+ addrs[intf_num]))
+ return -ENODEV;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(*info));
+
+ info->io_setup = mem_setup;
+ info->io.info = &addrs[intf_num];
+ info->io.addr = NULL;
+ info->io.regspacing = regspacings[intf_num];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsizes[intf_num];
+ if (!info->io.regsize)
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshifts[intf_num];
+ info->irq = 0;
+ info->irq_setup = NULL;
+ *new_info = info;
+
+ if (si_type[intf_num] == NULL)
+ si_type[intf_num] = "kcs";
+
+ printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
+ si_type[intf_num], addrs[intf_num]);
+ return 0;
+}
+
+
+#ifdef CONFIG_ACPI_INTERPRETER
+
+#include <linux/acpi.h>
+
+/* Once we get an ACPI failure, we don't try any more, because we go
+ through the tables sequentially. Once we don't find a table, there
+ are no more. */
+static int acpi_failure = 0;
+
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(void *context)
+{
+ struct smi_info *smi_info = context;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ spin_lock(&smi_info->count_lock);
+ smi_info->interrupts++;
+ spin_unlock(&smi_info->count_lock);
+
+ if (smi_info->stop_operation)
+ goto out;
+
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+ printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+#endif
+ smi_event_handler(smi_info, 0);
+ out:
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static int acpi_gpe_irq_setup(struct smi_info *info)
+{
+ acpi_status status;
+
+ if (!info->irq)
+ return 0;
+
+ /* FIXME - is level triggered right? */
+ status = acpi_install_gpe_handler(NULL,
+ info->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ info);
+ if (status != AE_OK) {
+ printk(KERN_WARNING
+ "ipmi_si: %s unable to claim ACPI GPE %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
+ info->irq = 0;
+ return -EINVAL;
+ } else {
+ printk(" Using ACPI GPE %d\n", info->irq);
+ return 0;
+ }
+}
+
+static void acpi_gpe_irq_cleanup(struct smi_info *info)
+{
+ if (!info->irq)
+ return;
+
+ acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
+}
+
+/*
+ * Defined at
+ * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /* If bit 0 of InterruptType is set, then this is the SCI
+ interrupt in the GPEx_STS register. */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /* If bit 1 of InterruptType is set, then this is the I/O
+ APIC/SAPIC interrupt. */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_acpi(int intf_num, struct smi_info **new_info)
+{
+ struct smi_info *info;
+ acpi_status status;
+ struct SPMITable *spmi;
+ char *io_type;
+ u8 addr_space;
+
+ if (acpi_failure)
+ return -ENODEV;
+
+ status = acpi_get_firmware_table("SPMI", intf_num+1,
+ ACPI_LOGICAL_ADDRESSING,
+ (struct acpi_table_header **) &spmi);
+ if (status != AE_OK) {
+ acpi_failure = 1;
+ return -ENODEV;
+ }
+
+ if (spmi->IPMIlegacy != 1) {
+ printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ addr_space = IPMI_MEM_ADDR_SPACE;
+ else
+ addr_space = IPMI_IO_ADDR_SPACE;
+ if (!is_new_interface(-1, addr_space, spmi->addr.address))
+ return -ENODEV;
+
+ if (!spmi->addr.register_bit_width) {
+ acpi_failure = 1;
+ return -ENODEV;
+ }
+
+ /* Figure out the interface type. */
+ switch (spmi->InterfaceType)
+ {
+ case 1: /* KCS */
+ si_type[intf_num] = "kcs";
+ break;
+
+ case 2: /* SMIC */
+ si_type[intf_num] = "smic";
+ break;
+
+ case 3: /* BT */
+ si_type[intf_num] = "bt";
+ break;
+
+ default:
+ printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
+ return -EIO;
+ }
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(*info));
+
+ if (spmi->InterruptType & 1) {
+ /* We've got a GPE interrupt. */
+ info->irq = spmi->GPE;
+ info->irq_setup = acpi_gpe_irq_setup;
+ info->irq_cleanup = acpi_gpe_irq_cleanup;
+ } else if (spmi->InterruptType & 2) {
+ /* We've got an APIC/SAPIC interrupt. */
+ info->irq = spmi->GlobalSystemInterrupt;
+ info->irq_setup = std_irq_setup;
+ info->irq_cleanup = std_irq_cleanup;
+ } else {
+ /* Use the default interrupt setting. */
+ info->irq = 0;
+ info->irq_setup = NULL;
+ }
+
+ regspacings[intf_num] = spmi->addr.register_bit_width / 8;
+ info->io.regspacing = spmi->addr.register_bit_width / 8;
+ regsizes[intf_num] = regspacings[intf_num];
+ info->io.regsize = regsizes[intf_num];
+ regshifts[intf_num] = spmi->addr.register_bit_offset;
+ info->io.regshift = regshifts[intf_num];
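+	/* For example, a register_bit_width of 32 yields a 4-byte
+	   regspacing and regsize; regshift is taken directly from
+	   register_bit_offset. */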
+
+ if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ io_type = "memory";
+ info->io_setup = mem_setup;
+ addrs[intf_num] = spmi->addr.address;
+ info->io.info = &(addrs[intf_num]);
+ } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ io_type = "I/O";
+ info->io_setup = port_setup;
+ ports[intf_num] = spmi->addr.address;
+ info->io.info = &(ports[intf_num]);
+ } else {
+ kfree(info);
+ printk("ipmi_si: Unknown ACPI I/O Address type\n");
+ return -EIO;
+ }
+
+ *new_info = info;
+
+ printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
+ si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_X86
+typedef struct dmi_ipmi_data
+{
+ u8 type;
+ u8 addr_space;
+ unsigned long base_addr;
+ u8 irq;
+ u8 offset;
+ u8 slave_addr;
+} dmi_ipmi_data_t;
+
+static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
+static int dmi_data_entries;
+
+typedef struct dmi_header
+{
+ u8 type;
+ u8 length;
+ u16 handle;
+} dmi_header_t;
+
+static int decode_dmi(dmi_header_t *dm, int intf_num)
+{
+ u8 *data = (u8 *)dm;
+ unsigned long base_addr;
+ u8 reg_spacing;
+ u8 len = dm->length;
+ dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
+
+ ipmi_data->type = data[4];
+
+ memcpy(&base_addr, data+8, sizeof(unsigned long));
+ if (len >= 0x11) {
+ if (base_addr & 1) {
+ /* I/O */
+ base_addr &= 0xFFFE;
+ ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
+ }
+ else {
+ /* Memory */
+ ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
+ }
+ /* If bit 4 of byte 0x10 is set, then the lsb for the address
+ is odd. */
+ ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
+
+ ipmi_data->irq = data[0x11];
+
+ /* The top two bits of byte 0x10 hold the register spacing. */
+ reg_spacing = (data[0x10] & 0xC0) >> 6;
+ switch(reg_spacing){
+ case 0x00: /* Byte boundaries */
+ ipmi_data->offset = 1;
+ break;
+ case 0x01: /* 32-bit boundaries */
+ ipmi_data->offset = 4;
+ break;
+ case 0x02: /* 16-byte boundaries */
+ ipmi_data->offset = 16;
+ break;
+ default:
+ /* Some other interface, just ignore it. */
+ return -EIO;
+ }
+ } else {
+ /* Old DMI spec. */
+ ipmi_data->base_addr = base_addr;
+ ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
+ ipmi_data->offset = 1;
+ }
+
+ ipmi_data->slave_addr = data[6];
+
+ if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
+ dmi_data_entries++;
+ return 0;
+ }
+
+ memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));
+
+ return -1;
+}
+
+static int dmi_table(u32 base, int len, int num)
+{
+ u8 *buf;
+ struct dmi_header *dm;
+ u8 *data;
+ int i=1;
+ int status=-1;
+ int intf_num = 0;
+
+ buf = ioremap(base, len);
+ if(buf==NULL)
+ return -1;
+
+ data = buf;
+
+ while(i<num && (data - buf) < len)
+ {
+ dm=(dmi_header_t *)data;
+
+ if((data-buf+dm->length) >= len)
+ break;
+
+ if (dm->type == 38) {
+ if (decode_dmi(dm, intf_num) == 0) {
+ intf_num++;
+ if (intf_num >= SI_MAX_DRIVERS)
+ break;
+ }
+ }
+
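+		/* Each DMI structure is followed by NUL-terminated
+		   strings ending with an extra NUL; skip past the
+		   terminating double NUL to the next structure. */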
+ data+=dm->length;
+ while((data-buf) < len && (*data || data[1]))
+ data++;
+ data+=2;
+ i++;
+ }
+ iounmap(buf);
+
+ return status;
+}
+
+static inline int dmi_checksum(u8 *buf)
+{
+ u8 sum=0;
+ int a;
+
+ for(a=0; a<15; a++)
+ sum+=buf[a];
+ return (sum==0);
+}
+
+static int dmi_decode(void)
+{
+ u8 buf[15];
+ u32 fp=0xF0000;
+
+#ifdef CONFIG_SIMNOW
+ return -1;
+#endif
+
+ while(fp < 0xFFFFF)
+ {
+ isa_memcpy_fromio(buf, fp, 15);
+ if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
+ {
+ u16 num=buf[13]<<8|buf[12];
+ u16 len=buf[7]<<8|buf[6];
+ u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
+
+ if(dmi_table(base, len, num) == 0)
+ return 0;
+ }
+ fp+=16;
+ }
+
+ return -1;
+}
+
+static int try_init_smbios(int intf_num, struct smi_info **new_info)
+{
+ struct smi_info *info;
+ dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
+ char *io_type;
+
+ if (intf_num >= dmi_data_entries)
+ return -ENODEV;
+
+ switch(ipmi_data->type) {
+ case 0x01: /* KCS */
+ si_type[intf_num] = "kcs";
+ break;
+ case 0x02: /* SMIC */
+ si_type[intf_num] = "smic";
+ break;
+ case 0x03: /* BT */
+ si_type[intf_num] = "bt";
+ break;
+ default:
+ return -EIO;
+ }
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(*info));
+
+ if (ipmi_data->addr_space == 1) {
+ io_type = "memory";
+ info->io_setup = mem_setup;
+ addrs[intf_num] = ipmi_data->base_addr;
+ info->io.info = &(addrs[intf_num]);
+ } else if (ipmi_data->addr_space == 2) {
+ io_type = "I/O";
+ info->io_setup = port_setup;
+ ports[intf_num] = ipmi_data->base_addr;
+ info->io.info = &(ports[intf_num]);
+ } else {
+ kfree(info);
+ printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
+ return -EIO;
+ }
+
+ regspacings[intf_num] = ipmi_data->offset;
+ info->io.regspacing = regspacings[intf_num];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshifts[intf_num];
+
+ info->slave_addr = ipmi_data->slave_addr;
+
+ irqs[intf_num] = ipmi_data->irq;
+
+ *new_info = info;
+
+ printk("ipmi_si: Found SMBIOS-specified state machine at %s"
+ " address 0x%lx, slave address 0x%x\n",
+ io_type, (unsigned long)ipmi_data->base_addr,
+ ipmi_data->slave_addr);
+ return 0;
+}
+#endif /* CONFIG_X86 */
+
+#ifdef CONFIG_PCI
+
+#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_HP_VENDOR_ID 0x103C
+#define PCI_MMC_DEVICE_ID 0x121A
+#define PCI_MMC_ADDR_CW 0x10
+
+/* Avoid more than one attempt to probe pci smic. */
+static int pci_smic_checked = 0;
+
+static int find_pci_smic(int intf_num, struct smi_info **new_info)
+{
+ struct smi_info *info;
+ int error;
+ struct pci_dev *pci_dev = NULL;
+ u16 base_addr;
+ int fe_rmc = 0;
+
+ if (pci_smic_checked)
+ return -ENODEV;
+
+ pci_smic_checked = 1;
+
+ if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
+ NULL)))
+ ;
+ else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
+ pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
+ fe_rmc = 1;
+ else
+ return -ENODEV;
+
+ error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
+ if (error)
+ {
+ pci_dev_put(pci_dev);
+ printk(KERN_ERR
+ "ipmi_si: pci_read_config_word() failed (%d).\n",
+ error);
+ return -ENODEV;
+ }
+
+ /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
+ if (!(base_addr & 0x0001))
+ {
+ pci_dev_put(pci_dev);
+ printk(KERN_ERR
+ "ipmi_si: memory mapped I/O not supported for PCI"
+ " smic.\n");
+ return -ENODEV;
+ }
+
+ base_addr &= 0xFFFE;
+ if (!fe_rmc)
+ /* Data register starts at base address + 1 in eRMC */
+ ++base_addr;
+
+ if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
+ pci_dev_put(pci_dev);
+ return -ENODEV;
+ }
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pci_dev_put(pci_dev);
+ printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(*info));
+
+ info->io_setup = port_setup;
+ ports[intf_num] = base_addr;
+ info->io.info = &(ports[intf_num]);
+ info->io.regspacing = regspacings[intf_num];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshifts[intf_num];
+
+ *new_info = info;
+
+ irqs[intf_num] = pci_dev->irq;
+ si_type[intf_num] = "smic";
+
+ printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
+ (long unsigned int) base_addr);
+
+ pci_dev_put(pci_dev);
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
+{
+#ifdef CONFIG_PCI
+ if (find_pci_smic(intf_num, new_info)==0)
+ return 0;
+#endif
+ /* Include other methods here. */
+
+ return -ENODEV;
+}
+
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ enum si_sm_result smi_result;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Do a Get Device ID command, since it comes back with some
+ useful info. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ for (;;)
+ {
+ if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 100);
+ }
+ else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+ {
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 0);
+ }
+ else
+ break;
+ }
+ if (smi_result == SI_SM_HOSED) {
+ /* We couldn't get the state machine to run, so whatever's at
+ the port is probably not an IPMI SMI interface. */
+ rv = -ENODEV;
+ goto out;
+ }
+
+ /* Otherwise, we got some data. */
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+ if (resp_len < 6) {
+ /* That's odd, it should be longer. */
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
+ /* That's odd, it shouldn't be able to fail. */
+ rv = -EINVAL;
+ goto out;
+ }
+
+ /* Record info from the get device id, in case we need it. */
+ smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
+ smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
+ smi_info->ipmi_si_fw_rev_minor = resp[6];
+ smi_info->ipmi_version_major = resp[7] & 0xf;
+ smi_info->ipmi_version_minor = resp[7] >> 4;
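+	/* (resp[7] is the BCD-encoded IPMI version from the Get Device
+	   ID response: major revision in the low nibble, minor in the
+	   high nibble.) */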
+
+ out:
+ kfree(resp);
+ return rv;
+}
+
+static int type_file_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = (char *) page;
+ struct smi_info *smi = data;
+
+ switch (smi->si_type) {
+ case SI_KCS:
+ return sprintf(out, "kcs\n");
+ case SI_SMIC:
+ return sprintf(out, "smic\n");
+ case SI_BT:
+ return sprintf(out, "bt\n");
+ default:
+ return 0;
+ }
+}
+
+static int stat_file_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = (char *) page;
+ struct smi_info *smi = data;
+
+ out += sprintf(out, "interrupts_enabled: %d\n",
+ smi->irq && !smi->interrupt_disabled);
+ out += sprintf(out, "short_timeouts: %ld\n",
+ smi->short_timeouts);
+ out += sprintf(out, "long_timeouts: %ld\n",
+ smi->long_timeouts);
+ out += sprintf(out, "timeout_restarts: %ld\n",
+ smi->timeout_restarts);
+ out += sprintf(out, "idles: %ld\n",
+ smi->idles);
+ out += sprintf(out, "interrupts: %ld\n",
+ smi->interrupts);
+ out += sprintf(out, "attentions: %ld\n",
+ smi->attentions);
+ out += sprintf(out, "flag_fetches: %ld\n",
+ smi->flag_fetches);
+ out += sprintf(out, "hosed_count: %ld\n",
+ smi->hosed_count);
+ out += sprintf(out, "complete_transactions: %ld\n",
+ smi->complete_transactions);
+ out += sprintf(out, "events: %ld\n",
+ smi->events);
+ out += sprintf(out, "watchdog_pretimeouts: %ld\n",
+ smi->watchdog_pretimeouts);
+ out += sprintf(out, "incoming_messages: %ld\n",
+ smi->incoming_messages);
+
+ return (out - ((char *) page));
+}
+
+/* Returns 0 if initialized, or negative on an error. */
+static int init_one_smi(int intf_num, struct smi_info **smi)
+{
+ int rv;
+ struct smi_info *new_smi;
+
+
+ rv = try_init_mem(intf_num, &new_smi);
+ if (rv)
+ rv = try_init_port(intf_num, &new_smi);
+#ifdef CONFIG_ACPI_INTERPRETER
+ if ((rv) && (si_trydefaults)) {
+ rv = try_init_acpi(intf_num, &new_smi);
+ }
+#endif
+#ifdef CONFIG_X86
+ if ((rv) && (si_trydefaults)) {
+ rv = try_init_smbios(intf_num, &new_smi);
+ }
+#endif
+ if ((rv) && (si_trydefaults)) {
+ rv = try_init_plug_and_play(intf_num, &new_smi);
+ }
+
+
+ if (rv)
+ return rv;
+
+ /* So we know not to free it unless we have allocated one. */
+ new_smi->intf = NULL;
+ new_smi->si_sm = NULL;
+ new_smi->handlers = NULL;
+
+ if (!new_smi->irq_setup) {
+ new_smi->irq = irqs[intf_num];
+ new_smi->irq_setup = std_irq_setup;
+ new_smi->irq_cleanup = std_irq_cleanup;
+ }
+
+ /* Default to KCS if no type is specified. */
+ if (si_type[intf_num] == NULL) {
+ if (si_trydefaults)
+ si_type[intf_num] = "kcs";
+ else {
+ rv = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ /* Set up the state machine to use. */
+ if (strcmp(si_type[intf_num], "kcs") == 0) {
+ new_smi->handlers = &kcs_smi_handlers;
+ new_smi->si_type = SI_KCS;
+ } else if (strcmp(si_type[intf_num], "smic") == 0) {
+ new_smi->handlers = &smic_smi_handlers;
+ new_smi->si_type = SI_SMIC;
+ } else if (strcmp(si_type[intf_num], "bt") == 0) {
+ new_smi->handlers = &bt_smi_handlers;
+ new_smi->si_type = SI_BT;
+ } else {
+ /* No support for anything else yet. */
+ rv = -EIO;
+ goto out_err;
+ }
+
+ /* Allocate the state machine's data and initialize it. */
+ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+ if (!new_smi->si_sm) {
+ printk(" Could not allocate state machine memory\n");
+ rv = -ENOMEM;
+ goto out_err;
+ }
+ new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
+
+ /* Now that we know the I/O size, we can set up the I/O. */
+ rv = new_smi->io_setup(new_smi);
+ if (rv) {
+ printk(" Could not set up I/O space\n");
+ goto out_err;
+ }
+
+ spin_lock_init(&(new_smi->si_lock));
+ spin_lock_init(&(new_smi->msg_lock));
+ spin_lock_init(&(new_smi->count_lock));
+
+ /* Do low-level detection first. */
+ if (new_smi->handlers->detect(new_smi->si_sm)) {
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ /* Attempt a get device id command. If it fails, we probably
+ don't have a SMI here. */
+ rv = try_get_dev_id(new_smi);
+ if (rv)
+ goto out_err;
+
+ /* Try to claim any interrupts. */
+ new_smi->irq_setup(new_smi);
+
+ INIT_LIST_HEAD(&(new_smi->xmit_msgs));
+ INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+ new_smi->curr_msg = NULL;
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+
+ new_smi->interrupt_disabled = 0;
+ new_smi->timer_stopped = 0;
+ new_smi->stop_operation = 0;
+
+ /* Start clearing the flags before we enable interrupts or the
+ timer to avoid racing with the timer. */
+ start_clear_flags(new_smi);
+ /* IRQ is defined to be set when non-zero. */
+ if (new_smi->irq)
+ new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+ /* The ipmi_register_smi() code does some operations to
+ determine the channel information, so we must be ready to
+ handle operations before it is called. This means we have
+ to stop the timer if we get an error after this point. */
+ init_timer(&(new_smi->si_timer));
+ new_smi->si_timer.data = (long) new_smi;
+ new_smi->si_timer.function = smi_timeout;
+ new_smi->last_timeout_jiffies = jiffies;
+ new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ add_timer(&(new_smi->si_timer));
+
+ rv = ipmi_register_smi(&handlers,
+ new_smi,
+ new_smi->ipmi_version_major,
+ new_smi->ipmi_version_minor,
+ new_smi->slave_addr,
+ &(new_smi->intf));
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si: Unable to register device: error %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
+ type_file_read_proc, NULL,
+ new_smi, THIS_MODULE);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si: Unable to create proc entry: %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
+ stat_file_read_proc, NULL,
+ new_smi, THIS_MODULE);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si: Unable to create proc entry: %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+
+ *smi = new_smi;
+
+ printk(" IPMI %s interface initialized\n", si_type[intf_num]);
+
+ return 0;
+
+ out_err_stop_timer:
+ new_smi->stop_operation = 1;
+
+ /* Wait for the timer to stop. This avoids problems with race
+ conditions removing the timer here. */
+ while (!new_smi->timer_stopped) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ out_err:
+ if (new_smi->intf)
+ ipmi_unregister_smi(new_smi->intf);
+
+ new_smi->irq_cleanup(new_smi);
+
+	/* Wait until we know that we are out of any interrupt
+	   handlers that might have been running before we freed
+	   the interrupt. */
+ synchronize_kernel();
+
+ if (new_smi->si_sm) {
+ if (new_smi->handlers)
+ new_smi->handlers->cleanup(new_smi->si_sm);
+ kfree(new_smi->si_sm);
+ }
+ new_smi->io_cleanup(new_smi);
+
+ return rv;
+}
+
+static __init int init_ipmi_si(void)
+{
+ int rv = 0;
+ int pos = 0;
+ int i;
+ char *str;
+
+ if (initialized)
+ return 0;
+ initialized = 1;
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ printk(KERN_INFO "IPMI System Interface driver version "
+ IPMI_SI_VERSION);
+ if (kcs_smi_handlers.version)
+ printk(", KCS version %s", kcs_smi_handlers.version);
+ if (smic_smi_handlers.version)
+ printk(", SMIC version %s", smic_smi_handlers.version);
+ if (bt_smi_handlers.version)
+ printk(", BT version %s", bt_smi_handlers.version);
+ printk("\n");
+
+#ifdef CONFIG_X86
+ dmi_decode();
+#endif
+
+ rv = init_one_smi(0, &(smi_infos[pos]));
+ if (rv && !ports[0] && si_trydefaults) {
+ /* If we are trying defaults and the initial port is
+ not set, then set it. */
+ si_type[0] = "kcs";
+ ports[0] = DEFAULT_KCS_IO_PORT;
+ rv = init_one_smi(0, &(smi_infos[pos]));
+ if (rv) {
+ /* No KCS - try SMIC */
+ si_type[0] = "smic";
+ ports[0] = DEFAULT_SMIC_IO_PORT;
+ rv = init_one_smi(0, &(smi_infos[pos]));
+ }
+ if (rv) {
+ /* No SMIC - try BT */
+ si_type[0] = "bt";
+ ports[0] = DEFAULT_BT_IO_PORT;
+ rv = init_one_smi(0, &(smi_infos[pos]));
+ }
+ }
+ if (rv == 0)
+ pos++;
+
+ for (i=1; i < SI_MAX_PARMS; i++) {
+ rv = init_one_smi(i, &(smi_infos[pos]));
+ if (rv == 0)
+ pos++;
+ }
+
+ if (smi_infos[0] == NULL) {
+ printk("ipmi_si: Unable to find any System Interface(s)\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(init_ipmi_si);
+
+static void __exit cleanup_one_si(struct smi_info *to_clean)
+{
+ int rv;
+ unsigned long flags;
+
+ if (! to_clean)
+ return;
+
+ /* Tell the timer and interrupt handlers that we are shutting
+ down. */
+ spin_lock_irqsave(&(to_clean->si_lock), flags);
+ spin_lock(&(to_clean->msg_lock));
+
+ to_clean->stop_operation = 1;
+
+ to_clean->irq_cleanup(to_clean);
+
+ spin_unlock(&(to_clean->msg_lock));
+ spin_unlock_irqrestore(&(to_clean->si_lock), flags);
+
+	/* Wait until we know that we are out of any interrupt
+	   handlers that might have been running before we freed
+	   the interrupt. */
+ synchronize_kernel();
+
+ /* Wait for the timer to stop. This avoids problems with race
+ conditions removing the timer here. */
+ while (!to_clean->timer_stopped) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ /* Interrupts and timeouts are stopped, now make sure the
+ interface is in a clean state. */
+ while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
+ poll(to_clean);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ rv = ipmi_unregister_smi(to_clean->intf);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si: Unable to unregister device: errno=%d\n",
+ rv);
+ }
+
+ to_clean->handlers->cleanup(to_clean->si_sm);
+
+ kfree(to_clean->si_sm);
+
+ to_clean->io_cleanup(to_clean);
+}
+
+static __exit void cleanup_ipmi_si(void)
+{
+ int i;
+
+ if (!initialized)
+ return;
+
+ for (i=0; i<SI_MAX_DRIVERS; i++) {
+ cleanup_one_si(smi_infos[i]);
+ }
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 000000000000..a0212b004016
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,120 @@
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines. This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* This is defined by the state machines themselves; it is an opaque
+   data type for them to use. */
+struct si_sm_data;
+
+/* The structure for doing I/O in the state machine. The state
+ machine doesn't have the actual I/O routines, they are done through
+ this interface. */
+struct si_sm_io
+{
+ unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
+ void (*outputb)(struct si_sm_io *io,
+ unsigned int offset,
+ unsigned char b);
+
+ /* Generic info used by the actual handling routines, the
+ state machine shouldn't touch these. */
+ void *info;
+ void *addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+};
+
+/* Results of SMI events. */
+enum si_sm_result
+{
+ SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+ SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
+ SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+ SI_SM_IDLE, /* The SM is in idle state. */
+ SI_SM_HOSED, /* The hardware violated the state machine. */
+ SI_SM_ATTN /* The hardware is asserting attn and the
+ state machine is idle. */
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers
+{
+ /* Put the version number of the state machine here so the
+ upper layer can print it. */
+ char *version;
+
+	/* Initialize the data and return the amount of I/O space to
+	   reserve for the interface. */
+ unsigned int (*init_data)(struct si_sm_data *smi,
+ struct si_sm_io *io);
+
+	/* Start a new transaction in the state machine. This will
+	   return -2 if the state machine is not idle, -1 if the size
+	   is invalid (too large or too small), or 0 if the transaction
+	   is successfully started. */
+ int (*start_transaction)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int size);
+
+ /* Return the results after the transaction. This will return
+ -1 if the buffer is too small, zero if no transaction is
+ present, or the actual length of the result data. */
+ int (*get_result)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int length);
+
+ /* Call this periodically (for a polled interface) or upon
+	   receiving an interrupt (for an interrupt-driven interface).
+ If interrupt driven, you should probably poll this
+ periodically when not in idle state. This should be called
+ with the time that passed since the last call, if it is
+ significant. Time is in microseconds. */
+ enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+ /* Attempt to detect an SMI. Returns 0 on success or nonzero
+ on failure. */
+ int (*detect)(struct si_sm_data *smi);
+
+ /* The interface is shutting down, so clean it up. */
+ void (*cleanup)(struct si_sm_data *smi);
+
+ /* Return the size of the SMI structure in bytes. */
+ int (*size)(void);
+};
+
+/* Current state machines that we can use. */
+extern struct si_sm_handlers kcs_smi_handlers;
+extern struct si_sm_handlers smic_smi_handlers;
+extern struct si_sm_handlers bt_smi_handlers;
+
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 000000000000..ae18747e670b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,599 @@
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC driver
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by: Hannes Schulz <schulz@schwaar.com>
+ * ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define IPMI_SMIC_VERSION "v33"
+
+/* smic_debug is a bit-field
+ * SMIC_DEBUG_ENABLE - turned on for now
+ * SMIC_DEBUG_MSG - commands and their responses
+ * SMIC_DEBUG_STATES - state machine
+*/
+#define SMIC_DEBUG_STATES 4
+#define SMIC_DEBUG_MSG 2
+#define SMIC_DEBUG_ENABLE 1
+
+static int smic_debug = 1;
+
+enum smic_states {
+ SMIC_IDLE,
+ SMIC_START_OP,
+ SMIC_OP_OK,
+ SMIC_WRITE_START,
+ SMIC_WRITE_NEXT,
+ SMIC_WRITE_END,
+ SMIC_WRITE2READ,
+ SMIC_READ_START,
+ SMIC_READ_NEXT,
+ SMIC_READ_END,
+ SMIC_HOSED
+};
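+
+/* A normal transaction walks IDLE -> START_OP -> OP_OK -> WRITE_START
+   -> WRITE_NEXT... -> WRITE_END -> WRITE2READ -> READ_START ->
+   READ_NEXT... -> READ_END -> IDLE.  Errors funnel through
+   start_error_recovery(), which restarts from SMIC_START_OP and gives
+   up to SMIC_HOSED after SMIC_MAX_ERROR_RETRIES attempts. */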
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT	100000 /* = 100 ms */
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY 0x80
+#define SMIC_TX_DATA_READY 0x40
+#define SMIC_SMI 0x10
+#define SMIC_EVM_DATA_AVAIL 0x08
+#define SMIC_SMS_DATA_AVAIL 0x04
+#define SMIC_FLAG_BSY 0x01
+
+/* SMIC Error Codes */
+#define EC_NO_ERROR 0x00
+#define EC_ABORTED 0x01
+#define EC_ILLEGAL_CONTROL 0x02
+#define EC_NO_RESPONSE 0x03
+#define EC_ILLEGAL_COMMAND 0x04
+#define EC_BUFFER_FULL 0x05
+
+struct si_sm_data
+{
+ enum smic_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_SMIC_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_SMIC_READ_SIZE];
+ int read_pos;
+ int truncated;
+ unsigned int error_retries;
+ long smic_timeout;
+};
+
+static unsigned int init_smic_data (struct si_sm_data *smic,
+ struct si_sm_io *io)
+{
+ smic->state = SMIC_IDLE;
+ smic->io = io;
+ smic->write_pos = 0;
+ smic->write_count = 0;
+ smic->orig_write_count = 0;
+ smic->read_pos = 0;
+ smic->error_retries = 0;
+ smic->truncated = 0;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+ /* We use 3 bytes of I/O. */
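+	/* (Offset 0 is the data register, offset 1 the control/status
+	   register, and offset 2 the flags register -- see the read_*
+	   and write_* helpers below.) */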
+ return 3;
+}
+
+static int start_smic_transaction(struct si_sm_data *smic,
+ unsigned char *data, unsigned int size)
+{
+ unsigned int i;
+
+ if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
+ return -1;
+ }
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
+ return -2;
+ }
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ printk(KERN_INFO "start_smic_transaction -");
+ for (i = 0; i < size; i ++) {
+ printk (" %02x", (unsigned char) (data [i]));
+ }
+ printk ("\n");
+ }
+ smic->error_retries = 0;
+ memcpy(smic->write_data, data, size);
+ smic->write_count = size;
+ smic->orig_write_count = size;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+ unsigned char *data, unsigned int length)
+{
+ int i;
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+		printk (KERN_INFO "smic_get_result -");
+ for (i = 0; i < smic->read_pos; i ++) {
+ printk (" %02x", (smic->read_data [i]));
+ }
+ printk ("\n");
+ }
+ if (length < smic->read_pos) {
+ smic->read_pos = length;
+ smic->truncated = 1;
+ }
+ memcpy(data, smic->read_data, smic->read_pos);
+
+ if ((length >= 3) && (smic->read_pos < 3)) {
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ smic->read_pos = 3;
+ }
+ if (smic->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ smic->truncated = 0;
+ }
+ return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+ unsigned char flags)
+{
+ smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+ unsigned char control)
+{
+ smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data (struct si_sm_data *smic,
+ unsigned char data)
+{
+ smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+ (smic->error_retries)++;
+ if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+ if (smic_debug & SMIC_DEBUG_ENABLE) {
+ printk(KERN_WARNING
+ "ipmi_smic_drv: smic hosed: %s\n", reason);
+ }
+ smic->state = SMIC_HOSED;
+ } else {
+ smic->write_count = smic->orig_write_count;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ }
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+ write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+ (smic->write_pos)++;
+ (smic->write_count)--;
+}
+
+static inline void read_next_byte (struct si_sm_data *smic)
+{
+ if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+ read_smic_data (smic);
+ smic->truncated = 1;
+ } else {
+ smic->read_data[smic->read_pos] = read_smic_data(smic);
+ (smic->read_pos)++;
+ }
+}
+
+/* SMIC Control/Status Code Components */
+#define SMIC_GET_STATUS 0x00 /* Control form's name */
+#define SMIC_READY 0x00 /* Status form's name */
+#define SMIC_WR_START 0x01 /* Unified Control/Status names... */
+#define SMIC_WR_NEXT 0x02
+#define SMIC_WR_END 0x03
+#define SMIC_RD_START 0x04
+#define SMIC_RD_NEXT 0x05
+#define SMIC_RD_END 0x06
+#define SMIC_CODE_MASK 0x0f
+
+#define SMIC_CONTROL 0x00
+#define SMIC_STATUS 0x80
+#define SMIC_CS_MASK 0x80
+
+#define SMIC_SMS 0x40
+#define SMIC_SMM 0x60
+#define SMIC_STREAM_MASK 0x60
+
+/* SMIC Control Codes */
+#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/* SMIC Status Codes */
+#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+ SMIC_CC_SMS_GET_STATUS 0x40
+ SMIC_CC_SMS_WR_START 0x41
+ SMIC_CC_SMS_WR_NEXT 0x42
+ SMIC_CC_SMS_WR_END 0x43
+ SMIC_CC_SMS_RD_START 0x44
+ SMIC_CC_SMS_RD_NEXT 0x45
+ SMIC_CC_SMS_RD_END 0x46
+
+ SMIC_SC_SMS_READY 0xC0
+ SMIC_SC_SMS_WR_START 0xC1
+ SMIC_SC_SMS_WR_NEXT 0xC2
+ SMIC_SC_SMS_WR_END 0xC3
+ SMIC_SC_SMS_RD_START 0xC4
+ SMIC_SC_SMS_RD_NEXT 0xC5
+ SMIC_SC_SMS_RD_END 0xC6
+*/
+
+static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
+{
+ unsigned char status;
+ unsigned char flags;
+ unsigned char data;
+
+ if (smic->state == SMIC_HOSED) {
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+ }
+ if (smic->state != SMIC_IDLE) {
+ if (smic_debug & SMIC_DEBUG_STATES) {
+ printk(KERN_INFO
+ "smic_event - smic->smic_timeout = %ld,"
+ " time = %ld\n",
+ smic->smic_timeout, time);
+ }
+/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */
+ if (time < SMIC_RETRY_TIMEOUT) {
+ smic->smic_timeout -= time;
+ if (smic->smic_timeout < 0) {
+ start_error_recovery(smic, "smic timed out.");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ }
+ flags = read_smic_flags(smic);
+ if (flags & SMIC_FLAG_BSY)
+ return SI_SM_CALL_WITH_DELAY;
+
+ status = read_smic_status (smic);
+ if (smic_debug & SMIC_DEBUG_STATES)
+ printk(KERN_INFO
+ "smic_event - state = %d, flags = 0x%02x,"
+ " status = 0x%02x\n",
+ smic->state, flags, status);
+
+ switch (smic->state) {
+ case SMIC_IDLE:
+ /* in IDLE we check for available messages */
+ if (flags & (SMIC_SMI |
+ SMIC_EVM_DATA_AVAIL | SMIC_SMS_DATA_AVAIL))
+ {
+ return SI_SM_ATTN;
+ }
+ return SI_SM_IDLE;
+
+ case SMIC_START_OP:
+ /* sanity check whether smic is really idle */
+ write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_OP_OK;
+ break;
+
+ case SMIC_OP_OK:
+ if (status != SMIC_SC_SMS_READY) {
+ /* this should not happen */
+ start_error_recovery(smic,
+ "state = SMIC_OP_OK,"
+ " status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+		/* OK so far; smic is idle, let us start ... */
+ write_smic_control(smic, SMIC_CC_SMS_WR_START);
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_WRITE_START;
+ break;
+
+ case SMIC_WRITE_START:
+ if (status != SMIC_SC_SMS_WR_START) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_START, "
+ "status != SMIC_SC_SMS_WR_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* we must not issue WR_(NEXT|END) unless
+ TX_DATA_READY is set */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ /* last byte */
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ }
+ else {
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_WRITE_NEXT:
+ if (status != SMIC_SC_SMS_WR_NEXT) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_NEXT, "
+ "status != SMIC_SC_SMS_WR_NEXT");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* this is the same code as in SMIC_WRITE_START */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ }
+ else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ }
+ else {
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_WRITE_END:
+ if (status != SMIC_SC_SMS_WR_END) {
+ start_error_recovery (smic,
+ "state = SMIC_WRITE_END, "
+ "status != SMIC_SC_SMS_WR_END");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* data register holds an error code */
+ data = read_smic_data(smic);
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE) {
+ printk(KERN_INFO
+ "SMIC_WRITE_END: data = %02x\n", data);
+ }
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else {
+ smic->state = SMIC_WRITE2READ;
+ }
+ break;
+
+ case SMIC_WRITE2READ:
+ /* we must wait for RX_DATA_READY to be set before we
+ can continue */
+ if (flags & SMIC_RX_DATA_READY) {
+ write_smic_control(smic, SMIC_CC_SMS_RD_START);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_START;
+ } else {
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_START:
+ if (status != SMIC_SC_SMS_RD_START) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_START, "
+ "status != SMIC_SC_SMS_RD_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else {
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_NEXT:
+ switch (status) {
+ /* smic tells us that this is the last byte to be read
+ --> clean up */
+ case SMIC_SC_SMS_RD_END:
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_END);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_END;
+ break;
+ case SMIC_SC_SMS_RD_NEXT:
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else {
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+ default:
+ start_error_recovery(
+ smic,
+ "state = SMIC_READ_NEXT, "
+ "status != SMIC_SC_SMS_RD_(NEXT|END)");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_END:
+ if (status != SMIC_SC_SMS_READY) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ data = read_smic_data(smic);
+ /* data register holds an error code */
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE) {
+ printk(KERN_INFO
+ "SMIC_READ_END: data = %02x\n", data);
+ }
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else {
+ smic->state = SMIC_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+
+ case SMIC_HOSED:
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+
+	default:
+		if (smic_debug & SMIC_DEBUG_ENABLE)
+			printk(KERN_WARNING "smic->state = %d\n", smic->state);
+		/* Recover regardless of the debug setting; otherwise an
+		   unknown state would loop forever. */
+		start_error_recovery(smic, "state = UNKNOWN");
+		return SI_SM_CALL_WITH_DELAY;
+	}
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+	/* It's impossible for the SMIC flags register to be all 1's,
+ (assuming a properly functioning, self-initialized BMC)
+ but that's what you get from reading a bogus address, so we
+ test that first. */
+ if (read_smic_flags(smic) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *smic)
+{
+}
+
+static int smic_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+struct si_sm_handlers smic_smi_handlers =
+{
+ .version = IPMI_SMIC_VERSION,
+ .init_data = init_smic_data,
+ .start_transaction = start_smic_transaction,
+ .get_result = smic_get_result,
+ .event = smic_event,
+ .detect = smic_detect,
+ .cleanup = smic_cleanup,
+ .size = smic_size,
+};
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 000000000000..fd7093879c66
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1068 @@
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/apic.h>
+#endif
+
+#define PFX "IPMI Watchdog: "
+
+#define IPMI_WATCHDOG_VERSION "v33"
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG (1 << 7)
+#define WDOG_DONT_STOP_ON_SET (1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2 1
+#define WDOG_TIMER_USE_BIOS_POST 2
+#define WDOG_TIMER_USE_OS_LOAD 3
+#define WDOG_TIMER_USE_SMS_OS 4
+#define WDOG_TIMER_USE_OEM 5
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE 0
+#define WDOG_PRETIMEOUT_SMI 1
+#define WDOG_PRETIMEOUT_NMI 2
+#define WDOG_PRETIMEOUT_MSG_INT 3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE 0
+#define WDOG_PREOP_PANIC 1
+#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to
+ read. Doesn't work in NMI
+ mode. */
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE 0
+#define WDOG_TIMEOUT_RESET 1
+#define WDOG_TIMEOUT_POWER_DOWN 2
+#define WDOG_TIMEOUT_POWER_CYCLE 3
+
+/* Byte 3 of the set command, byte 4 of the get response is the
+   pre-timeout in seconds. */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
+
+/* Setting/getting the watchdog timer value. This is for bytes 5 and
+ 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ timeout time) and 8 and 9 (the current countdown value) of the
+ response. The timeout value is given in seconds (in the command it
+ is 100ms intervals). */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+ (((byte1) | ((byte2) << 8)) / 10)
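+/* For example, a 10 second timeout goes on the wire as 100
+   (10s = 100 intervals of 100ms): byte1 = 0x64, byte2 = 0x00. */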
+
+#define IPMI_WDOG_RESET_TIMER 0x22
+#define IPMI_WDOG_SET_TIMER 0x24
+#define IPMI_WDOG_GET_TIMER 0x25
+
+/* These are here until the real ones get into the watchdog.h interface. */
+#ifndef WDIOC_GETTIMEOUT
+#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
+#endif
+#ifndef WDIOC_SET_PRETIMEOUT
+#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
+#endif
+#ifndef WDIOC_GET_PRETIMEOUT
+#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
+#endif
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+static int nowayout = 1;
+#else
+static int nowayout;
+#endif
+
+static ipmi_user_t watchdog_user = NULL;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout = 0;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read = 0;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q = NULL;
+static char pretimeout_since_last_heartbeat = 0;
+static char expect_close;
+
+/* If true, the driver will start running as soon as it is configured
+ and ready. */
+static int start_now = 0;
+
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+module_param(pretimeout, int, 0);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+module_param_string(action, action, sizeof(action), 0);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+ "reset, none, power_cycle, power_off.");
+module_param_string(preaction, preaction, sizeof(preaction), 0);
+MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
+ "pre_none, pre_smi, pre_nmi, pre_int.");
+module_param_string(preop, preop, sizeof(preop), 0);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
+ "preop_none, preop_panic, preop_give_data.");
+module_param(start_now, int, 0);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
+		 " soon as the driver is loaded.");
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* If shutting down via IPMI, we ignore the heartbeat. */
+static int ipmi_ignore_heartbeat = 0;
+
+/* Is someone using the watchdog? Only one user is allowed. */
+static unsigned long ipmi_wdog_open = 0;
+
+/* If set to 1, the heartbeat command will set the state to reset and
+   start the timer.  The timer doesn't run when the driver is first
+   opened; it starts on the first heartbeat, and this variable
+   implements that behavior. */
+static int ipmi_start_timer_on_heartbeat = 0;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+
+static int ipmi_heartbeat(void);
+static void panic_halt_ipmi_heartbeat(void);
+
+
+/* We use a semaphore to make sure that only one thing can send a set
+ timeout at one time, because we only have one copy of the data.
+ The semaphore is claimed when the set_timeout is sent and freed
+ when both messages are free. */
+static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
+static DECLARE_MUTEX(set_timeout_lock);
+static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&set_timeout_tofree))
+ up(&set_timeout_lock);
+}
+static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&set_timeout_tofree))
+ up(&set_timeout_lock);
+}
+static struct ipmi_smi_msg set_timeout_smi_msg =
+{
+ .done = set_timeout_free_smi
+};
+static struct ipmi_recv_msg set_timeout_recv_msg =
+{
+ .done = set_timeout_free_recv
+};
+
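+/* Build and send the Set Watchdog Timer command from the current
+   module settings.  The caller supplies the message buffers, so this
+   can also run at panic time when nothing can be allocated. */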
+static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int *send_heartbeat_now)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[6];
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int hbnow = 0;
+
+
+ data[0] = 0;
+ WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+ if ((ipmi_version_major > 1)
+ || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
+ {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/* In IPMI 1.0, setting the timer stops the watchdog; we
+		   need to start it back up again. */
+ hbnow = 1;
+ }
+
+ data[1] = 0;
+ WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+ if (pretimeout > 0) {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+ data[2] = pretimeout;
+ } else {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+ data[2] = 0; /* No pretimeout. */
+ }
+ data[3] = 0;
+ WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_SET_TIMER;
+ msg.data = data;
+ msg.data_len = sizeof(data);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ if (rv) {
+ printk(KERN_WARNING PFX "set timeout error: %d\n",
+ rv);
+ }
+
+ if (send_heartbeat_now)
+ *send_heartbeat_now = hbnow;
+
+ return rv;
+}
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB 0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
+#define IPMI_SET_TIMEOUT_FORCE_HB 2
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+ int send_heartbeat_now;
+ int rv;
+
+
+	/* We can only send one of these at a time; on success the lock
+	   is released by the message-done callbacks once both message
+	   buffers are free again. */
+ down(&set_timeout_lock);
+
+ atomic_set(&set_timeout_tofree, 2);
+
+ rv = i_ipmi_set_timeout(&set_timeout_smi_msg,
+ &set_timeout_recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ up(&set_timeout_lock);
+ } else {
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ {
+ rv = ipmi_heartbeat();
+ }
+ }
+
+ return rv;
+}
+
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+}
+static struct ipmi_smi_msg panic_halt_smi_msg =
+{
+ .done = dummy_smi_free
+};
+static struct ipmi_recv_msg panic_halt_recv_msg =
+{
+ .done = dummy_recv_free
+};
+
+/* Special call that claims no locks.  This is only to be called at
+   panic or halt time, in run-to-completion mode, when the caller is
+   the only CPU and the only thing that will be going is these IPMI
+   calls. */
+static void panic_halt_ipmi_set_timeout(void)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
+ &panic_halt_recv_msg,
+ &send_heartbeat_now);
+ if (!rv) {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
+}
+
+/* We use a semaphore to make sure that only one thing can send a
+ heartbeat at one time, because we only have one copy of the data.
+   The semaphore is claimed when the heartbeat is sent and freed
+ when both messages are free. */
+static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
+static DECLARE_MUTEX(heartbeat_lock);
+static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
+static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&heartbeat_tofree))
+ up(&heartbeat_wait_lock);
+}
+static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&heartbeat_tofree))
+ up(&heartbeat_wait_lock);
+}
+static struct ipmi_smi_msg heartbeat_smi_msg =
+{
+ .done = heartbeat_free_smi
+};
+static struct ipmi_recv_msg heartbeat_recv_msg =
+{
+ .done = heartbeat_free_recv
+};
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
+{
+ .done = dummy_smi_free
+};
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
+{
+ .done = dummy_recv_free
+};
+
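+/* Send a Reset Watchdog Timer command to pet the timer, or fall back
+   to a full timer setup if the timer hasn't been started yet or a
+   pretimeout has fired since the last heartbeat. */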
+static int ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ int rv;
+ struct ipmi_system_interface_addr addr;
+
+ if (ipmi_ignore_heartbeat) {
+ return 0;
+ }
+
+ if (ipmi_start_timer_on_heartbeat) {
+ ipmi_start_timer_on_heartbeat = 0;
+ ipmi_watchdog_state = action_val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ } else if (pretimeout_since_last_heartbeat) {
+		/* A pretimeout occurred; make sure we set the timeout.
+		   We don't want to set the action, though; we want to
+		   leave that alone (thus it can't be combined with the
+		   above operation). */
+ pretimeout_since_last_heartbeat = 0;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+ }
+
+ down(&heartbeat_lock);
+
+ atomic_set(&heartbeat_tofree, 2);
+
+	/* Don't reset the timer if we have the timer turned off;
+	   resetting it would re-enable the watchdog. */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
+ up(&heartbeat_lock);
+ return 0;
+ }
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &heartbeat_smi_msg,
+ &heartbeat_recv_msg,
+ 1);
+ if (rv) {
+ up(&heartbeat_lock);
+ printk(KERN_WARNING PFX "heartbeat failure: %d\n",
+ rv);
+ return rv;
+ }
+
+ /* Wait for the heartbeat to be sent. */
+ down(&heartbeat_wait_lock);
+
+ if (heartbeat_recv_msg.msg.data[0] != 0) {
+ /* Got an error in the heartbeat response. It was already
+ reported in ipmi_wdog_msg_handler, but we should return
+ an error here. */
+ rv = -EINVAL;
+ }
+
+ up(&heartbeat_lock);
+
+ return rv;
+}
+
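+/* Like ipmi_heartbeat(), but takes no locks and waits for nothing;
+   only for use in panic/halt context. */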
+static void panic_halt_ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr addr;
+
+
+	/* Don't reset the timer if we have the timer turned off;
+	   resetting it would re-enable the watchdog. */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return;
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &panic_halt_heartbeat_smi_msg,
+ &panic_halt_heartbeat_recv_msg,
+ 1);
+}
+
+static struct watchdog_info ident =
+{
+ .options = 0, /* WDIOF_SETTIMEOUT, */
+ .firmware_version = 1,
+ .identity = "IPMI"
+};
+
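+/* The standard watchdog device ioctls.  Timeout and pretimeout
+   changes are pushed to the BMC immediately; WDIOC_SETOPTIONS arms
+   or disarms the timer. */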
+static int ipmi_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int i;
+ int val;
+
+	switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ i = copy_to_user(argp, &ident, sizeof(ident));
+ return i ? -EFAULT : 0;
+
+ case WDIOC_SETTIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ timeout = val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETTIMEOUT:
+ i = copy_to_user(argp, &timeout, sizeof(timeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_SET_PRETIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ pretimeout = val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GET_PRETIMEOUT:
+ i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ return ipmi_heartbeat();
+
+ case WDIOC_SETOPTIONS:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+		if (val & WDIOS_DISABLECARD) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ ipmi_start_timer_on_heartbeat = 0;
+ }
+
+		if (val & WDIOS_ENABLECARD) {
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ }
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ val = 0;
+ i = copy_to_user(argp, &val, sizeof(val));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
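+/* Any write counts as a heartbeat.  Unless nowayout is set, writing
+   a 'V' arms the magic close, so a following release really stops
+   the watchdog. */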
+static ssize_t ipmi_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ int rv;
+
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ rv = ipmi_heartbeat();
+ if (rv)
+ return rv;
+ return 1;
+ }
+ return 0;
+}
+
+static ssize_t ipmi_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int rv = 0;
+ wait_queue_t wait;
+
+	if (count == 0)
+ return 0;
+
+ /* Reading returns if the pretimeout has gone off, and it only does
+ it once per pretimeout. */
+ spin_lock(&ipmi_read_lock);
+ if (!data_to_read) {
+ if (file->f_flags & O_NONBLOCK) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&read_q, &wait);
+		/* Also leave the loop on a signal, so a pending signal
+		   can't spin us through schedule(). */
+		while (!data_to_read && !signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ipmi_read_lock);
+ schedule();
+ spin_lock(&ipmi_read_lock);
+ }
+ remove_wait_queue(&read_q, &wait);
+
+ if (signal_pending(current)) {
+ rv = -ERESTARTSYS;
+ goto out;
+ }
+ }
+ data_to_read = 0;
+
+ out:
+ spin_unlock(&ipmi_read_lock);
+
+ if (rv == 0) {
+ if (copy_to_user(buf, &data_to_read, 1))
+ rv = -EFAULT;
+ else
+ rv = 1;
+ }
+
+ return rv;
+}
+
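+/* Only the watchdog minor is handled here, and only one opener is
+   allowed at a time. */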
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+	switch (iminor(ino)) {
+	case WATCHDOG_MINOR:
+		if (test_and_set_bit(0, &ipmi_wdog_open))
+			return -EBUSY;
+
+		/* Don't start the timer now, let it start on the
+		   first heartbeat. */
+		ipmi_start_timer_on_heartbeat = 1;
+		return nonseekable_open(ino, filep);
+
+	default:
+		return -ENODEV;
+	}
+}
+
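+/* poll() reports the device readable once a pretimeout has fired. */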
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &read_q, wait);
+
+ spin_lock(&ipmi_read_lock);
+ if (data_to_read)
+ mask |= (POLLIN | POLLRDNORM);
+ spin_unlock(&ipmi_read_lock);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ int result;
+
+ result = fasync_helper(fd, file, on, &fasync_q);
+
+	return result;
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+	if (iminor(ino) == WATCHDOG_MINOR) {
+ if (expect_close == 42) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ clear_bit(0, &ipmi_wdog_open);
+ } else {
+ printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ ipmi_heartbeat();
+ }
+ }
+
+	ipmi_fasync(-1, filep, 0);
+ expect_close = 0;
+
+ return 0;
+}
+
+static struct file_operations ipmi_wdog_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmi_read,
+ .poll = ipmi_poll,
+ .write = ipmi_write,
+ .ioctl = ipmi_ioctl,
+ .open = ipmi_open,
+ .release = ipmi_close,
+ .fasync = ipmi_fasync,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ipmi_wdog_fops
+};
+
+static DECLARE_RWSEM(register_sem);
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ if (msg->msg.data[0] != 0) {
+ printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
+ msg->msg.data[0],
+ msg->msg.cmd);
+ }
+
+ ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+ if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+ if (preop_val == WDOG_PREOP_PANIC)
+ panic("Watchdog pre-timeout");
+ else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ spin_lock(&ipmi_read_lock);
+ data_to_read = 1;
+ wake_up_interruptible(&read_q);
+ kill_fasync(&fasync_q, SIGIO, POLL_IN);
+
+ spin_unlock(&ipmi_read_lock);
+ }
+ }
+
+	/* On some machines the heartbeat will give an error and not
+	   work unless we re-enable the timer, so flag that the next
+	   heartbeat should set the timeout again. */
+ pretimeout_since_last_heartbeat = 1;
+}
+
+static struct ipmi_user_hndl ipmi_hndlrs =
+{
+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
+};
+
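+/* Called via the SMI watcher for each IPMI interface that appears;
+   only the first interface becomes the watchdog. */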
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+ int rv = -EBUSY;
+
+ down_write(&register_sem);
+ if (watchdog_user)
+ goto out;
+
+ rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+ if (rv < 0) {
+ printk(KERN_CRIT PFX "Unable to register with ipmi\n");
+ goto out;
+ }
+
+ ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+
+ rv = misc_register(&ipmi_wdog_miscdev);
+ if (rv < 0) {
+ ipmi_destroy_user(watchdog_user);
+ watchdog_user = NULL;
+ printk(KERN_CRIT PFX "Unable to register misc device\n");
+ }
+
+ out:
+ up_write(&register_sem);
+
+ if ((start_now) && (rv == 0)) {
+ /* Run from startup, so start the timer now. */
+ start_now = 0; /* Disable this function after first startup. */
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ printk(KERN_INFO PFX "Starting now!\n");
+ }
+}
+
+#ifdef HAVE_NMI_HANDLER
+static int
+ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
+{
+ /* If no one else handled the NMI, we assume it was the IPMI
+ watchdog. */
+ if ((!handled) && (preop_val == WDOG_PREOP_PANIC))
+ panic(PFX "pre-timeout");
+
+	/* On some machines the heartbeat will give an error and not
+	   work unless we re-enable the timer, so flag that the next
+	   heartbeat should set the timeout again. */
+ pretimeout_since_last_heartbeat = 1;
+
+ return NOTIFY_DONE;
+}
+
+static struct nmi_handler ipmi_nmi_handler =
+{
+ .link = LIST_HEAD_INIT(ipmi_nmi_handler.link),
+ .dev_name = "ipmi_watchdog",
+ .dev_id = NULL,
+ .handler = ipmi_nmi,
+ .priority = 0, /* Call us last. */
+};
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ static int reboot_event_handled = 0;
+
+ if ((watchdog_user) && (!reboot_event_handled)) {
+ /* Make sure we only do this once. */
+ reboot_event_handled = 1;
+
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ /* Disable the WDT if we are shutting down. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ panic_halt_ipmi_set_timeout();
+ } else {
+			/* Set a long timer to let the reboot happen,
+			   but reset the board if it hangs. */
+ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ panic_halt_ipmi_set_timeout();
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+ .notifier_call = wdog_reboot_handler,
+ .next = NULL,
+ .priority = 0
+};
+
+static int wdog_panic_handler(struct notifier_block *this,
+ unsigned long event,
+ void *unused)
+{
+ static int panic_event_handled = 0;
+
+	/* On a panic, if we have a panic timeout, make sure the machine
+	   still reboots even if it hangs during the panic itself. */
+ if (watchdog_user && !panic_event_handled) {
+ /* Make sure the panic doesn't hang, and make sure we
+ do this only once. */
+ panic_event_handled = 1;
+
+ timeout = 255;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ panic_halt_ipmi_set_timeout();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_panic_notifier = {
+ .notifier_call = wdog_panic_handler,
+ .next = NULL,
+ .priority = 150 /* priority: INT_MAX >= x >= 0 */
+};
+
+
+static void ipmi_new_smi(int if_num)
+{
+ ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ /* This can never be called, because once the watchdog is
+ registered, the interface can't go away until the watchdog
+ is unregistered. */
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone
+};
+
+static int __init ipmi_wdog_init(void)
+{
+ int rv;
+
+ printk(KERN_INFO PFX "driver version "
+ IPMI_WATCHDOG_VERSION "\n");
+
+ if (strcmp(action, "reset") == 0) {
+ action_val = WDOG_TIMEOUT_RESET;
+ } else if (strcmp(action, "none") == 0) {
+ action_val = WDOG_TIMEOUT_NONE;
+ } else if (strcmp(action, "power_cycle") == 0) {
+ action_val = WDOG_TIMEOUT_POWER_CYCLE;
+ } else if (strcmp(action, "power_off") == 0) {
+ action_val = WDOG_TIMEOUT_POWER_DOWN;
+ } else {
+ action_val = WDOG_TIMEOUT_RESET;
+ printk(KERN_INFO PFX "Unknown action '%s', defaulting to"
+ " reset\n", action);
+ }
+
+ if (strcmp(preaction, "pre_none") == 0) {
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ } else if (strcmp(preaction, "pre_smi") == 0) {
+ preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_NMI_HANDLER
+ } else if (strcmp(preaction, "pre_nmi") == 0) {
+ preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+ } else if (strcmp(preaction, "pre_int") == 0) {
+ preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+ } else {
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to"
+ " none\n", preaction);
+ }
+
+ if (strcmp(preop, "preop_none") == 0) {
+ preop_val = WDOG_PREOP_NONE;
+ } else if (strcmp(preop, "preop_panic") == 0) {
+ preop_val = WDOG_PREOP_PANIC;
+ } else if (strcmp(preop, "preop_give_data") == 0) {
+ preop_val = WDOG_PREOP_GIVE_DATA;
+ } else {
+ preop_val = WDOG_PREOP_NONE;
+ printk(KERN_INFO PFX "Unknown preop '%s', defaulting to"
+ " none\n", preop);
+ }
+
+#ifdef HAVE_NMI_HANDLER
+ if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+ if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ printk(KERN_WARNING PFX "Pretimeout op is to give data"
+ " but NMI pretimeout is enabled, setting"
+ " pretimeout op to none\n");
+ preop_val = WDOG_PREOP_NONE;
+ }
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (nmi_watchdog == NMI_IO_APIC) {
+ printk(KERN_WARNING PFX "nmi_watchdog is set to IO APIC"
+ " mode (value is %d), that is incompatible"
+ " with using NMI in the IPMI watchdog."
+ " Disabling IPMI nmi pretimeout.\n",
+ nmi_watchdog);
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ } else {
+#endif
+ rv = request_nmi(&ipmi_nmi_handler);
+ if (rv) {
+ printk(KERN_WARNING PFX "Can't register nmi handler\n");
+ return rv;
+ }
+#ifdef CONFIG_X86_LOCAL_APIC
+ }
+#endif
+ }
+#endif
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+#ifdef HAVE_NMI_HANDLER
+ if (preaction_val == WDOG_PRETIMEOUT_NMI)
+ release_nmi(&ipmi_nmi_handler);
+#endif
+ printk(KERN_WARNING PFX "can't register smi watcher\n");
+ return rv;
+ }
+
+ register_reboot_notifier(&wdog_reboot_notifier);
+ notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
+
+ return 0;
+}
+
+static __exit void ipmi_unregister_watchdog(void)
+{
+ int rv;
+
+ down_write(&register_sem);
+
+#ifdef HAVE_NMI_HANDLER
+ if (preaction_val == WDOG_PRETIMEOUT_NMI)
+ release_nmi(&ipmi_nmi_handler);
+#endif
+
+ notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+
+	if (!watchdog_user)
+ goto out;
+
+ /* Make sure no one can call us any more. */
+ misc_deregister(&ipmi_wdog_miscdev);
+
+	/* Wait to make sure the message makes it out.  The lower layer has
+	   pointers to our buffers; we want to make sure they are done with
+	   them before we release our memory. */
+ while (atomic_read(&set_timeout_tofree)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ /* Disconnect from IPMI. */
+ rv = ipmi_destroy_user(watchdog_user);
+ if (rv) {
+ printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+ rv);
+ }
+ watchdog_user = NULL;
+
+ out:
+ up_write(&register_sem);
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ ipmi_unregister_watchdog();
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");