drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65
66 /* Measure times between events in the driver. */
67 #undef DEBUG_TIMING
68
69 /* Call every 10 ms. */
70 #define SI_TIMEOUT_TIME_USEC    10000
71 #define SI_USEC_PER_JIFFY       (1000000/HZ)
72 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
73 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
74                                        short timeout */
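/* Illustrative arithmetic (not from the original source): with HZ=1000 a
   jiffy is 1000 usec, so SI_TIMEOUT_JIFFIES works out to 10; with HZ=100
   it is 1.  With HZ=250 the integer division turns 10000/4000 into 2, so
   the "long" timeout is really ~8 ms on that configuration. */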
75
76 enum si_intf_state {
77         SI_NORMAL,
78         SI_GETTING_FLAGS,
79         SI_GETTING_EVENTS,
80         SI_CLEARING_FLAGS,
81         SI_CLEARING_FLAGS_THEN_SET_IRQ,
82         SI_GETTING_MESSAGES,
83         SI_ENABLE_INTERRUPTS1,
84         SI_ENABLE_INTERRUPTS2
85         /* FIXME - add watchdog stuff. */
86 };
87
88 /* Some BT-specific defines we need here. */
89 #define IPMI_BT_INTMASK_REG             2
90 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
91 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
92
93 enum si_type {
94     SI_KCS, SI_SMIC, SI_BT
95 };
96 static char *si_to_str[] = { "KCS", "SMIC", "BT" };
97
98 #define DEVICE_NAME "ipmi_si"
99
100 static struct device_driver ipmi_driver =
101 {
102         .name = DEVICE_NAME,
103         .bus = &platform_bus_type
104 };
105
106 struct smi_info
107 {
108         int                    intf_num;
109         ipmi_smi_t             intf;
110         struct si_sm_data      *si_sm;
111         struct si_sm_handlers  *handlers;
112         enum si_type           si_type;
113         spinlock_t             si_lock;
114         spinlock_t             msg_lock;
115         struct list_head       xmit_msgs;
116         struct list_head       hp_xmit_msgs;
117         struct ipmi_smi_msg    *curr_msg;
118         enum si_intf_state     si_state;
119
120         /* Used to handle the various types of I/O that can occur with
121            IPMI */
122         struct si_sm_io io;
123         int (*io_setup)(struct smi_info *info);
124         void (*io_cleanup)(struct smi_info *info);
125         int (*irq_setup)(struct smi_info *info);
126         void (*irq_cleanup)(struct smi_info *info);
127         unsigned int io_size;
128         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
129         void (*addr_source_cleanup)(struct smi_info *info);
130         void *addr_source_data;
131
132         /* Per-OEM handler, called from handle_flags().
133            Returns 1 when handle_flags() needs to be re-run,
134            or 0 if it has set si_state itself.
135         */
136         int (*oem_data_avail_handler)(struct smi_info *smi_info);
137
138         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
139            is set to hold the flags until we are done handling everything
140            from the flags. */
141 #define RECEIVE_MSG_AVAIL       0x01
142 #define EVENT_MSG_BUFFER_FULL   0x02
143 #define WDT_PRE_TIMEOUT_INT     0x08
144 #define OEM0_DATA_AVAIL     0x20
145 #define OEM1_DATA_AVAIL     0x40
146 #define OEM2_DATA_AVAIL     0x80
147 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
148                              OEM1_DATA_AVAIL | \
149                              OEM2_DATA_AVAIL)
150         unsigned char       msg_flags;
151
152         /* If set to true, this will request events the next time the
153            state machine is idle. */
154         atomic_t            req_events;
155
156         /* If true, run the state machine to completion on every send
157            call.  Generally used after a panic to make sure stuff goes
158            out. */
159         int                 run_to_completion;
160
161         /* The I/O port of an SI interface. */
162         int                 port;
163
164         /* The space between start addresses of the two ports.  For
165            instance, if the first port is 0xca2 and the spacing is 4, then
166            the second port is 0xca6. */
167         unsigned int        spacing;
168
169         /* zero if no irq; */
170         int                 irq;
171
172         /* The timer for this si. */
173         struct timer_list   si_timer;
174
175         /* The time (in jiffies) the last timeout occurred at. */
176         unsigned long       last_timeout_jiffies;
177
178         /* Used to gracefully stop the timer without race conditions. */
179         atomic_t            stop_operation;
180
181         /* The driver will disable interrupts when it gets into a
182            situation where it cannot handle messages due to lack of
183            memory.  Once that situation clears up, it will re-enable
184            interrupts. */
185         int interrupt_disabled;
186
187         /* From the get device id response... */
188         struct ipmi_device_id device_id;
189
190         /* Driver model stuff. */
191         struct device *dev;
192         struct platform_device *pdev;
193
194          /* True if we allocated the device, false if it came from
195           * someplace else (like PCI). */
196         int dev_registered;
197
198         /* Slave address, could be reported from DMI. */
199         unsigned char slave_addr;
200
201         /* Counters and things for the proc filesystem. */
202         spinlock_t count_lock;
203         unsigned long short_timeouts;
204         unsigned long long_timeouts;
205         unsigned long timeout_restarts;
206         unsigned long idles;
207         unsigned long interrupts;
208         unsigned long attentions;
209         unsigned long flag_fetches;
210         unsigned long hosed_count;
211         unsigned long complete_transactions;
212         unsigned long events;
213         unsigned long watchdog_pretimeouts;
214         unsigned long incoming_messages;
215
216         struct task_struct *thread;
217
218         struct list_head link;
219 };
220
221 static int try_smi_init(struct smi_info *smi);
222
223 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
224 static int register_xaction_notifier(struct notifier_block * nb)
225 {
226         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
227 }
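/* Illustrative sketch (hypothetical, not part of the original file): an
 * in-driver user of this hook registers a standard notifier_block; the
 * callback is invoked from start_next_msg() with the smi_info as the data
 * argument, just before a transaction is started, and returning a value
 * with NOTIFY_STOP_MASK set suppresses the immediate start:
 *
 *   static int example_xaction_cb(struct notifier_block *nb,
 *                                 unsigned long val, void *data)
 *   {
 *           struct smi_info *smi_info = data;
 *
 *           (inspect or account for the pending transaction here)
 *           return NOTIFY_DONE;
 *   }
 *
 *   static struct notifier_block example_nb = {
 *           .notifier_call = example_xaction_cb,
 *   };
 *
 *   register_xaction_notifier(&example_nb);
 */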
228
229 static void deliver_recv_msg(struct smi_info *smi_info,
230                              struct ipmi_smi_msg *msg)
231 {
232         /* Deliver the message to the upper layer with the lock
233            released. */
234         spin_unlock(&(smi_info->si_lock));
235         ipmi_smi_msg_received(smi_info->intf, msg);
236         spin_lock(&(smi_info->si_lock));
237 }
238
239 static void return_hosed_msg(struct smi_info *smi_info)
240 {
241         struct ipmi_smi_msg *msg = smi_info->curr_msg;
242
243         /* Make it a response; OR-ing in 4 converts the request netfn in byte 0 to the response netfn. */
244         msg->rsp[0] = msg->data[0] | 4;
245         msg->rsp[1] = msg->data[1];
246         msg->rsp[2] = 0xFF; /* Unknown error. */
247         msg->rsp_size = 3;
248
249         smi_info->curr_msg = NULL;
250         deliver_recv_msg(smi_info, msg);
251 }
252
253 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
254 {
255         int              rv;
256         struct list_head *entry = NULL;
257 #ifdef DEBUG_TIMING
258         struct timeval t;
259 #endif
260
261         /* No need to save flags, we already have interrupts off and we
262            already hold the SMI lock. */
263         spin_lock(&(smi_info->msg_lock));
264
265         /* Pick the high priority queue first. */
266         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
267                 entry = smi_info->hp_xmit_msgs.next;
268         } else if (!list_empty(&(smi_info->xmit_msgs))) {
269                 entry = smi_info->xmit_msgs.next;
270         }
271
272         if (!entry) {
273                 smi_info->curr_msg = NULL;
274                 rv = SI_SM_IDLE;
275         } else {
276                 int err;
277
278                 list_del(entry);
279                 smi_info->curr_msg = list_entry(entry,
280                                                 struct ipmi_smi_msg,
281                                                 link);
282 #ifdef DEBUG_TIMING
283                 do_gettimeofday(&t);
284                 printk("**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
285 #endif
286                 err = atomic_notifier_call_chain(&xaction_notifier_list,
287                                 0, smi_info);
288                 if (err & NOTIFY_STOP_MASK) {
289                         rv = SI_SM_CALL_WITHOUT_DELAY;
290                         goto out;
291                 }
292                 err = smi_info->handlers->start_transaction(
293                         smi_info->si_sm,
294                         smi_info->curr_msg->data,
295                         smi_info->curr_msg->data_size);
296                 if (err) {
297                         return_hosed_msg(smi_info);
298                 }
299
300                 rv = SI_SM_CALL_WITHOUT_DELAY;
301         }
302         out:
303         spin_unlock(&(smi_info->msg_lock));
304
305         return rv;
306 }
307
308 static void start_enable_irq(struct smi_info *smi_info)
309 {
310         unsigned char msg[2];
311
312         /* If we are enabling interrupts, we have to tell the
313            BMC to use them. */
314         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
315         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
316
317         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
318         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
319 }
320
321 static void start_clear_flags(struct smi_info *smi_info)
322 {
323         unsigned char msg[3];
324
325         /* Make sure the watchdog pre-timeout flag is not set at startup. */
326         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
327         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
328         msg[2] = WDT_PRE_TIMEOUT_INT;
329
330         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
331         smi_info->si_state = SI_CLEARING_FLAGS;
332 }
333
334 /* When we have a situation where we run out of memory and cannot
335    allocate messages, we just leave them in the BMC and run the system
336    polled until we can allocate some memory.  Once we have some
337    memory, we will re-enable the interrupt. */
338 static inline void disable_si_irq(struct smi_info *smi_info)
339 {
340         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
341                 disable_irq_nosync(smi_info->irq);
342                 smi_info->interrupt_disabled = 1;
343         }
344 }
345
346 static inline void enable_si_irq(struct smi_info *smi_info)
347 {
348         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
349                 enable_irq(smi_info->irq);
350                 smi_info->interrupt_disabled = 0;
351         }
352 }
353
354 static void handle_flags(struct smi_info *smi_info)
355 {
356  retry:
357         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
358                 /* Watchdog pre-timeout */
359                 spin_lock(&smi_info->count_lock);
360                 smi_info->watchdog_pretimeouts++;
361                 spin_unlock(&smi_info->count_lock);
362
363                 start_clear_flags(smi_info);
364                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
365                 spin_unlock(&(smi_info->si_lock));
366                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
367                 spin_lock(&(smi_info->si_lock));
368         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
369                 /* Messages available. */
370                 smi_info->curr_msg = ipmi_alloc_smi_msg();
371                 if (!smi_info->curr_msg) {
372                         disable_si_irq(smi_info);
373                         smi_info->si_state = SI_NORMAL;
374                         return;
375                 }
376                 enable_si_irq(smi_info);
377
378                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
379                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
380                 smi_info->curr_msg->data_size = 2;
381
382                 smi_info->handlers->start_transaction(
383                         smi_info->si_sm,
384                         smi_info->curr_msg->data,
385                         smi_info->curr_msg->data_size);
386                 smi_info->si_state = SI_GETTING_MESSAGES;
387         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
388                 /* Events available. */
389                 smi_info->curr_msg = ipmi_alloc_smi_msg();
390                 if (!smi_info->curr_msg) {
391                         disable_si_irq(smi_info);
392                         smi_info->si_state = SI_NORMAL;
393                         return;
394                 }
395                 enable_si_irq(smi_info);
396
397                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
398                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
399                 smi_info->curr_msg->data_size = 2;
400
401                 smi_info->handlers->start_transaction(
402                         smi_info->si_sm,
403                         smi_info->curr_msg->data,
404                         smi_info->curr_msg->data_size);
405                 smi_info->si_state = SI_GETTING_EVENTS;
406         } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
407                 if (smi_info->oem_data_avail_handler)
408                         if (smi_info->oem_data_avail_handler(smi_info))
409                                 goto retry;
410         } else {
411                 smi_info->si_state = SI_NORMAL;
412         }
413 }
414
415 static void handle_transaction_done(struct smi_info *smi_info)
416 {
417         struct ipmi_smi_msg *msg;
418 #ifdef DEBUG_TIMING
419         struct timeval t;
420
421         do_gettimeofday(&t);
422         printk("**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
423 #endif
424         switch (smi_info->si_state) {
425         case SI_NORMAL:
426                 if (!smi_info->curr_msg)
427                         break;
428
429                 smi_info->curr_msg->rsp_size
430                         = smi_info->handlers->get_result(
431                                 smi_info->si_sm,
432                                 smi_info->curr_msg->rsp,
433                                 IPMI_MAX_MSG_LENGTH);
434
435                 /* Do this here because deliver_recv_msg() releases the
436                    lock, and a new message can be put in during the
437                    time the lock is released. */
438                 msg = smi_info->curr_msg;
439                 smi_info->curr_msg = NULL;
440                 deliver_recv_msg(smi_info, msg);
441                 break;
442
443         case SI_GETTING_FLAGS:
444         {
445                 unsigned char msg[4];
446                 unsigned int  len;
447
448                 /* We got the flags from the SMI, now handle them. */
449                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
450                 if (msg[2] != 0) {
451                         /* Error fetching flags, just give up for
452                            now. */
453                         smi_info->si_state = SI_NORMAL;
454                 } else if (len < 4) {
455                         /* Hmm, no flags.  That's technically illegal, but
456                            don't use uninitialized data. */
457                         smi_info->si_state = SI_NORMAL;
458                 } else {
459                         smi_info->msg_flags = msg[3];
460                         handle_flags(smi_info);
461                 }
462                 break;
463         }
464
465         case SI_CLEARING_FLAGS:
466         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
467         {
468                 unsigned char msg[3];
469
470                 /* We cleared the flags. */
471                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
472                 if (msg[2] != 0) {
473                         /* Error clearing flags */
474                         printk(KERN_WARNING
475                                "ipmi_si: Error clearing flags: %2.2x\n",
476                                msg[2]);
477                 }
478                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
479                         start_enable_irq(smi_info);
480                 else
481                         smi_info->si_state = SI_NORMAL;
482                 break;
483         }
484
485         case SI_GETTING_EVENTS:
486         {
487                 smi_info->curr_msg->rsp_size
488                         = smi_info->handlers->get_result(
489                                 smi_info->si_sm,
490                                 smi_info->curr_msg->rsp,
491                                 IPMI_MAX_MSG_LENGTH);
492
493                 /* Do this here because deliver_recv_msg() releases the
494                    lock, and a new message can be put in during the
495                    time the lock is released. */
496                 msg = smi_info->curr_msg;
497                 smi_info->curr_msg = NULL;
498                 if (msg->rsp[2] != 0) {
499                         /* Error getting event, probably done. */
500                         msg->done(msg);
501
502                         /* Take off the event flag. */
503                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
504                         handle_flags(smi_info);
505                 } else {
506                         spin_lock(&smi_info->count_lock);
507                         smi_info->events++;
508                         spin_unlock(&smi_info->count_lock);
509
510                         /* Do this before we deliver the message
511                            because delivering the message releases the
512                            lock and something else can mess with the
513                            state. */
514                         handle_flags(smi_info);
515
516                         deliver_recv_msg(smi_info, msg);
517                 }
518                 break;
519         }
520
521         case SI_GETTING_MESSAGES:
522         {
523                 smi_info->curr_msg->rsp_size
524                         = smi_info->handlers->get_result(
525                                 smi_info->si_sm,
526                                 smi_info->curr_msg->rsp,
527                                 IPMI_MAX_MSG_LENGTH);
528
529                 /* Do this here because deliver_recv_msg() releases the
530                    lock, and a new message can be put in during the
531                    time the lock is released. */
532                 msg = smi_info->curr_msg;
533                 smi_info->curr_msg = NULL;
534                 if (msg->rsp[2] != 0) {
535                         /* Error getting message, probably done. */
536                         msg->done(msg);
537
538                         /* Take off the msg flag. */
539                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
540                         handle_flags(smi_info);
541                 } else {
542                         spin_lock(&smi_info->count_lock);
543                         smi_info->incoming_messages++;
544                         spin_unlock(&smi_info->count_lock);
545
546                         /* Do this before we deliver the message
547                            because delivering the message releases the
548                            lock and something else can mess with the
549                            state. */
550                         handle_flags(smi_info);
551
552                         deliver_recv_msg(smi_info, msg);
553                 }
554                 break;
555         }
556
557         case SI_ENABLE_INTERRUPTS1:
558         {
559                 unsigned char msg[4];
560
561                 /* We got the flags from the SMI, now handle them. */
562                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
563                 if (msg[2] != 0) {
564                         printk(KERN_WARNING
565                                "ipmi_si: Could not enable interrupts"
566                                ", failed get, using polled mode.\n");
567                         smi_info->si_state = SI_NORMAL;
568                 } else {
569                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
570                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
571                         msg[2] = msg[3] | 1; /* enable msg queue int */
572                         smi_info->handlers->start_transaction(
573                                 smi_info->si_sm, msg, 3);
574                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
575                 }
576                 break;
577         }
578
579         case SI_ENABLE_INTERRUPTS2:
580         {
581                 unsigned char msg[4];
582
583                 /* We got the flags from the SMI, now handle them. */
584                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
585                 if (msg[2] != 0) {
586                         printk(KERN_WARNING
587                                "ipmi_si: Could not enable interrupts"
588                                ", failed set, using polled mode.\n");
589                 }
590                 smi_info->si_state = SI_NORMAL;
591                 break;
592         }
593         }
594 }
595
596 /* Called on timeouts and events.  Timeouts should pass the elapsed
597    time in microseconds, interrupts should pass in zero. */
598 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
599                                            int time)
600 {
601         enum si_sm_result si_sm_result;
602
603  restart:
604         /* There used to be a loop here that waited a little while
605            (around 25us) before giving up.  That turned out to be
606            pointless, the minimum delays I was seeing were in the 300us
607            range, which is far too long to wait in an interrupt.  So
608            we just run until the state machine tells us something
609            happened or it needs a delay. */
610         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
611         time = 0;
612         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
613         {
614                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
615         }
616
617         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
618         {
619                 spin_lock(&smi_info->count_lock);
620                 smi_info->complete_transactions++;
621                 spin_unlock(&smi_info->count_lock);
622
623                 handle_transaction_done(smi_info);
624                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
625         }
626         else if (si_sm_result == SI_SM_HOSED)
627         {
628                 spin_lock(&smi_info->count_lock);
629                 smi_info->hosed_count++;
630                 spin_unlock(&smi_info->count_lock);
631
632                 /* Do this before return_hosed_msg(), because that
633                    releases the lock. */
634                 smi_info->si_state = SI_NORMAL;
635                 if (smi_info->curr_msg != NULL) {
636                         /* If we were handling a user message, format
637                            a response to send to the upper layer to
638                            tell it about the error. */
639                         return_hosed_msg(smi_info);
640                 }
641                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
642         }
643
644         /* We prefer handling attn over new messages. */
645         if (si_sm_result == SI_SM_ATTN)
646         {
647                 unsigned char msg[2];
648
649                 spin_lock(&smi_info->count_lock);
650                 smi_info->attentions++;
651                 spin_unlock(&smi_info->count_lock);
652
653                 /* Got an attn, send down a get message flags to see
654                    what's causing it.  It would be better to handle
655                    this in the upper layer, but due to the way
656                    interrupts work with the SMI, that's not really
657                    possible. */
658                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
659                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
660
661                 smi_info->handlers->start_transaction(
662                         smi_info->si_sm, msg, 2);
663                 smi_info->si_state = SI_GETTING_FLAGS;
664                 goto restart;
665         }
666
667         /* If we are currently idle, try to start the next message. */
668         if (si_sm_result == SI_SM_IDLE) {
669                 spin_lock(&smi_info->count_lock);
670                 smi_info->idles++;
671                 spin_unlock(&smi_info->count_lock);
672
673                 si_sm_result = start_next_msg(smi_info);
674                 if (si_sm_result != SI_SM_IDLE)
675                         goto restart;
676         }
677
678         if ((si_sm_result == SI_SM_IDLE)
679             && (atomic_read(&smi_info->req_events)))
680         {
681                 /* We are idle and the upper layer requested that I fetch
682                    events, so do so. */
683                 unsigned char msg[2];
684
685                 spin_lock(&smi_info->count_lock);
686                 smi_info->flag_fetches++;
687                 spin_unlock(&smi_info->count_lock);
688
689                 atomic_set(&smi_info->req_events, 0);
690                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
691                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
692
693                 smi_info->handlers->start_transaction(
694                         smi_info->si_sm, msg, 2);
695                 smi_info->si_state = SI_GETTING_FLAGS;
696                 goto restart;
697         }
698
699         return si_sm_result;
700 }
701
702 static void sender(void                *send_info,
703                    struct ipmi_smi_msg *msg,
704                    int                 priority)
705 {
706         struct smi_info   *smi_info = send_info;
707         enum si_sm_result result;
708         unsigned long     flags;
709 #ifdef DEBUG_TIMING
710         struct timeval    t;
711 #endif
712
713         spin_lock_irqsave(&(smi_info->msg_lock), flags);
714 #ifdef DEBUG_TIMING
715         do_gettimeofday(&t);
716         printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
717 #endif
718
719         if (smi_info->run_to_completion) {
720                 /* If we are running to completion, then throw it in
721                    the list and run transactions until everything is
722                    clear.  Priority doesn't matter here. */
723                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
724
725                 /* We have to release the msg lock and claim the smi
726                    lock in this case, because of race conditions. */
727                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
728
729                 spin_lock_irqsave(&(smi_info->si_lock), flags);
730                 result = smi_event_handler(smi_info, 0);
731                 while (result != SI_SM_IDLE) {
732                         udelay(SI_SHORT_TIMEOUT_USEC);
733                         result = smi_event_handler(smi_info,
734                                                    SI_SHORT_TIMEOUT_USEC);
735                 }
736                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
737                 return;
738         } else {
739                 if (priority > 0) {
740                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
741                 } else {
742                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
743                 }
744         }
745         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
746
747         spin_lock_irqsave(&(smi_info->si_lock), flags);
748         if ((smi_info->si_state == SI_NORMAL)
749             && (smi_info->curr_msg == NULL))
750         {
751                 start_next_msg(smi_info);
752         }
753         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
754 }
755
756 static void set_run_to_completion(void *send_info, int i_run_to_completion)
757 {
758         struct smi_info   *smi_info = send_info;
759         enum si_sm_result result;
760         unsigned long     flags;
761
762         spin_lock_irqsave(&(smi_info->si_lock), flags);
763
764         smi_info->run_to_completion = i_run_to_completion;
765         if (i_run_to_completion) {
766                 result = smi_event_handler(smi_info, 0);
767                 while (result != SI_SM_IDLE) {
768                         udelay(SI_SHORT_TIMEOUT_USEC);
769                         result = smi_event_handler(smi_info,
770                                                    SI_SHORT_TIMEOUT_USEC);
771                 }
772         }
773
774         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
775 }
776
777 static int ipmi_thread(void *data)
778 {
779         struct smi_info *smi_info = data;
780         unsigned long flags;
781         enum si_sm_result smi_result;
782
783         set_user_nice(current, 19);
784         while (!kthread_should_stop()) {
785                 spin_lock_irqsave(&(smi_info->si_lock), flags);
786                 smi_result = smi_event_handler(smi_info, 0);
787                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
788                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
789                         /* do nothing */
790                 }
791                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
792                         schedule();
793                 else
794                         schedule_timeout_interruptible(1);
795         }
796         return 0;
797 }
798
799
800 static void poll(void *send_info)
801 {
802         struct smi_info *smi_info = send_info;
803
804         smi_event_handler(smi_info, 0);
805 }
806
807 static void request_events(void *send_info)
808 {
809         struct smi_info *smi_info = send_info;
810
811         atomic_set(&smi_info->req_events, 1);
812 }
813
814 static int initialized = 0;
815
816 static void smi_timeout(unsigned long data)
817 {
818         struct smi_info   *smi_info = (struct smi_info *) data;
819         enum si_sm_result smi_result;
820         unsigned long     flags;
821         unsigned long     jiffies_now;
822         long              time_diff;
823 #ifdef DEBUG_TIMING
824         struct timeval    t;
825 #endif
826
827         if (atomic_read(&smi_info->stop_operation))
828                 return;
829
830         spin_lock_irqsave(&(smi_info->si_lock), flags);
831 #ifdef DEBUG_TIMING
832         do_gettimeofday(&t);
833         printk("**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
834 #endif
835         jiffies_now = jiffies;
836         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
837                      * SI_USEC_PER_JIFFY);
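        /* Worked example (illustrative): if 3 jiffies elapsed and HZ=250,
           SI_USEC_PER_JIFFY is 4000, so time_diff is 12000 usec, the
           elapsed time smi_event_handler() expects. */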
838         smi_result = smi_event_handler(smi_info, time_diff);
839
840         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
841
842         smi_info->last_timeout_jiffies = jiffies_now;
843
844         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
845                 /* Running with interrupts, only do long timeouts. */
846                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
847                 spin_lock_irqsave(&smi_info->count_lock, flags);
848                 smi_info->long_timeouts++;
849                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
850                 goto do_add_timer;
851         }
852
853         /* If the state machine asks for a short delay, then shorten
854            the timer timeout. */
855         if (smi_result == SI_SM_CALL_WITH_DELAY) {
856                 spin_lock_irqsave(&smi_info->count_lock, flags);
857                 smi_info->short_timeouts++;
858                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
859                 smi_info->si_timer.expires = jiffies + 1;
860         } else {
861                 spin_lock_irqsave(&smi_info->count_lock, flags);
862                 smi_info->long_timeouts++;
863                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
864                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
865         }
866
867  do_add_timer:
868         add_timer(&(smi_info->si_timer));
869 }
870
871 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
872 {
873         struct smi_info *smi_info = data;
874         unsigned long   flags;
875 #ifdef DEBUG_TIMING
876         struct timeval  t;
877 #endif
878
879         spin_lock_irqsave(&(smi_info->si_lock), flags);
880
881         spin_lock(&smi_info->count_lock);
882         smi_info->interrupts++;
883         spin_unlock(&smi_info->count_lock);
884
885         if (atomic_read(&smi_info->stop_operation))
886                 goto out;
887
888 #ifdef DEBUG_TIMING
889         do_gettimeofday(&t);
890         printk("**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
891 #endif
892         smi_event_handler(smi_info, 0);
893  out:
894         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
895         return IRQ_HANDLED;
896 }
897
898 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
899 {
900         struct smi_info *smi_info = data;
901         /* We need to clear the IRQ flag for the BT interface. */
902         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
903                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
904                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
905         return si_irq_handler(irq, data, regs);
906 }
907
908 static int smi_start_processing(void       *send_info,
909                                 ipmi_smi_t intf)
910 {
911         struct smi_info *new_smi = send_info;
912
913         new_smi->intf = intf;
914
915         /* Set up the timer that drives the interface. */
916         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
917         new_smi->last_timeout_jiffies = jiffies;
918         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
919
920         if (new_smi->si_type != SI_BT) {
921                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
922                                               "kipmi%d", new_smi->intf_num);
923                 if (IS_ERR(new_smi->thread)) {
924                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
925                                " kernel thread due to error %ld, only using"
926                                " timers to drive the interface\n",
927                                PTR_ERR(new_smi->thread));
928                         new_smi->thread = NULL;
929                 }
930         }
931
932         return 0;
933 }
934
935 static struct ipmi_smi_handlers handlers =
936 {
937         .owner                  = THIS_MODULE,
938         .start_processing       = smi_start_processing,
939         .sender                 = sender,
940         .request_events         = request_events,
941         .set_run_to_completion  = set_run_to_completion,
942         .poll                   = poll,
943 };
944
945 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
946    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_PARMS */
947
948 #define SI_MAX_PARMS 4
949 static LIST_HEAD(smi_infos);
950 static DEFINE_MUTEX(smi_infos_lock);
951 static int smi_num; /* Used to sequence the SMIs */
952
953 #define DEFAULT_REGSPACING      1
954
955 static int           si_trydefaults = 1;
956 static char          *si_type[SI_MAX_PARMS];
957 #define MAX_SI_TYPE_STR 30
958 static char          si_type_str[MAX_SI_TYPE_STR];
959 static unsigned long addrs[SI_MAX_PARMS];
960 static int num_addrs;
961 static unsigned int  ports[SI_MAX_PARMS];
962 static int num_ports;
963 static int           irqs[SI_MAX_PARMS];
964 static int num_irqs;
965 static int           regspacings[SI_MAX_PARMS];
966 static int num_regspacings = 0;
967 static int           regsizes[SI_MAX_PARMS];
968 static int num_regsizes = 0;
969 static int           regshifts[SI_MAX_PARMS];
970 static int num_regshifts = 0;
971 static int slave_addrs[SI_MAX_PARMS];
972 static int num_slave_addrs = 0;
973
974
975 module_param_named(trydefaults, si_trydefaults, bool, 0);
976 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
977                  " default scan of the KCS and SMIC interfaces at the standard"
978                  " addresses");
979 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
980 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
981                  " interface separated by commas.  The types are 'kcs',"
982                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
983                  " the first interface to kcs and the second to bt");
984 module_param_array(addrs, long, &num_addrs, 0);
985 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
986                  " addresses separated by commas.  Only use if an interface"
987                  " is in memory.  Otherwise, set it to zero or leave"
988                  " it blank.");
989 module_param_array(ports, int, &num_ports, 0);
990 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
991                  " addresses separated by commas.  Only use if an interface"
992                  " is a port.  Otherwise, set it to zero or leave"
993                  " it blank.");
994 module_param_array(irqs, int, &num_irqs, 0);
995 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
996                  " addresses separated by commas.  Only use if an interface"
997                  " has an interrupt.  Otherwise, set it to zero or leave"
998                  " it blank.");
999 module_param_array(regspacings, int, &num_regspacings, 0);
1000 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1001                  " and each successive register used by the interface.  For"
1002                  " instance, if the start address is 0xca2 and the spacing"
1003                  " is 2, then the second address is at 0xca4.  Defaults"
1004                  " to 1.");
1005 module_param_array(regsizes, int, &num_regsizes, 0);
1006 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1007                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1008                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1009                  " the 8-bit IPMI register has to be read from a larger"
1010                  " register.");
1011 module_param_array(regshifts, int, &num_regshifts, 0);
1012 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1013                  " IPMI register, in bits.  For instance, if the data"
1014                  " is read from a 32-bit word and the IPMI data is in"
1015                  " bits 8-15, then the shift would be 8");
1016 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1017 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1018                  " the controller.  Normally this is 0x20, but can be"
1019                  " overridden by this parm.  This is an array indexed"
1020                  " by interface number.");
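/* Illustrative usage (hypothetical values, not taken from this file): a
 * system with two hardcoded interfaces might be loaded with something like
 *
 *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 regspacings=1,4 irqs=0,11
 *
 * i.e. interface 0 is a KCS interface at I/O port 0xca2 run polled, and
 * interface 1 is a BT interface at 0xe4 with register spacing 4 and IRQ 11. */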
1021
1022
1023 #define IPMI_IO_ADDR_SPACE  0
1024 #define IPMI_MEM_ADDR_SPACE 1
1025 static char *addr_space_to_str[] = { "I/O", "memory" };
1026
1027 static void std_irq_cleanup(struct smi_info *info)
1028 {
1029         if (info->si_type == SI_BT)
1030                 /* Disable the interrupt in the BT interface. */
1031                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1032         free_irq(info->irq, info);
1033 }
1034
1035 static int std_irq_setup(struct smi_info *info)
1036 {
1037         int rv;
1038
1039         if (!info->irq)
1040                 return 0;
1041
1042         if (info->si_type == SI_BT) {
1043                 rv = request_irq(info->irq,
1044                                  si_bt_irq_handler,
1045                                  SA_INTERRUPT,
1046                                  DEVICE_NAME,
1047                                  info);
1048                 if (!rv)
1049                         /* Enable the interrupt in the BT interface. */
1050                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1051                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1052         } else
1053                 rv = request_irq(info->irq,
1054                                  si_irq_handler,
1055                                  SA_INTERRUPT,
1056                                  DEVICE_NAME,
1057                                  info);
1058         if (rv) {
1059                 printk(KERN_WARNING
1060                        "ipmi_si: %s unable to claim interrupt %d,"
1061                        " running polled\n",
1062                        DEVICE_NAME, info->irq);
1063                 info->irq = 0;
1064         } else {
1065                 info->irq_cleanup = std_irq_cleanup;
1066                 printk("  Using irq %d\n", info->irq);
1067         }
1068
1069         return rv;
1070 }
1071
1072 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1073 {
1074         unsigned int addr = io->addr_data;
1075
1076         return inb(addr + (offset * io->regspacing));
1077 }
1078
1079 static void port_outb(struct si_sm_io *io, unsigned int offset,
1080                       unsigned char b)
1081 {
1082         unsigned int addr = io->addr_data;
1083
1084         outb(b, addr + (offset * io->regspacing));
1085 }
1086
1087 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1088 {
1089         unsigned int addr = io->addr_data;
1090
1091         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1092 }
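/* Worked example (illustrative): with regspacing=4 and regshift=8, reading
 * register offset 1 at base 0xca2 does inw(0xca6); if the device returns
 * 0xAB00, (0xAB00 >> 8) & 0xff yields the IPMI data byte 0xAB. */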
1093
1094 static void port_outw(struct si_sm_io *io, unsigned int offset,
1095                       unsigned char b)
1096 {
1097         unsigned int addr = io->addr_data;
1098
1099         outw(b << io->regshift, addr + (offset * io->regspacing));
1100 }
1101
1102 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1103 {
1104         unsigned int addr = io->addr_data;
1105
1106         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1107 }
1108
1109 static void port_outl(struct si_sm_io *io, unsigned int offset,
1110                       unsigned char b)
1111 {
1112         unsigned int addr = io->addr_data;
1113
1114         outl(b << io->regshift, addr+(offset * io->regspacing));
1115 }
1116
1117 static void port_cleanup(struct smi_info *info)
1118 {
1119         unsigned int addr = info->io.addr_data;
1120         int          idx;
1121
1122         if (addr) {
1123                 for (idx = 0; idx < info->io_size; idx++) {
1124                         release_region(addr + idx * info->io.regspacing,
1125                                        info->io.regsize);
1126                 }
1127         }
1128 }
1129
1130 static int port_setup(struct smi_info *info)
1131 {
1132         unsigned int addr = info->io.addr_data;
1133         int          idx;
1134
1135         if (!addr)
1136                 return -ENODEV;
1137
1138         info->io_cleanup = port_cleanup;
1139
1140         /* Figure out the actual inb/inw/inl/etc routine to use based
1141            upon the register size. */
1142         switch (info->io.regsize) {
1143         case 1:
1144                 info->io.inputb = port_inb;
1145                 info->io.outputb = port_outb;
1146                 break;
1147         case 2:
1148                 info->io.inputb = port_inw;
1149                 info->io.outputb = port_outw;
1150                 break;
1151         case 4:
1152                 info->io.inputb = port_inl;
1153                 info->io.outputb = port_outl;
1154                 break;
1155         default:
1156                 printk("ipmi_si: Invalid register size: %d\n",
1157                        info->io.regsize);
1158                 return -EINVAL;
1159         }
1160
1161         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1162          * tables.  This causes problems when trying to register the
1163          * entire I/O region.  Therefore we must register each I/O
1164          * port separately.
1165          */
1166         for (idx = 0; idx < info->io_size; idx++) {
1167                 if (request_region(addr + idx * info->io.regspacing,
1168                                    info->io.regsize, DEVICE_NAME) == NULL) {
1169                         /* Undo allocations */
1170                         while (idx--) {
1171                                 release_region(addr + idx * info->io.regspacing,
1172                                                info->io.regsize);
1173                         }
1174                         return -EIO;
1175                 }
1176         }
1177         return 0;
1178 }
1179
1180 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1181 {
1182         return readb((io->addr)+(offset * io->regspacing));
1183 }
1184
1185 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1186                      unsigned char b)
1187 {
1188         writeb(b, (io->addr)+(offset * io->regspacing));
1189 }
1190
1191 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1192 {
1193         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1194                 & 0xff;
1195 }
1196
1197 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1198                      unsigned char b)
1199 {
1200         writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1201 }
1202
1203 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1204 {
1205         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1206                 & 0xff;
1207 }
1208
1209 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1210                      unsigned char b)
1211 {
1212         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1213 }
1214
1215 #ifdef readq
1216 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1217 {
1218         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1219                 & 0xff;
1220 }
1221
1222 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1223                      unsigned char b)
1224 {
1225         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1226 }
1227 #endif
1228
1229 static void mem_cleanup(struct smi_info *info)
1230 {
1231         unsigned long addr = info->io.addr_data;
1232         int           mapsize;
1233
1234         if (info->io.addr) {
1235                 iounmap(info->io.addr);
1236
1237                 mapsize = ((info->io_size * info->io.regspacing)
1238                            - (info->io.regspacing - info->io.regsize));
1239
1240                 release_mem_region(addr, mapsize);
1241         }
1242 }
1243
1244 static int mem_setup(struct smi_info *info)
1245 {
1246         unsigned long addr = info->io.addr_data;
1247         int           mapsize;
1248
1249         if (!addr)
1250                 return -ENODEV;
1251
1252         info->io_cleanup = mem_cleanup;
1253
1254         /* Figure out the actual readb/readw/readl/etc routine to use based
1255            upon the register size. */
1256         switch (info->io.regsize) {
1257         case 1:
1258                 info->io.inputb = intf_mem_inb;
1259                 info->io.outputb = intf_mem_outb;
1260                 break;
1261         case 2:
1262                 info->io.inputb = intf_mem_inw;
1263                 info->io.outputb = intf_mem_outw;
1264                 break;
1265         case 4:
1266                 info->io.inputb = intf_mem_inl;
1267                 info->io.outputb = intf_mem_outl;
1268                 break;
1269 #ifdef readq
1270         case 8:
1271                 info->io.inputb = mem_inq;
1272                 info->io.outputb = mem_outq;
1273                 break;
1274 #endif
1275         default:
1276                 printk("ipmi_si: Invalid register size: %d\n",
1277                        info->io.regsize);
1278                 return -EINVAL;
1279         }
1280
1281         /* Calculate the total amount of memory to claim.  This is an
1282          * unusual looking calculation, but it avoids claiming any
1283          * more memory than it has to.  It will claim everything
1284          * between the first address to the end of the last full
1285          * register. */
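        /* For example (illustrative numbers): io_size=2, regspacing=4 and
         * regsize=1 give mapsize = (2 * 4) - (4 - 1) = 5, i.e. from the
         * first register through the end of the last one. */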
1286         mapsize = ((info->io_size * info->io.regspacing)
1287                    - (info->io.regspacing - info->io.regsize));
1288
1289         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1290                 return -EIO;
1291
1292         info->io.addr = ioremap(addr, mapsize);
1293         if (info->io.addr == NULL) {
1294                 release_mem_region(addr, mapsize);
1295                 return -EIO;
1296         }
1297         return 0;
1298 }
1299
1300
1301 static __devinit void hardcode_find_bmc(void)
1302 {
1303         int             i;
1304         struct smi_info *info;
1305
1306         for (i = 0; i < SI_MAX_PARMS; i++) {
1307                 if (!ports[i] && !addrs[i])
1308                         continue;
1309
1310                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1311                 if (!info)
1312                         return;
1313
1314                 info->addr_source = "hardcoded";
1315
1316                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1317                         info->si_type = SI_KCS;
1318                 } else if (strcmp(si_type[i], "smic") == 0) {
1319                         info->si_type = SI_SMIC;
1320                 } else if (strcmp(si_type[i], "bt") == 0) {
1321                         info->si_type = SI_BT;
1322                 } else {
1323                         printk(KERN_WARNING
1324                                "ipmi_si: Interface type specified "
1325                                "for interface %d was invalid: %s\n",
1326                                i, si_type[i]);
1327                         kfree(info);
1328                         continue;
1329                 }
1330
1331                 if (ports[i]) {
1332                         /* An I/O port */
1333                         info->io_setup = port_setup;
1334                         info->io.addr_data = ports[i];
1335                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1336                 } else if (addrs[i]) {
1337                         /* A memory port */
1338                         info->io_setup = mem_setup;
1339                         info->io.addr_data = addrs[i];
1340                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1341                 } else {
1342                         printk(KERN_WARNING
1343                                "ipmi_si: Interface type specified "
1344                                "for interface %d, "
1345                                "but port and address were not set or "
1346                                "set to zero.\n", i);
1347                         kfree(info);
1348                         continue;
1349                 }
1350
1351                 info->io.addr = NULL;
1352                 info->io.regspacing = regspacings[i];
1353                 if (!info->io.regspacing)
1354                         info->io.regspacing = DEFAULT_REGSPACING;
1355                 info->io.regsize = regsizes[i];
1356                 if (!info->io.regsize)
1357                         info->io.regsize = DEFAULT_REGSPACING;
1358                 info->io.regshift = regshifts[i];
1359                 info->irq = irqs[i];
1360                 if (info->irq)
1361                         info->irq_setup = std_irq_setup;
1362
1363                 try_smi_init(info);
1364         }
1365 }
1366
1367 #ifdef CONFIG_ACPI
1368
1369 #include <linux/acpi.h>
1370
1371 /* Once we get an ACPI failure, we don't try any more, because we go
1372    through the tables sequentially.  Once we don't find a table, there
1373    are no more. */
1374 static int acpi_failure = 0;
1375
1376 /* For GPE-type interrupts. */
1377 static u32 ipmi_acpi_gpe(void *context)
1378 {
1379         struct smi_info *smi_info = context;
1380         unsigned long   flags;
1381 #ifdef DEBUG_TIMING
1382         struct timeval t;
1383 #endif
1384
1385         spin_lock_irqsave(&(smi_info->si_lock), flags);
1386
1387         spin_lock(&smi_info->count_lock);
1388         smi_info->interrupts++;
1389         spin_unlock(&smi_info->count_lock);
1390
1391         if (atomic_read(&smi_info->stop_operation))
1392                 goto out;
1393
1394 #ifdef DEBUG_TIMING
1395         do_gettimeofday(&t);
1396         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1397 #endif
1398         smi_event_handler(smi_info, 0);
1399  out:
1400         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1401
1402         return ACPI_INTERRUPT_HANDLED;
1403 }
1404
1405 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1406 {
1407         if (!info->irq)
1408                 return;
1409
1410         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1411 }
1412
1413 static int acpi_gpe_irq_setup(struct smi_info *info)
1414 {
1415         acpi_status status;
1416
1417         if (!info->irq)
1418                 return 0;
1419
1420         /* FIXME - is level triggered right? */
1421         status = acpi_install_gpe_handler(NULL,
1422                                           info->irq,
1423                                           ACPI_GPE_LEVEL_TRIGGERED,
1424                                           &ipmi_acpi_gpe,
1425                                           info);
1426         if (status != AE_OK) {
1427                 printk(KERN_WARNING
1428                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1429                        " running polled\n",
1430                        DEVICE_NAME, info->irq);
1431                 info->irq = 0;
1432                 return -EINVAL;
1433         } else {
1434                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1435                 printk("  Using ACPI GPE %d\n", info->irq);
1436                 return 0;
1437         }
1438 }
1439
1440 /*
1441  * Defined at
1442  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1443  */
1444 struct SPMITable {
1445         s8      Signature[4];
1446         u32     Length;
1447         u8      Revision;
1448         u8      Checksum;
1449         s8      OEMID[6];
1450         s8      OEMTableID[8];
1451         s8      OEMRevision[4];
1452         s8      CreatorID[4];
1453         s8      CreatorRevision[4];
1454         u8      InterfaceType;
1455         u8      IPMIlegacy;
1456         s16     SpecificationRevision;
1457
1458         /*
1459          * Bit 0 - SCI interrupt supported
1460          * Bit 1 - I/O APIC/SAPIC
1461          */
1462         u8      InterruptType;
1463
1464         /* If bit 0 of InterruptType is set, then this is the SCI
1465            interrupt in the GPEx_STS register. */
1466         u8      GPE;
1467
1468         s16     Reserved;
1469
1470         /* If bit 1 of InterruptType is set, then this is the I/O
1471            APIC/SAPIC interrupt. */
1472         u32     GlobalSystemInterrupt;
1473
1474         /* The actual register address. */
1475         struct acpi_generic_address addr;
1476
1477         u8      UID[4];
1478
1479         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1480 };
1481
1482 static __devinit int try_init_acpi(struct SPMITable *spmi)
1483 {
1484         struct smi_info  *info;
1485         char             *io_type;
1486         u8               addr_space;
1487
1488         if (spmi->IPMIlegacy != 1) {
1489                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1490                 return -ENODEV;
1491         }
1492
1493         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1494                 addr_space = IPMI_MEM_ADDR_SPACE;
1495         else
1496                 addr_space = IPMI_IO_ADDR_SPACE;
1497
1498         info = kzalloc(sizeof(*info), GFP_KERNEL);
1499         if (!info) {
1500                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1501                 return -ENOMEM;
1502         }
1503
1504         info->addr_source = "ACPI";
1505
1506         /* Figure out the interface type. */
1507         switch (spmi->InterfaceType)
1508         {
1509         case 1: /* KCS */
1510                 info->si_type = SI_KCS;
1511                 break;
1512         case 2: /* SMIC */
1513                 info->si_type = SI_SMIC;
1514                 break;
1515         case 3: /* BT */
1516                 info->si_type = SI_BT;
1517                 break;
1518         default:
1519                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1520                         spmi->InterfaceType);
1521                 kfree(info);
1522                 return -EIO;
1523         }
1524
1525         if (spmi->InterruptType & 1) {
1526                 /* We've got a GPE interrupt. */
1527                 info->irq = spmi->GPE;
1528                 info->irq_setup = acpi_gpe_irq_setup;
1529         } else if (spmi->InterruptType & 2) {
1530                 /* We've got an APIC/SAPIC interrupt. */
1531                 info->irq = spmi->GlobalSystemInterrupt;
1532                 info->irq_setup = std_irq_setup;
1533         } else {
1534                 /* Use the default interrupt setting. */
1535                 info->irq = 0;
1536                 info->irq_setup = NULL;
1537         }
1538
1539         if (spmi->addr.register_bit_width) {
1540                 /* A (hopefully) properly formed register bit width. */
1541                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1542         } else {
1543                 info->io.regspacing = DEFAULT_REGSPACING;
1544         }
1545         info->io.regsize = info->io.regspacing;
1546         info->io.regshift = spmi->addr.register_bit_offset;
1547
1548         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1549                 io_type = "memory";
1550                 info->io_setup = mem_setup;
1551                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1552         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1553                 io_type = "I/O";
1554                 info->io_setup = port_setup;
1555                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1556         } else {
1557                 kfree(info);
1558                 printk(KERN_INFO "ipmi_si: Unknown ACPI I/O Address type\n");
1559                 return -EIO;
1560         }
1561         info->io.addr_data = spmi->addr.address;
1562
1563         try_smi_init(info);
1564
1565         return 0;
1566 }
1567
1568 static __devinit void acpi_find_bmc(void)
1569 {
1570         acpi_status      status;
1571         struct SPMITable *spmi;
1572         int              i;
1573
1574         if (acpi_disabled)
1575                 return;
1576
1577         if (acpi_failure)
1578                 return;
1579
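        /* acpi_get_firmware_table() instances appear to be numbered
           from 1, hence the i+1 below. */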
1580         for (i = 0; ; i++) {
1581                 status = acpi_get_firmware_table("SPMI", i+1,
1582                                                  ACPI_LOGICAL_ADDRESSING,
1583                                                  (struct acpi_table_header **)
1584                                                  &spmi);
1585                 if (status != AE_OK)
1586                         return;
1587
1588                 try_init_acpi(spmi);
1589         }
1590 }
1591 #endif
1592
1593 #ifdef CONFIG_DMI
1594 struct dmi_ipmi_data
1595 {
1596         u8              type;
1597         u8              addr_space;
1598         unsigned long   base_addr;
1599         u8              irq;
1600         u8              offset;
1601         u8              slave_addr;
1602 };
1603
1604 static int __devinit decode_dmi(struct dmi_header *dm,
1605                                 struct dmi_ipmi_data *dmi)
1606 {
1607         u8              *data = (u8 *)dm;
1608         unsigned long   base_addr;
1609         u8              reg_spacing;
1610         u8              len = dm->length;
1611
1612         dmi->type = data[4];
1613
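        /* Offset 8 of the SMBIOS type 38 (IPMI Device Information)
           record holds the BMC base address; bit 0 is set for an
           I/O-mapped interface and clear for a memory-mapped one. */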
1614         memcpy(&base_addr, data+8, sizeof(unsigned long));
1615         if (len >= 0x11) {
1616                 if (base_addr & 1) {
1617                         /* I/O */
1618                         base_addr &= 0xFFFE;
1619                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1620                 }
1621                 else {
1622                         /* Memory */
1623                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1624                 }
1625                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1626                    is odd. */
1627                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1628
1629                 dmi->irq = data[0x11];
1630
1631                 /* The top two bits of byte 0x10 hold the register spacing. */
1632                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1633                 switch(reg_spacing){
1634                 case 0x00: /* Byte boundaries */
1635                     dmi->offset = 1;
1636                     break;
1637                 case 0x01: /* 32-bit boundaries */
1638                     dmi->offset = 4;
1639                     break;
1640                 case 0x02: /* 16-byte boundaries */
1641                     dmi->offset = 16;
1642                     break;
1643                 default:
1644                     /* Some other interface, just ignore it. */
1645                     return -EIO;
1646                 }
1647         } else {
1648                 /* Old DMI spec. */
1649                 /* Note that technically, the lower bit of the base
1650                  * address should be 1 if the address is I/O and 0 if
1651                  * the address is in memory.  So many systems get that
1652                  * wrong (and all that I have seen are I/O) so we just
1653                  * ignore that bit and assume I/O.  Systems that use
1654                  * memory should use the newer spec, anyway. */
1655                 dmi->base_addr = base_addr & 0xfffe;
1656                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1657                 dmi->offset = 1;
1658         }
1659
1660         dmi->slave_addr = data[6];
1661
1662         return 0;
1663 }
1664
1665 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1666 {
1667         struct smi_info *info;
1668
1669         info = kzalloc(sizeof(*info), GFP_KERNEL);
1670         if (!info) {
1671                 printk(KERN_ERR
1672                        "ipmi_si: Could not allocate SI data\n");
1673                 return;
1674         }
1675
1676         info->addr_source = "SMBIOS";
1677
1678         switch (ipmi_data->type) {
1679         case 0x01: /* KCS */
1680                 info->si_type = SI_KCS;
1681                 break;
1682         case 0x02: /* SMIC */
1683                 info->si_type = SI_SMIC;
1684                 break;
1685         case 0x03: /* BT */
1686                 info->si_type = SI_BT;
1687                 break;
1688         default:
1689                 return;
1690         }
1691
1692         switch (ipmi_data->addr_space) {
1693         case IPMI_MEM_ADDR_SPACE:
1694                 info->io_setup = mem_setup;
1695                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1696                 break;
1697
1698         case IPMI_IO_ADDR_SPACE:
1699                 info->io_setup = port_setup;
1700                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1701                 break;
1702
1703         default:
1704                 kfree(info);
1705                 printk(KERN_WARNING
1706                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1707                        ipmi_data->addr_space);
1708                 return;
1709         }
1710         info->io.addr_data = ipmi_data->base_addr;
1711
1712         info->io.regspacing = ipmi_data->offset;
1713         if (!info->io.regspacing)
1714                 info->io.regspacing = DEFAULT_REGSPACING;
1715         info->io.regsize = DEFAULT_REGSPACING;
1716         info->io.regshift = 0;
1717
1718         info->slave_addr = ipmi_data->slave_addr;
1719
1720         info->irq = ipmi_data->irq;
1721         if (info->irq)
1722                 info->irq_setup = std_irq_setup;
1723
1724         try_smi_init(info);
1725 }
1726
1727 static void __devinit dmi_find_bmc(void)
1728 {
1729         struct dmi_device    *dev = NULL;
1730         struct dmi_ipmi_data data;
1731         int                  rv;
1732
1733         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1734                 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1735                 if (!rv)
1736                         try_init_dmi(&data);
1737         }
1738 }
1739 #endif /* CONFIG_DMI */
1740
1741 #ifdef CONFIG_PCI
1742
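/* PCI class 0x0c (serial bus), subclass 0x07 is the SIG-assigned class
   for IPMI interfaces; the programming-interface byte selects SMIC (0),
   KCS (1), or BT (2). */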
1743 #define PCI_ERMC_CLASSCODE              0x0C0700
1744 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
1745 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
1746 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
1747 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
1748 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
1749
1750 #define PCI_HP_VENDOR_ID    0x103C
1751 #define PCI_MMC_DEVICE_ID   0x121A
1752 #define PCI_MMC_ADDR_CW     0x10
1753
1754 static void ipmi_pci_cleanup(struct smi_info *info)
1755 {
1756         struct pci_dev *pdev = info->addr_source_data;
1757
1758         pci_disable_device(pdev);
1759 }
1760
1761 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1762                                     const struct pci_device_id *ent)
1763 {
1764         int rv;
1765         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1766         struct smi_info *info;
1767         int first_reg_offset = 0;
1768
1769         info = kzalloc(sizeof(*info), GFP_KERNEL);
1770         if (!info)
1771                 return -ENOMEM;
1772
1773         info->addr_source = "PCI";
1774
1775         switch (class_type) {
1776         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1777                 info->si_type = SI_SMIC;
1778                 break;
1779
1780         case PCI_ERMC_CLASSCODE_TYPE_KCS:
1781                 info->si_type = SI_KCS;
1782                 break;
1783
1784         case PCI_ERMC_CLASSCODE_TYPE_BT:
1785                 info->si_type = SI_BT;
1786                 break;
1787
1788         default:
1789                 kfree(info);
1790                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1791                        pci_name(pdev), class_type);
1792                 return -ENOMEM;
1793         }
1794
1795         rv = pci_enable_device(pdev);
1796         if (rv) {
1797                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1798                        pci_name(pdev));
1799                 kfree(info);
1800                 return rv;
1801         }
1802
1803         info->addr_source_cleanup = ipmi_pci_cleanup;
1804         info->addr_source_data = pdev;
1805
1806         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1807                 first_reg_offset = 1;
1808
1809         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1810                 info->io_setup = port_setup;
1811                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1812         } else {
1813                 info->io_setup = mem_setup;
1814                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1815         }
1816         info->io.addr_data = pci_resource_start(pdev, 0);
1817
1818         info->io.regspacing = DEFAULT_REGSPACING;
1819         info->io.regsize = DEFAULT_REGSPACING;
1820         info->io.regshift = 0;
1821
1822         info->irq = pdev->irq;
1823         if (info->irq)
1824                 info->irq_setup = std_irq_setup;
1825
1826         info->dev = &pdev->dev;
1827
1828         return try_smi_init(info);
1829 }
1830
1831 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1832 {
1833 }
1834
1835 #ifdef CONFIG_PM
1836 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1837 {
1838         return 0;
1839 }
1840
1841 static int ipmi_pci_resume(struct pci_dev *pdev)
1842 {
1843         return 0;
1844 }
1845 #endif
1846
1847 static struct pci_device_id ipmi_pci_devices[] = {
1848         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1849         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
             { 0, }
1850 };
1851 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1852
1853 static struct pci_driver ipmi_pci_driver = {
1854         .name =         DEVICE_NAME,
1855         .id_table =     ipmi_pci_devices,
1856         .probe =        ipmi_pci_probe,
1857         .remove =       __devexit_p(ipmi_pci_remove),
1858 #ifdef CONFIG_PM
1859         .suspend =      ipmi_pci_suspend,
1860         .resume =       ipmi_pci_resume,
1861 #endif
1862 };
1863 #endif /* CONFIG_PCI */
1864
1865
1866 static int try_get_dev_id(struct smi_info *smi_info)
1867 {
1868         unsigned char         msg[2];
1869         unsigned char         *resp;
1870         unsigned long         resp_len;
1871         enum si_sm_result     smi_result;
1872         int                   rv = 0;
1873
1874         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1875         if (!resp)
1876                 return -ENOMEM;
1877
1878         /* Do a Get Device ID command, since it comes back with some
1879            useful info. */
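        /* The netfn goes in the upper six bits of the first byte, with
           the LUN (0 here) in the low two bits. */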
1880         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1881         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1882         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1883
1884         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1885         for (;;)
1886         {
1887                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1888                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1889                         schedule_timeout_uninterruptible(1);
1890                         smi_result = smi_info->handlers->event(
1891                                 smi_info->si_sm, 100);
1892                 }
1893                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1894                 {
1895                         smi_result = smi_info->handlers->event(
1896                                 smi_info->si_sm, 0);
1897                 }
1898                 else
1899                         break;
1900         }
1901         if (smi_result == SI_SM_HOSED) {
1902                 /* We couldn't get the state machine to run, so whatever's at
1903                    the port is probably not an IPMI SMI interface. */
1904                 rv = -ENODEV;
1905                 goto out;
1906         }
1907
1908         /* Otherwise, we got some data. */
1909         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1910                                                   resp, IPMI_MAX_MSG_LENGTH);
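        /* A Get Device ID response is netfn/LUN, cmd, and completion
           code followed by at least 11 bytes of device ID data, so
           anything shorter than 14 bytes is bogus. */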
1911         if (resp_len < 14) {
1912                 /* That's odd, it should be longer. */
1913                 rv = -EINVAL;
1914                 goto out;
1915         }
1916
1917         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1918                 /* That's odd, it shouldn't be able to fail. */
1919                 rv = -EINVAL;
1920                 goto out;
1921         }
1922
1923         /* Record info from the get device id, in case we need it. */
1924         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1925
1926  out:
1927         kfree(resp);
1928         return rv;
1929 }
1930
1931 static int type_file_read_proc(char *page, char **start, off_t off,
1932                                int count, int *eof, void *data)
1933 {
1934         char            *out = (char *) page;
1935         struct smi_info *smi = data;
1936
1937         switch (smi->si_type) {
1938             case SI_KCS:
1939                 return sprintf(out, "kcs\n");
1940             case SI_SMIC:
1941                 return sprintf(out, "smic\n");
1942             case SI_BT:
1943                 return sprintf(out, "bt\n");
1944             default:
1945                 return 0;
1946         }
1947 }
1948
1949 static int stat_file_read_proc(char *page, char **start, off_t off,
1950                                int count, int *eof, void *data)
1951 {
1952         char            *out = (char *) page;
1953         struct smi_info *smi = data;
1954
1955         out += sprintf(out, "interrupts_enabled:    %d\n",
1956                        smi->irq && !smi->interrupt_disabled);
1957         out += sprintf(out, "short_timeouts:        %ld\n",
1958                        smi->short_timeouts);
1959         out += sprintf(out, "long_timeouts:         %ld\n",
1960                        smi->long_timeouts);
1961         out += sprintf(out, "timeout_restarts:      %ld\n",
1962                        smi->timeout_restarts);
1963         out += sprintf(out, "idles:                 %ld\n",
1964                        smi->idles);
1965         out += sprintf(out, "interrupts:            %ld\n",
1966                        smi->interrupts);
1967         out += sprintf(out, "attentions:            %ld\n",
1968                        smi->attentions);
1969         out += sprintf(out, "flag_fetches:          %ld\n",
1970                        smi->flag_fetches);
1971         out += sprintf(out, "hosed_count:           %ld\n",
1972                        smi->hosed_count);
1973         out += sprintf(out, "complete_transactions: %ld\n",
1974                        smi->complete_transactions);
1975         out += sprintf(out, "events:                %ld\n",
1976                        smi->events);
1977         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1978                        smi->watchdog_pretimeouts);
1979         out += sprintf(out, "incoming_messages:     %ld\n",
1980                        smi->incoming_messages);
1981
1982         return (out - ((char *) page));
1983 }
1984
1985 /*
1986  * oem_data_avail_to_receive_msg_avail
1987  * @info - smi_info structure with msg_flags set
1988  *
1989  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1990  * Returns 1 indicating need to re-run handle_flags().
1991  */
1992 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1993 {
1994         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1995                                 RECEIVE_MSG_AVAIL);
1996         return 1;
1997 }
1998
1999 /*
2000  * setup_dell_poweredge_oem_data_handler
2001  * @info - smi_info.device_id must be populated
2002  *
2003  * Systems that match, but have firmware version < 1.40 may assert
2004  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2005  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2006  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2007  * as RECEIVE_MSG_AVAIL instead.
2008  *
2009  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2010  * assert the OEM[012] bits, and if it did, the driver would have to
2011  * change to handle that properly, we don't actually check for the
2012  * firmware version.
2013  * Device ID = 0x20                BMC on PowerEdge 8G servers
2014  * Device Revision = 0x80
2015  * Firmware Revision1 = 0x01       BMC version 1.40
2016  * Firmware Revision2 = 0x40       BCD encoded
2017  * IPMI Version = 0x51             IPMI 1.5
2018  * Manufacturer ID = A2 02 00      Dell IANA
2019  *
2020  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2021  * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2022  *
2023  */
2024 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2025 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2026 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
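/* 0x0002a2 (674 decimal) is Dell's IANA-assigned enterprise number. */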
2027 #define DELL_IANA_MFR_ID 0x0002a2
2028 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2029 {
2030         struct ipmi_device_id *id = &smi_info->device_id;
2031         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2032                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2033                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2034                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2035                         smi_info->oem_data_avail_handler =
2036                                 oem_data_avail_to_receive_msg_avail;
2037                 }
2038                 else if (ipmi_version_major(id) < 1 ||
2039                          (ipmi_version_major(id) == 1 &&
2040                           ipmi_version_minor(id) < 5)) {
2041                         smi_info->oem_data_avail_handler =
2042                                 oem_data_avail_to_receive_msg_avail;
2043                 }
2044         }
2045 }
2046
2047 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2048 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2049 {
2050         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2051
2052         /* Make it a response. */
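        /* OR-ing 4 into the netfn/LUN byte bumps the request netfn to
           the corresponding (odd) response netfn. */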
2053         msg->rsp[0] = msg->data[0] | 4;
2054         msg->rsp[1] = msg->data[1];
2055         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2056         msg->rsp_size = 3;
2057         smi_info->curr_msg = NULL;
2058         deliver_recv_msg(smi_info, msg);
2059 }
2060
2061 /*
2062  * dell_poweredge_bt_xaction_handler
2063  * @info - smi_info.device_id must be populated
2064  *
2065  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2066  * not respond to a Get SDR command if the length of the data
2067  * requested is exactly 0x3A, which leads to command timeouts and no
2068  * data returned.  This intercepts such commands, and causes userspace
2069  * callers to try again with a different-sized buffer, which succeeds.
2070  */
2071
2072 #define STORAGE_NETFN 0x0A
2073 #define STORAGE_CMD_GET_SDR 0x23
2074 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2075                                              unsigned long unused,
2076                                              void *in)
2077 {
2078         struct smi_info *smi_info = in;
2079         unsigned char *data = smi_info->curr_msg->data;
2080         unsigned int size   = smi_info->curr_msg->data_size;
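        /* In a Get SDR request, data[7] is the "bytes to read" count
           (data[2-3] reservation ID, data[4-5] record ID, data[6]
           offset into the record). */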
2081         if (size >= 8 &&
2082             (data[0]>>2) == STORAGE_NETFN &&
2083             data[1] == STORAGE_CMD_GET_SDR &&
2084             data[7] == 0x3A) {
2085                 return_hosed_msg_badsize(smi_info);
2086                 return NOTIFY_STOP;
2087         }
2088         return NOTIFY_DONE;
2089 }
2090
2091 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2092         .notifier_call  = dell_poweredge_bt_xaction_handler,
2093 };
2094
2095 /*
2096  * setup_dell_poweredge_bt_xaction_handler
2097  * @info - smi_info.device_id must be filled in already
2098  *
2099  * Fills in smi_info.device_id.start_transaction_pre_hook
2100  * when we know what function to use there.
2101  */
2102 static void
2103 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2104 {
2105         struct ipmi_device_id *id = &smi_info->device_id;
2106         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2107             smi_info->si_type == SI_BT)
2108                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2109 }
2110
2111 /*
2112  * setup_oem_data_handler
2113  * @info - smi_info.device_id must be filled in already
2114  *
2115  * Fills in smi_info.device_id.oem_data_available_handler
2116  * when we know what function to use there.
2117  */
2118
2119 static void setup_oem_data_handler(struct smi_info *smi_info)
2120 {
2121         setup_dell_poweredge_oem_data_handler(smi_info);
2122 }
2123
2124 static void setup_xaction_handlers(struct smi_info *smi_info)
2125 {
2126         setup_dell_poweredge_bt_xaction_handler(smi_info);
2127 }
2128
2129 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2130 {
2131         if (smi_info->intf) {
2132                 /* The timer and thread are only running if the
2133                    interface has been started up and registered. */
2134                 if (smi_info->thread != NULL)
2135                         kthread_stop(smi_info->thread);
2136                 del_timer_sync(&smi_info->si_timer);
2137         }
2138 }
2139
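/* Fallback addresses from the IPMI specification: KCS at I/O port
   0xca2, SMIC at 0xca9, and BT at 0xe4. */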
2140 static __devinitdata struct ipmi_default_vals
2141 {
2142         int type;
2143         int port;
2144 } ipmi_defaults[] =
2145 {
2146         { .type = SI_KCS, .port = 0xca2 },
2147         { .type = SI_SMIC, .port = 0xca9 },
2148         { .type = SI_BT, .port = 0xe4 },
2149         { .port = 0 }
2150 };
2151
2152 static __devinit void default_find_bmc(void)
2153 {
2154         struct smi_info *info;
2155         int             i;
2156
2157         for (i = 0; ; i++) {
2158                 if (!ipmi_defaults[i].port)
2159                         break;
2160
2161                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2162                 if (!info)
2163                         return;
2164
2165                 info->addr_source = NULL;
2166
2167                 info->si_type = ipmi_defaults[i].type;
2168                 info->io_setup = port_setup;
2169                 info->io.addr_data = ipmi_defaults[i].port;
2170                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2171
2172                 info->io.addr = NULL;
2173                 info->io.regspacing = DEFAULT_REGSPACING;
2174                 info->io.regsize = DEFAULT_REGSPACING;
2175                 info->io.regshift = 0;
2176
2177                 if (try_smi_init(info) == 0) {
2178                         /* Found one... */
2179                         printk(KERN_INFO "ipmi_si: Found default %s state"
2180                                " machine at %s address 0x%lx\n",
2181                                si_to_str[info->si_type],
2182                                addr_space_to_str[info->io.addr_type],
2183                                info->io.addr_data);
2184                         return;
2185                 }
2186         }
2187 }
2188
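/* Returns 1 if no registered interface already answers at the same
   address, 0 if this would be a duplicate. */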
2189 static int is_new_interface(struct smi_info *info)
2190 {
2191         struct smi_info *e;
2192
2193         list_for_each_entry(e, &smi_infos, link) {
2194                 if (e->io.addr_type != info->io.addr_type)
2195                         continue;
2196                 if (e->io.addr_data == info->io.addr_data)
2197                         return 0;
2198         }
2199
2200         return 1;
2201 }
2202
2203 static int try_smi_init(struct smi_info *new_smi)
2204 {
2205         int rv;
2206
2207         if (new_smi->addr_source) {
2208                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2209                        " machine at %s address 0x%lx, slave address 0x%x,"
2210                        " irq %d\n",
2211                        new_smi->addr_source,
2212                        si_to_str[new_smi->si_type],
2213                        addr_space_to_str[new_smi->io.addr_type],
2214                        new_smi->io.addr_data,
2215                        new_smi->slave_addr, new_smi->irq);
2216         }
2217
2218         mutex_lock(&smi_infos_lock);
2219         if (!is_new_interface(new_smi)) {
2220                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2221                 rv = -EBUSY;
2222                 goto out_err;
2223         }
2224
2225         /* So we know not to free it unless we have allocated one. */
2226         new_smi->intf = NULL;
2227         new_smi->si_sm = NULL;
2228         new_smi->handlers = NULL;
2229
2230         switch (new_smi->si_type) {
2231         case SI_KCS:
2232                 new_smi->handlers = &kcs_smi_handlers;
2233                 break;
2234
2235         case SI_SMIC:
2236                 new_smi->handlers = &smic_smi_handlers;
2237                 break;
2238
2239         case SI_BT:
2240                 new_smi->handlers = &bt_smi_handlers;
2241                 break;
2242
2243         default:
2244                 /* No support for anything else yet. */
2245                 rv = -EIO;
2246                 goto out_err;
2247         }
2248
2249         /* Allocate the state machine's data and initialize it. */
2250         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2251         if (!new_smi->si_sm) {
2252                 printk(" Could not allocate state machine memory\n");
2253                 rv = -ENOMEM;
2254                 goto out_err;
2255         }
2256         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2257                                                         &new_smi->io);
2258
2259         /* Now that we know the I/O size, we can set up the I/O. */
2260         rv = new_smi->io_setup(new_smi);
2261         if (rv) {
2262                 printk(" Could not set up I/O space\n");
2263                 goto out_err;
2264         }
2265
2266         spin_lock_init(&(new_smi->si_lock));
2267         spin_lock_init(&(new_smi->msg_lock));
2268         spin_lock_init(&(new_smi->count_lock));
2269
2270         /* Do low-level detection first. */
2271         if (new_smi->handlers->detect(new_smi->si_sm)) {
2272                 if (new_smi->addr_source)
2273                         printk(KERN_INFO "ipmi_si: Interface detection"
2274                                " failed\n");
2275                 rv = -ENODEV;
2276                 goto out_err;
2277         }
2278
2279         /* Attempt a get device id command.  If it fails, we probably
2280            don't have a BMC here. */
2281         rv = try_get_dev_id(new_smi);
2282         if (rv) {
2283                 if (new_smi->addr_source)
2284                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2285                                " at this location\n");
2286                 goto out_err;
2287         }
2288
2289         setup_oem_data_handler(new_smi);
2290         setup_xaction_handlers(new_smi);
2291
2292         /* Try to claim any interrupts. */
2293         if (new_smi->irq_setup)
2294                 new_smi->irq_setup(new_smi);
2295
2296         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2297         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2298         new_smi->curr_msg = NULL;
2299         atomic_set(&new_smi->req_events, 0);
2300         new_smi->run_to_completion = 0;
2301
2302         new_smi->interrupt_disabled = 0;
2303         atomic_set(&new_smi->stop_operation, 0);
2304         new_smi->intf_num = smi_num;
2305         smi_num++;
2306
2307         /* Start clearing the flags before we enable interrupts or the
2308            timer to avoid racing with the timer. */
2309         start_clear_flags(new_smi);
2310         /* IRQ is defined to be set when non-zero. */
2311         if (new_smi->irq)
2312                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2313
2314         if (!new_smi->dev) {
2315                 /* If we don't already have a device from something
2316                  * else (like PCI), then register a new one. */
2317                 new_smi->pdev = platform_device_alloc("ipmi_si",
2318                                                       new_smi->intf_num);
2319                 if (!new_smi->pdev) {
                             rv = -ENOMEM;
2320                         printk(KERN_ERR
2321                                "ipmi_si_intf:"
2322                                " Unable to allocate platform device\n");
2323                         goto out_err;
2324                 }
2325                 new_smi->dev = &new_smi->pdev->dev;
2326                 new_smi->dev->driver = &ipmi_driver;
2327
2328                 rv = platform_device_register(new_smi->pdev);
2329                 if (rv) {
2330                         printk(KERN_ERR
2331                                "ipmi_si_intf:"
2332                                " Unable to register system interface device:"
2333                                " %d\n",
2334                                rv);
2335                         goto out_err;
2336                 }
2337                 new_smi->dev_registered = 1;
2338         }
2339
2340         rv = ipmi_register_smi(&handlers,
2341                                new_smi,
2342                                &new_smi->device_id,
2343                                new_smi->dev,
2344                                new_smi->slave_addr);
2345         if (rv) {
2346                 printk(KERN_ERR
2347                        "ipmi_si: Unable to register device: error %d\n",
2348                        rv);
2349                 goto out_err_stop_timer;
2350         }
2351
2352         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2353                                      type_file_read_proc, NULL,
2354                                      new_smi, THIS_MODULE);
2355         if (rv) {
2356                 printk(KERN_ERR
2357                        "ipmi_si: Unable to create proc entry: %d\n",
2358                        rv);
2359                 goto out_err_stop_timer;
2360         }
2361
2362         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2363                                      stat_file_read_proc, NULL,
2364                                      new_smi, THIS_MODULE);
2365         if (rv) {
2366                 printk(KERN_ERR
2367                        "ipmi_si: Unable to create proc entry: %d\n",
2368                        rv);
2369                 goto out_err_stop_timer;
2370         }
2371
2372         list_add_tail(&new_smi->link, &smi_infos);
2373
2374         mutex_unlock(&smi_infos_lock);
2375
2376         printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2377
2378         return 0;
2379
2380  out_err_stop_timer:
2381         atomic_inc(&new_smi->stop_operation);
2382         wait_for_timer_and_thread(new_smi);
2383
2384  out_err:
2385         if (new_smi->intf)
2386                 ipmi_unregister_smi(new_smi->intf);
2387
2388         if (new_smi->irq_cleanup)
2389                 new_smi->irq_cleanup(new_smi);
2390
2391         /* Wait until we know that we are out of any interrupt
2392            handlers that might have been running before we freed the
2393            interrupt. */
2394         synchronize_sched();
2395
2396         if (new_smi->si_sm) {
2397                 if (new_smi->handlers)
2398                         new_smi->handlers->cleanup(new_smi->si_sm);
2399                 kfree(new_smi->si_sm);
2400         }
2401         if (new_smi->addr_source_cleanup)
2402                 new_smi->addr_source_cleanup(new_smi);
2403         if (new_smi->io_cleanup)
2404                 new_smi->io_cleanup(new_smi);
2405
2406         if (new_smi->dev_registered)
2407                 platform_device_unregister(new_smi->pdev);
2408
2409         kfree(new_smi);
2410
2411         mutex_unlock(&smi_infos_lock);
2412
2413         return rv;
2414 }
2415
2416 static __devinit int init_ipmi_si(void)
2417 {
2418         int  i;
2419         char *str;
2420         int  rv;
2421
2422         if (initialized)
2423                 return 0;
2424         initialized = 1;
2425
2426         /* Register the device drivers. */
2427         rv = driver_register(&ipmi_driver);
2428         if (rv) {
2429                 printk(KERN_ERR
2430                        "init_ipmi_si: Unable to register driver: %d\n",
2431                        rv);
2432                 return rv;
2433         }
2434
2435
2436         /* Parse out the si_type string into its components. */
2437         str = si_type_str;
2438         if (*str != '\0') {
2439                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2440                         si_type[i] = str;
2441                         str = strchr(str, ',');
2442                         if (str) {
2443                                 *str = '\0';
2444                                 str++;
2445                         } else {
2446                                 break;
2447                         }
2448                 }
2449         }
2450
2451         printk(KERN_INFO "IPMI System Interface driver.\n");
2452
2453         hardcode_find_bmc();
2454
2455 #ifdef CONFIG_DMI
2456         dmi_find_bmc();
2457 #endif
2458
2459 #ifdef CONFIG_ACPI
2460         if (si_trydefaults)
2461                 acpi_find_bmc();
2462 #endif
2463
2464 #ifdef CONFIG_PCI
2465         pci_module_init(&ipmi_pci_driver);
2466 #endif
2467
2468         if (si_trydefaults) {
2469                 mutex_lock(&smi_infos_lock);
2470                 if (list_empty(&smi_infos)) {
2471                         /* No BMC was found, try defaults. */
2472                         mutex_unlock(&smi_infos_lock);
2473                         default_find_bmc();
2474                 } else {
2475                         mutex_unlock(&smi_infos_lock);
2476                 }
2477         }
2478
2479         mutex_lock(&smi_infos_lock);
2480         if (list_empty(&smi_infos)) {
2481                 mutex_unlock(&smi_infos_lock);
2482 #ifdef CONFIG_PCI
2483                 pci_unregister_driver(&ipmi_pci_driver);
2484 #endif
2485                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2486                 return -ENODEV;
2487         } else {
2488                 mutex_unlock(&smi_infos_lock);
2489                 return 0;
2490         }
2491 }
2492 module_init(init_ipmi_si);
2493
2494 static void __devexit cleanup_one_si(struct smi_info *to_clean)
2495 {
2496         int           rv;
2497         unsigned long flags;
2498
2499         if (!to_clean)
2500                 return;
2501
2502         list_del(&to_clean->link);
2503
2504         /* Tell the timer and interrupt handlers that we are shutting
2505            down. */
2506         spin_lock_irqsave(&(to_clean->si_lock), flags);
2507         spin_lock(&(to_clean->msg_lock));
2508
2509         atomic_inc(&to_clean->stop_operation);
2510
2511         if (to_clean->irq_cleanup)
2512                 to_clean->irq_cleanup(to_clean);
2513
2514         spin_unlock(&(to_clean->msg_lock));
2515         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2516
2517         /* Wait until we know that we are out of any interrupt
2518            handlers that might have been running before we freed the
2519            interrupt. */
2520         synchronize_sched();
2521
2522         wait_for_timer_and_thread(to_clean);
2523
2524         /* Interrupts and timeouts are stopped, now make sure the
2525            interface is in a clean state. */
2526         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2527                 poll(to_clean);
2528                 schedule_timeout_uninterruptible(1);
2529         }
2530
2531         rv = ipmi_unregister_smi(to_clean->intf);
2532         if (rv) {
2533                 printk(KERN_ERR
2534                        "ipmi_si: Unable to unregister device: errno=%d\n",
2535                        rv);
2536         }
2537
2538         to_clean->handlers->cleanup(to_clean->si_sm);
2539
2540         kfree(to_clean->si_sm);
2541
2542         if (to_clean->addr_source_cleanup)
2543                 to_clean->addr_source_cleanup(to_clean);
2544         if (to_clean->io_cleanup)
2545                 to_clean->io_cleanup(to_clean);
2546
2547         if (to_clean->dev_registered)
2548                 platform_device_unregister(to_clean->pdev);
2549
2550         kfree(to_clean);
2551 }
2552
2553 static __exit void cleanup_ipmi_si(void)
2554 {
2555         struct smi_info *e, *tmp_e;
2556
2557         if (!initialized)
2558                 return;
2559
2560 #ifdef CONFIG_PCI
2561         pci_unregister_driver(&ipmi_pci_driver);
2562 #endif
2563
2564         mutex_lock(&smi_infos_lock);
2565         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2566                 cleanup_one_si(e);
2567         mutex_unlock(&smi_infos_lock);
2568
2569         driver_unregister(&ipmi_driver);
2570 }
2571 module_exit(cleanup_ipmi_si);
2572
2573 MODULE_LICENSE("GPL");
2574 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2575 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");