Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6
[sfrench/cifs-2.6.git] / drivers / acpi / acpica / evgpe.c
1 /******************************************************************************
2  *
3  * Module Name: evgpe - General Purpose Event handling and dispatch
4  *
5  *****************************************************************************/
6
7 /*
8  * Copyright (C) 2000 - 2010, Intel Corp.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions, and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    substantially similar to the "NO WARRANTY" disclaimer below
19  *    ("Disclaimer") and any redistribution must be conditioned upon
20  *    including a substantially similar Disclaimer requirement for further
21  *    binary redistribution.
22  * 3. Neither the names of the above-listed copyright holders nor the names
23  *    of any contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * Alternatively, this software may be distributed under the terms of the
27  * GNU General Public License ("GPL") version 2 as published by the Free
28  * Software Foundation.
29  *
30  * NO WARRANTY
31  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41  * POSSIBILITY OF SUCH DAMAGES.
42  */
43
44 #include <acpi/acpi.h>
45 #include "accommon.h"
46 #include "acevents.h"
47 #include "acnamesp.h"
48
49 #define _COMPONENT          ACPI_EVENTS
50 ACPI_MODULE_NAME("evgpe")
51
52 /* Local prototypes */
53 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54
55 /*******************************************************************************
56  *
57  * FUNCTION:    acpi_ev_update_gpe_enable_masks
58  *
59  * PARAMETERS:  gpe_event_info          - GPE to update
60  *
61  * RETURN:      Status
62  *
63  * DESCRIPTION: Updates GPE register enable masks based on the GPE type
64  *
65  ******************************************************************************/
66
67 acpi_status
68 acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
69 {
70         struct acpi_gpe_register_info *gpe_register_info;
71         u8 register_bit;
72
73         ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
74
75         gpe_register_info = gpe_event_info->register_info;
76         if (!gpe_register_info) {
77                 return_ACPI_STATUS(AE_NOT_EXIST);
78         }
79
80         register_bit = (u8)
81             (1 <<
82              (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
83
84         ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
85         ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
86
87         if (gpe_event_info->runtime_count)
88                 ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
89
90         if (gpe_event_info->wakeup_count)
91                 ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
92
93         return_ACPI_STATUS(AE_OK);
94 }
95
96 /*******************************************************************************
97  *
98  * FUNCTION:    acpi_ev_enable_gpe
99  *
100  * PARAMETERS:  gpe_event_info          - GPE to enable
101  *
102  * RETURN:      Status
103  *
104  * DESCRIPTION: Enable a GPE based on the GPE type
105  *
106  ******************************************************************************/
107
108 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
109 {
110         acpi_status status;
111
112         ACPI_FUNCTION_TRACE(ev_enable_gpe);
113
114         /* Make sure HW enable masks are updated */
115
116         status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
117         if (ACPI_FAILURE(status))
118                 return_ACPI_STATUS(status);
119
120         /* Clear the GPE (of stale events), then enable it */
121         status = acpi_hw_clear_gpe(gpe_event_info);
122         if (ACPI_FAILURE(status))
123                 return_ACPI_STATUS(status);
124
125         /* Enable the requested GPE */
126         status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
127         return_ACPI_STATUS(status);
128 }
129
130 /*******************************************************************************
131  *
132  * FUNCTION:    acpi_ev_disable_gpe
133  *
134  * PARAMETERS:  gpe_event_info          - GPE to disable
135  *
136  * RETURN:      Status
137  *
138  * DESCRIPTION: Disable a GPE based on the GPE type
139  *
140  ******************************************************************************/
141
142 acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
143 {
144         acpi_status status;
145
146         ACPI_FUNCTION_TRACE(ev_disable_gpe);
147
148         /* Make sure HW enable masks are updated */
149
150         status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
151         if (ACPI_FAILURE(status))
152                 return_ACPI_STATUS(status);
153
154         /*
155          * Even if we don't know the GPE type, make sure that we always
156          * disable it. low_disable_gpe will just clear the enable bit for this
157          * GPE and write it. It will not write out the current GPE enable mask,
158          * since this may inadvertently enable GPEs too early, if a rogue GPE has
159          * come in during ACPICA initialization - possibly as a result of AML or
160          * other code that has enabled the GPE.
161          */
162         status = acpi_hw_low_disable_gpe(gpe_event_info);
163         return_ACPI_STATUS(status);
164 }
165
166 /*******************************************************************************
167  *
168  * FUNCTION:    acpi_ev_get_gpe_event_info
169  *
170  * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
171  *              gpe_number          - Raw GPE number
172  *
173  * RETURN:      A GPE event_info struct. NULL if not a valid GPE
174  *
175  * DESCRIPTION: Returns the event_info struct associated with this GPE.
176  *              Validates the gpe_block and the gpe_number
177  *
178  *              Should be called only when the GPE lists are semaphore locked
179  *              and not subject to change.
180  *
181  ******************************************************************************/
182
183 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
184                                                        u32 gpe_number)
185 {
186         union acpi_operand_object *obj_desc;
187         struct acpi_gpe_block_info *gpe_block;
188         u32 i;
189
190         ACPI_FUNCTION_ENTRY();
191
192         /* A NULL gpe_block means use the FADT-defined GPE block(s) */
193
194         if (!gpe_device) {
195
196                 /* Examine GPE Block 0 and 1 (These blocks are permanent) */
197
198                 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
199                         gpe_block = acpi_gbl_gpe_fadt_blocks[i];
200                         if (gpe_block) {
201                                 if ((gpe_number >= gpe_block->block_base_number)
202                                     && (gpe_number <
203                                         gpe_block->block_base_number +
204                                         (gpe_block->register_count * 8))) {
205                                         return (&gpe_block->
206                                                 event_info[gpe_number -
207                                                            gpe_block->
208                                                            block_base_number]);
209                                 }
210                         }
211                 }
212
213                 /* The gpe_number was not in the range of either FADT GPE block */
214
215                 return (NULL);
216         }
217
218         /* A Non-NULL gpe_device means this is a GPE Block Device */
219
220         obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
221                                                gpe_device);
222         if (!obj_desc || !obj_desc->device.gpe_block) {
223                 return (NULL);
224         }
225
226         gpe_block = obj_desc->device.gpe_block;
227
228         if ((gpe_number >= gpe_block->block_base_number) &&
229             (gpe_number <
230              gpe_block->block_base_number + (gpe_block->register_count * 8))) {
231                 return (&gpe_block->
232                         event_info[gpe_number - gpe_block->block_base_number]);
233         }
234
235         return (NULL);
236 }
237
238 /*******************************************************************************
239  *
240  * FUNCTION:    acpi_ev_gpe_detect
241  *
242  * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
243  *                                    Can have multiple GPE blocks attached.
244  *
245  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
246  *
247  * DESCRIPTION: Detect if any GP events have occurred. This function is
248  *              executed at interrupt level.
249  *
250  ******************************************************************************/
251
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	/* NAME (not TRACE): this function runs at interrupt level */

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/*
			 * Check if there is anything active at all in this register:
			 * a GPE is "active" only if its status AND enable bits are
			 * both set.
			 */
			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method. Dispatch happens with the gpe_lock still
					 * held; any accumulated "handled" status is OR-ed in.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(&gpe_block->
						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	/* Single exit point: the gpe_lock is always released here */

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
353
354 /*******************************************************************************
355  *
356  * FUNCTION:    acpi_ev_asynch_execute_gpe_method
357  *
358  * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
359  *
360  * RETURN:      None
361  *
362  * DESCRIPTION: Perform the actual execution of a GPE control method. This
363  *              function is called from an invocation of acpi_os_execute and
364  *              therefore does NOT execute at interrupt level - so that
365  *              the control method itself is not executed in the context of
366  *              an interrupt handler.
367  *
368  ******************************************************************************/
369 static void acpi_ev_asynch_enable_gpe(void *context);
370
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
	acpi_status status;
	struct acpi_gpe_event_info local_gpe_event_info;
	struct acpi_evaluate_info *info;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must revalidate the gpe_number/gpe_block: the GPE block could have
	 * been removed between the interrupt-level dispatch and this deferred
	 * execution.
	 */
	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		return_VOID;
	}

	/* Set the GPE flags for return to enabled state */

	(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must check for control method type dispatch one more time to avoid a
	 * race with ev_gpe_install_handler
	 */
	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info.dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
		}
	}
	/*
	 * Defer enabling of GPE until all notify handlers are done; note that
	 * the ORIGINAL gpe_event_info pointer (not the local snapshot) is
	 * passed to the deferred re-enable.
	 */
	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
				gpe_event_info);
	return_VOID;
}
446
447 static void acpi_ev_asynch_enable_gpe(void *context)
448 {
449         struct acpi_gpe_event_info *gpe_event_info = context;
450         acpi_status status;
451         if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
452             ACPI_GPE_LEVEL_TRIGGERED) {
453                 /*
454                  * GPE is level-triggered, we clear the GPE status bit after handling
455                  * the event.
456                  */
457                 status = acpi_hw_clear_gpe(gpe_event_info);
458                 if (ACPI_FAILURE(status)) {
459                         return_VOID;
460                 }
461         }
462
463         /* Enable this GPE */
464         (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
465         return_VOID;
466 }
467
468 /*******************************************************************************
469  *
470  * FUNCTION:    acpi_ev_gpe_dispatch
471  *
472  * PARAMETERS:  gpe_event_info  - Info for this GPE
473  *              gpe_number      - Number relative to the parent GPE block
474  *
475  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
476  *
477  * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
478  *              or method (e.g. _Lxx/_Exx) handler.
479  *
480  *              This function executes at interrupt level.
481  *
482  ******************************************************************************/
483
u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Bump the OS-visible per-GPE statistics counter */

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the ACPICA
		 * Core Subsystem is restarted, or a handler is installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	/* All paths that reach here count as handled */

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
593 }