drivers/acpi/osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/efi.h>
47 #include <linux/ioport.h>
48 #include <linux/list.h>
49
50 #define _COMPONENT              ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52 #define PREFIX          "ACPI: "
53 struct acpi_os_dpc {
54         acpi_osd_exec_callback function;
55         void *context;
56         struct work_struct work;
57 };
58
59 #ifdef CONFIG_ACPI_CUSTOM_DSDT
60 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
61 #endif
62
63 #ifdef ENABLE_DEBUGGER
64 #include <linux/kdb.h>
65
66 /* stuff for debugger support */
67 int acpi_in_debugger;
68 EXPORT_SYMBOL(acpi_in_debugger);
69
70 extern char line_buf[80];
71 #endif                          /*ENABLE_DEBUGGER */
72
73 static unsigned int acpi_irq_irq;
74 static acpi_osd_handler acpi_irq_handler;
75 static void *acpi_irq_context;
76 static struct workqueue_struct *kacpid_wq;
77 static struct workqueue_struct *kacpi_notify_wq;
78
79 struct acpi_res_list {
80         resource_size_t start;
81         resource_size_t end;
82         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
 83         char name[5];   /* ACPI names are at most 4 chars; store them here
 84                            instead of res->name so no extra kmalloc is needed */
85         struct list_head resource_list;
86 };
87
88 static LIST_HEAD(resource_list_head);
89 static DEFINE_SPINLOCK(acpi_res_lock);
90
91 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
92 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
93
94 /*
95  * "Ode to _OSI(Linux)"
96  *
97  * osi_linux -- Control response to BIOS _OSI(Linux) query.
98  *
99  * As Linux evolves, the features that it supports change.
100  * So an OSI string such as "Linux" is not specific enough
101  * to be useful across multiple versions of Linux.  It
102  * doesn't identify any particular feature, interface,
103  * or even any particular version of Linux...
104  *
105  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
106  * to a BIOS _OSI(Linux) query.  When
107  * a reference mobile BIOS started using it, its use
108  * started to spread to many vendor platforms.
109  * As it is not supportable, we need to halt that spread.
110  *
111  * Today, most BIOS references to _OSI(Linux) are noise --
112  * they have no functional effect and are just dead code
113  * carried over from the reference BIOS.
114  *
115  * The next most common case is that _OSI(Linux) harms Linux,
116  * usually by causing the BIOS to follow paths that are
117  * not tested during Windows validation.
118  *
119  * Finally, there is a short list of platforms
120  * where OSI(Linux) benefits Linux.
121  *
122  * In Linux-2.6.23, OSI(Linux) is first disabled by default.
123  * DMI is used to disable the dmesg warning about OSI(Linux)
124  * on platforms where it is known to have no effect.
125  * But a dmesg warning remains for systems where
126  * we do not know if OSI(Linux) is good or bad for the system.
127  * DMI is also used to enable OSI(Linux) for the machines
128  * that are known to need it.
129  *
130  * BIOS writers should NOT query _OSI(Linux) on future systems.
131  * It will be ignored by default, and to get Linux to
132  * not ignore it will require a kernel source update to
133  * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
134  */
135 #define OSI_LINUX_ENABLE 0
136
137 static struct osi_linux {
138         unsigned int    enable:1;
139         unsigned int    dmi:1;
140         unsigned int    cmdline:1;
141         unsigned int    known:1;
142 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
143
144 static void __init acpi_request_region (struct acpi_generic_address *addr,
145         unsigned int length, char *desc)
146 {
147         struct resource *res;
148
149         if (!addr->address || !length)
150                 return;
151
152         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
153                 res = request_region(addr->address, length, desc);
154         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
155                 res = request_mem_region(addr->address, length, desc);
156 }
157
158 static int __init acpi_reserve_resources(void)
159 {
160         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
161                 "ACPI PM1a_EVT_BLK");
162
163         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
164                 "ACPI PM1b_EVT_BLK");
165
166         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
167                 "ACPI PM1a_CNT_BLK");
168
169         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
170                 "ACPI PM1b_CNT_BLK");
171
172         if (acpi_gbl_FADT.pm_timer_length == 4)
173                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
174
175         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
176                 "ACPI PM2_CNT_BLK");
177
178         /* Length of GPE blocks must be a non-negative multiple of 2 */
179
180         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
181                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
182                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
183
184         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
185                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
186                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
187
188         return 0;
189 }
190 device_initcall(acpi_reserve_resources);
191
192 acpi_status __init acpi_os_initialize(void)
193 {
194         return AE_OK;
195 }
196
197 acpi_status acpi_os_initialize1(void)
198 {
199         kacpid_wq = create_singlethread_workqueue("kacpid");
200         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
201         BUG_ON(!kacpid_wq);
202         BUG_ON(!kacpi_notify_wq);
203         return AE_OK;
204 }
205
206 acpi_status acpi_os_terminate(void)
207 {
208         if (acpi_irq_handler) {
209                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
210                                                  acpi_irq_handler);
211         }
212
213         destroy_workqueue(kacpid_wq);
214         destroy_workqueue(kacpi_notify_wq);
215
216         return AE_OK;
217 }
218
219 void acpi_os_printf(const char *fmt, ...)
220 {
221         va_list args;
222         va_start(args, fmt);
223         acpi_os_vprintf(fmt, args);
224         va_end(args);
225 }
226
227 void acpi_os_vprintf(const char *fmt, va_list args)
228 {
229         static char buffer[512];
230
231         vsprintf(buffer, fmt, args);
232
233 #ifdef ENABLE_DEBUGGER
234         if (acpi_in_debugger) {
235                 kdb_printf("%s", buffer);
236         } else {
237                 printk("%s", buffer);
238         }
239 #else
240         printk("%s", buffer);
241 #endif
242 }
243
244 acpi_physical_address __init acpi_os_get_root_pointer(void)
245 {
246         if (efi_enabled) {
247                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
248                         return efi.acpi20;
249                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
250                         return efi.acpi;
251                 else {
252                         printk(KERN_ERR PREFIX
253                                "System description tables not found\n");
254                         return 0;
255                 }
256         } else {
257                 acpi_physical_address pa = 0;
258
259                 acpi_find_root_pointer(&pa);
260                 return pa;
261         }
262 }
263
264 void __iomem *__init_refok
265 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
266 {
267         if (phys > ULONG_MAX) {
268                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
269                 return NULL;
270         }
271         if (acpi_gbl_permanent_mmap)
 272                 /*
 273                  * ioremap checks to ensure this is in reserved space
 274                  */
275                 return ioremap((unsigned long)phys, size);
276         else
277                 return __acpi_map_table((unsigned long)phys, size);
278 }
279 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
280
281 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
282 {
283         if (acpi_gbl_permanent_mmap) {
284                 iounmap(virt);
285         }
286 }
287 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
288
289 #ifdef ACPI_FUTURE_USAGE
290 acpi_status
291 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
292 {
293         if (!phys || !virt)
294                 return AE_BAD_PARAMETER;
295
296         *phys = virt_to_phys(virt);
297
298         return AE_OK;
299 }
300 #endif
301
302 #define ACPI_MAX_OVERRIDE_LEN 100
303
304 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
305
306 acpi_status
307 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
308                             acpi_string * new_val)
309 {
310         if (!init_val || !new_val)
311                 return AE_BAD_PARAMETER;
312
313         *new_val = NULL;
314         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
315                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
316                        acpi_os_name);
317                 *new_val = acpi_os_name;
318         }
319
320         return AE_OK;
321 }
322
323 acpi_status
324 acpi_os_table_override(struct acpi_table_header * existing_table,
325                        struct acpi_table_header ** new_table)
326 {
327         if (!existing_table || !new_table)
328                 return AE_BAD_PARAMETER;
329
330         *new_table = NULL;
331
332 #ifdef CONFIG_ACPI_CUSTOM_DSDT
333         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
334                 *new_table = (struct acpi_table_header *)AmlCode;
335 #endif
336         if (*new_table != NULL) {
337                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
338                            "this is unsafe: tainting kernel\n",
339                        existing_table->signature,
340                        existing_table->oem_table_id);
341                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
342         }
343         return AE_OK;
344 }
345
346 static irqreturn_t acpi_irq(int irq, void *dev_id)
347 {
348         u32 handled;
349
350         handled = (*acpi_irq_handler) (acpi_irq_context);
351
352         if (handled) {
353                 acpi_irq_handled++;
354                 return IRQ_HANDLED;
355         } else
356                 return IRQ_NONE;
357 }
358
359 acpi_status
360 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
361                                   void *context)
362 {
363         unsigned int irq;
364
365         acpi_irq_stats_init();
366
367         /*
368          * Ignore the GSI from the core, and use the value in our copy of the
369          * FADT. It may not be the same if an interrupt source override exists
370          * for the SCI.
371          */
372         gsi = acpi_gbl_FADT.sci_interrupt;
373         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
374                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
375                        gsi);
376                 return AE_OK;
377         }
378
379         acpi_irq_handler = handler;
380         acpi_irq_context = context;
381         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
382                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
383                 return AE_NOT_ACQUIRED;
384         }
385         acpi_irq_irq = irq;
386
387         return AE_OK;
388 }
389
390 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
391 {
392         if (irq) {
393                 free_irq(irq, acpi_irq);
394                 acpi_irq_handler = NULL;
395                 acpi_irq_irq = 0;
396         }
397
398         return AE_OK;
399 }
400
401 /*
402  * Running in interpreter thread context, safe to sleep
403  */
404
405 void acpi_os_sleep(acpi_integer ms)
406 {
407         schedule_timeout_interruptible(msecs_to_jiffies(ms));
408 }
409
410 void acpi_os_stall(u32 us)
411 {
412         while (us) {
413                 u32 delay = 1000;
414
415                 if (delay > us)
416                         delay = us;
417                 udelay(delay);
418                 touch_nmi_watchdog();
419                 us -= delay;
420         }
421 }
422
423 /*
424  * Support ACPI 3.0 AML Timer operand
425  * Returns 64-bit free-running, monotonically increasing timer
426  * with 100ns granularity
427  */
428 u64 acpi_os_get_timer(void)
429 {
430         static u64 t;
431
432 #ifdef  CONFIG_HPET
433         /* TBD: use HPET if available */
434 #endif
435
436 #ifdef  CONFIG_X86_PM_TIMER
437         /* TBD: default to PM timer if HPET was not available */
438 #endif
439         if (!t)
440                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
441
442         return ++t;
443 }
444
445 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
446 {
447         u32 dummy;
448
449         if (!value)
450                 value = &dummy;
451
452         *value = 0;
453         if (width <= 8) {
454                 *(u8 *) value = inb(port);
455         } else if (width <= 16) {
456                 *(u16 *) value = inw(port);
457         } else if (width <= 32) {
458                 *(u32 *) value = inl(port);
459         } else {
460                 BUG();
461         }
462
463         return AE_OK;
464 }
465
466 EXPORT_SYMBOL(acpi_os_read_port);
467
468 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
469 {
470         if (width <= 8) {
471                 outb(value, port);
472         } else if (width <= 16) {
473                 outw(value, port);
474         } else if (width <= 32) {
475                 outl(value, port);
476         } else {
477                 BUG();
478         }
479
480         return AE_OK;
481 }
482
483 EXPORT_SYMBOL(acpi_os_write_port);
484
485 acpi_status
486 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
487 {
488         u32 dummy;
489         void __iomem *virt_addr;
490
491         virt_addr = ioremap(phys_addr, width);
492         if (!value)
493                 value = &dummy;
494
495         switch (width) {
496         case 8:
497                 *(u8 *) value = readb(virt_addr);
498                 break;
499         case 16:
500                 *(u16 *) value = readw(virt_addr);
501                 break;
502         case 32:
503                 *(u32 *) value = readl(virt_addr);
504                 break;
505         default:
506                 BUG();
507         }
508
509         iounmap(virt_addr);
510
511         return AE_OK;
512 }
513
514 acpi_status
515 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
516 {
517         void __iomem *virt_addr;
518
519         virt_addr = ioremap(phys_addr, width);
520
521         switch (width) {
522         case 8:
523                 writeb(value, virt_addr);
524                 break;
525         case 16:
526                 writew(value, virt_addr);
527                 break;
528         case 32:
529                 writel(value, virt_addr);
530                 break;
531         default:
532                 BUG();
533         }
534
535         iounmap(virt_addr);
536
537         return AE_OK;
538 }
539
540 acpi_status
541 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
542                                u32 *value, u32 width)
543 {
544         int result, size;
545
546         if (!value)
547                 return AE_BAD_PARAMETER;
548
549         switch (width) {
550         case 8:
551                 size = 1;
552                 break;
553         case 16:
554                 size = 2;
555                 break;
556         case 32:
557                 size = 4;
558                 break;
559         default:
560                 return AE_ERROR;
561         }
562
563         result = raw_pci_read(pci_id->segment, pci_id->bus,
564                                 PCI_DEVFN(pci_id->device, pci_id->function),
565                                 reg, size, value);
566
567         return (result ? AE_ERROR : AE_OK);
568 }
569
570 acpi_status
571 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
572                                 acpi_integer value, u32 width)
573 {
574         int result, size;
575
576         switch (width) {
577         case 8:
578                 size = 1;
579                 break;
580         case 16:
581                 size = 2;
582                 break;
583         case 32:
584                 size = 4;
585                 break;
586         default:
587                 return AE_ERROR;
588         }
589
590         result = raw_pci_write(pci_id->segment, pci_id->bus,
591                                 PCI_DEVFN(pci_id->device, pci_id->function),
592                                 reg, size, value);
593
594         return (result ? AE_ERROR : AE_OK);
595 }
596
 597 /* TODO: Change code to take better advantage of the driver model */
598 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
599                                     acpi_handle chandle,        /* current node */
600                                     struct acpi_pci_id **id,
601                                     int *is_bridge, u8 * bus_number)
602 {
603         acpi_handle handle;
604         struct acpi_pci_id *pci_id = *id;
605         acpi_status status;
606         unsigned long temp;
607         acpi_object_type type;
608
609         acpi_get_parent(chandle, &handle);
610         if (handle != rhandle) {
611                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
612                                         bus_number);
613
614                 status = acpi_get_type(handle, &type);
615                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
616                         return;
617
618                 status =
619                     acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
620                                           &temp);
621                 if (ACPI_SUCCESS(status)) {
622                         u32 val;
623                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
624                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
625
626                         if (*is_bridge)
627                                 pci_id->bus = *bus_number;
628
 629                         /* is there a nicer way to get the bus number of a bridge? */
630                         status =
631                             acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
632                                                            8);
633                         if (ACPI_SUCCESS(status)
634                             && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
635                                 status =
636                                     acpi_os_read_pci_configuration(pci_id, 0x18,
637                                                                    &val, 8);
638                                 if (!ACPI_SUCCESS(status)) {
639                                         /* Certainly broken...  FIX ME */
640                                         return;
641                                 }
642                                 *is_bridge = 1;
643                                 pci_id->bus = val;
644                                 status =
645                                     acpi_os_read_pci_configuration(pci_id, 0x19,
646                                                                    &val, 8);
647                                 if (ACPI_SUCCESS(status)) {
648                                         *bus_number = val;
649                                 }
650                         } else
651                                 *is_bridge = 0;
652                 }
653         }
654 }
655
656 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
657                            acpi_handle chandle, /* current node */
658                            struct acpi_pci_id **id)
659 {
660         int is_bridge = 1;
661         u8 bus_number = (*id)->bus;
662
663         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
664 }
665
666 static void acpi_os_execute_deferred(struct work_struct *work)
667 {
668         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
669         if (!dpc) {
670                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
671                 return;
672         }
673
674         dpc->function(dpc->context);
675         kfree(dpc);
676
677         return;
678 }
679
680 /*******************************************************************************
681  *
682  * FUNCTION:    acpi_os_execute
683  *
684  * PARAMETERS:  Type               - Type of the callback
685  *              Function           - Function to be executed
686  *              Context            - Function parameters
687  *
688  * RETURN:      Status
689  *
 690  * DESCRIPTION: Depending on type, queues the function for deferred execution
 691  *              on either kacpid_wq or kacpi_notify_wq.
692  *
693  ******************************************************************************/
694
695 acpi_status acpi_os_execute(acpi_execute_type type,
696                             acpi_osd_exec_callback function, void *context)
697 {
698         acpi_status status = AE_OK;
699         struct acpi_os_dpc *dpc;
700         struct workqueue_struct *queue;
701         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
702                           "Scheduling function [%p(%p)] for deferred execution.\n",
703                           function, context));
704
705         if (!function)
706                 return AE_BAD_PARAMETER;
707
708         /*
709          * Allocate/initialize DPC structure.  Note that this memory will be
 710          * freed by the callee.  The kernel handles the work_struct list in a
 711          * way that allows us to also free its memory inside the callee.
 712          * Because we may want to schedule several tasks with different
 713          * parameters, we can't use the approach some kernel code takes of
 714          * using a static work_struct.
715          */
716
717         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
718         if (!dpc)
719                 return_ACPI_STATUS(AE_NO_MEMORY);
720
721         dpc->function = function;
722         dpc->context = context;
723
724         INIT_WORK(&dpc->work, acpi_os_execute_deferred);
725         queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
726         if (!queue_work(queue, &dpc->work)) {
727                 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
728                           "Call to queue_work() failed.\n"));
729                 status = AE_ERROR;
730                 kfree(dpc);
731         }
732         return_ACPI_STATUS(status);
733 }
734
735 EXPORT_SYMBOL(acpi_os_execute);
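/*
 * Usage sketch (illustrative only; the callback name and context variable
 * below are invented for this example and do not exist elsewhere in the
 * tree).  A caller defers work to the notify workqueue like this:
 *
 *	static void example_notify_fn(void *context)
 *	{
 *		... runs later in kacpi_notify_wq, process context ...
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, example_notify_fn, ctx);
 *	if (ACPI_FAILURE(status))
 *		... work was not queued; the caller must clean up ctx ...
 *
 * The dpc wrapper allocated in acpi_os_execute() is freed by
 * acpi_os_execute_deferred() after the callback returns, so the caller
 * only manages its own context.
 */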
736
737 void acpi_os_wait_events_complete(void *context)
738 {
739         flush_workqueue(kacpid_wq);
740 }
741
742 EXPORT_SYMBOL(acpi_os_wait_events_complete);
743
744 /*
745  * Allocate the memory for a spinlock and initialize it.
746  */
747 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
748 {
749         spin_lock_init(*handle);
750
751         return AE_OK;
752 }
753
754 /*
755  * Deallocate the memory for a spinlock.
756  */
757 void acpi_os_delete_lock(acpi_spinlock handle)
758 {
759         return;
760 }
761
762 acpi_status
763 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
764 {
765         struct semaphore *sem = NULL;
766
767
768         sem = acpi_os_allocate(sizeof(struct semaphore));
769         if (!sem)
770                 return AE_NO_MEMORY;
771         memset(sem, 0, sizeof(struct semaphore));
772
773         sema_init(sem, initial_units);
774
775         *handle = (acpi_handle *) sem;
776
777         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
778                           *handle, initial_units));
779
780         return AE_OK;
781 }
782
783 /*
784  * TODO: A better way to delete semaphores?  Linux doesn't have a
785  * 'delete_semaphore()' function -- may result in an invalid
786  * pointer dereference for non-synchronized consumers.  Should
787  * we at least check for blocked threads and signal/cancel them?
788  */
789
790 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
791 {
792         struct semaphore *sem = (struct semaphore *)handle;
793
794
795         if (!sem)
796                 return AE_BAD_PARAMETER;
797
798         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
799
800         kfree(sem);
801         sem = NULL;
802
803         return AE_OK;
804 }
805
806 /*
807  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
808  * improvise.  The process is to sleep for one scheduler quantum
809  * until the semaphore becomes available.  Downside is that this
810  * may result in starvation for timeout-based waits when there's
811  * lots of semaphore activity.
812  *
813  * TODO: Support for units > 1?
814  */
815 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
816 {
817         acpi_status status = AE_OK;
818         struct semaphore *sem = (struct semaphore *)handle;
819         int ret = 0;
820
821
822         if (!sem || (units < 1))
823                 return AE_BAD_PARAMETER;
824
825         if (units > 1)
826                 return AE_SUPPORT;
827
828         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
829                           handle, units, timeout));
830
831         /*
832          * This can be called during resume with interrupts off.
833          * Like boot-time, we should be single threaded and will
834          * always get the lock if we try -- timeout or not.
835          * If this doesn't succeed, then we will oops courtesy of
836          * might_sleep() in down().
837          */
838         if (!down_trylock(sem))
839                 return AE_OK;
840
841         switch (timeout) {
842                 /*
843                  * No Wait:
844                  * --------
845                  * A zero timeout value indicates that we shouldn't wait - just
 846                  * acquire the semaphore if available, otherwise return AE_TIME
847                  * (a.k.a. 'would block').
848                  */
849         case 0:
850                 if (down_trylock(sem))
851                         status = AE_TIME;
852                 break;
853
854                 /*
855                  * Wait Indefinitely:
856                  * ------------------
857                  */
858         case ACPI_WAIT_FOREVER:
859                 down(sem);
860                 break;
861
862                 /*
863                  * Wait w/ Timeout:
864                  * ----------------
865                  */
866         default:
867                 // TODO: A better timeout algorithm?
868                 {
869                         int i = 0;
870                         static const int quantum_ms = 1000 / HZ;
871
872                         ret = down_trylock(sem);
873                         for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
874                                 schedule_timeout_interruptible(1);
875                                 ret = down_trylock(sem);
876                         }
877
878                         if (ret != 0)
879                                 status = AE_TIME;
880                 }
881                 break;
882         }
883
884         if (ACPI_FAILURE(status)) {
885                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
886                                   "Failed to acquire semaphore[%p|%d|%d], %s",
887                                   handle, units, timeout,
888                                   acpi_format_exception(status)));
889         } else {
890                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
891                                   "Acquired semaphore[%p|%d|%d]", handle,
892                                   units, timeout));
893         }
894
895         return status;
896 }
897
898 /*
899  * TODO: Support for units > 1?
900  */
901 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
902 {
903         struct semaphore *sem = (struct semaphore *)handle;
904
905
906         if (!sem || (units < 1))
907                 return AE_BAD_PARAMETER;
908
909         if (units > 1)
910                 return AE_SUPPORT;
911
912         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
913                           units));
914
915         up(sem);
916
917         return AE_OK;
918 }
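/*
 * Usage sketch for the semaphore OSL calls above (illustrative only; the
 * "sem" handle is invented for the example).  ACPICA normally drives this
 * sequence internally:
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);
 *	... critical section ...
 *	acpi_os_signal_semaphore(sem, 1);
 *	acpi_os_delete_semaphore(sem);
 *
 * A finite timeout (in milliseconds) is polled in scheduler-quantum steps
 * as described above acpi_os_wait_semaphore(); units greater than 1
 * return AE_SUPPORT.
 */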
919
920 #ifdef ACPI_FUTURE_USAGE
921 u32 acpi_os_get_line(char *buffer)
922 {
923
924 #ifdef ENABLE_DEBUGGER
925         if (acpi_in_debugger) {
926                 u32 chars;
927
928                 kdb_read(buffer, sizeof(line_buf));
929
 930                 /* strip the trailing CR that kdb includes */
931                 chars = strlen(buffer) - 1;
932                 buffer[chars] = '\0';
933         }
934 #endif
935
936         return 0;
937 }
938 #endif                          /*  ACPI_FUTURE_USAGE  */
939
940 acpi_status acpi_os_signal(u32 function, void *info)
941 {
942         switch (function) {
943         case ACPI_SIGNAL_FATAL:
944                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
945                 break;
946         case ACPI_SIGNAL_BREAKPOINT:
947                 /*
948                  * AML Breakpoint
949                  * ACPI spec. says to treat it as a NOP unless
950                  * you are debugging.  So if/when we integrate
951                  * AML debugger into the kernel debugger its
952                  * hook will go here.  But until then it is
953                  * not useful to print anything on breakpoints.
954                  */
955                 break;
956         default:
957                 break;
958         }
959
960         return AE_OK;
961 }
962
963 static int __init acpi_os_name_setup(char *str)
964 {
965         char *p = acpi_os_name;
966         int count = ACPI_MAX_OVERRIDE_LEN - 1;
967
968         if (!str || !*str)
969                 return 0;
970
971         for (; count-- && str && *str; str++) {
972                 if (isalnum(*str) || *str == ' ' || *str == ':')
973                         *p++ = *str;
974                 else if (*str == '\'' || *str == '"')
975                         continue;
976                 else
977                         break;
978         }
979         *p = 0;
980
981         return 1;
982
983 }
984
985 __setup("acpi_os_name=", acpi_os_name_setup);
986
987 static void __init set_osi_linux(unsigned int enable)
988 {
989         if (osi_linux.enable != enable) {
990                 osi_linux.enable = enable;
991                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
992                         enable ? "Add": "Delet");
993         }
994         return;
995 }
996
997 static void __init acpi_cmdline_osi_linux(unsigned int enable)
998 {
999         osi_linux.cmdline = 1;  /* cmdline set the default */
1000         set_osi_linux(enable);
1001
1002         return;
1003 }
1004
1005 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1006 {
1007         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1008
1009         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1010
1011         if (enable == -1)
1012                 return;
1013
1014         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1015
1016         set_osi_linux(enable);
1017
1018         return;
1019 }
1020
1021 /*
1022  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1023  *
1024  * empty string disables _OSI
1025  * string starting with '!' disables that string
1026  * otherwise string is added to list, augmenting built-in strings
1027  */
1028 int __init acpi_osi_setup(char *str)
1029 {
1030         if (str == NULL || *str == '\0') {
1031                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1032                 acpi_gbl_create_osi_method = FALSE;
1033         } else if (!strcmp("!Linux", str)) {
1034                 acpi_cmdline_osi_linux(0);      /* !enable */
1035         } else if (*str == '!') {
1036                 if (acpi_osi_invalidate(++str) == AE_OK)
1037                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1038         } else if (!strcmp("Linux", str)) {
1039                 acpi_cmdline_osi_linux(1);      /* enable */
1040         } else if (*osi_additional_string == '\0') {
1041                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1042                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1043         }
1044
1045         return 1;
1046 }
1047
1048 __setup("acpi_osi=", acpi_osi_setup);
1049
1050 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1051 static int __init acpi_serialize_setup(char *str)
1052 {
1053         printk(KERN_INFO PREFIX "serialize enabled\n");
1054
1055         acpi_gbl_all_methods_serialized = TRUE;
1056
1057         return 1;
1058 }
1059
1060 __setup("acpi_serialize", acpi_serialize_setup);
1061
1062 /*
 1063  * Wake and run-time GPEs are expected to be separate.
 1064  * We disable wake GPEs at run-time to prevent spurious
 1065  * interrupts.
 1066  *
 1067  * However, if a system shares wake and run-time events
 1068  * on the same GPE, this flag is available to tell Linux
 1069  * to keep the wake GPEs enabled at run-time.
1070  */
1071 static int __init acpi_wake_gpes_always_on_setup(char *str)
1072 {
1073         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1074
1075         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1076
1077         return 1;
1078 }
1079
1080 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1081
 1082 /* Check for resource interference between native drivers and ACPI
 1083  * OperationRegions (SystemIO and System Memory only).
 1084  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 1085  * in arbitrary AML code and can interfere with legacy drivers.
 1086  * acpi_enforce_resources= can be set to:
 1087  *
 1088  *   - strict           (2)
 1089  *     -> a driver later trying to access such a resource will not load
 1090  *
 1091  *   - lax (default)    (1)
 1092  *     -> a driver later trying to access such a resource will load, but a
 1093  *     kernel message warns that something might go wrong
 1094  *
 1095  *   - no               (0)
 1096  *     -> ACPI Operation Region resources will not be registered
 1097  */
1098 #define ENFORCE_RESOURCES_STRICT 2
1099 #define ENFORCE_RESOURCES_LAX    1
1100 #define ENFORCE_RESOURCES_NO     0
1101
1102 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1103
1104 static int __init acpi_enforce_resources_setup(char *str)
1105 {
1106         if (str == NULL || *str == '\0')
1107                 return 0;
1108
1109         if (!strcmp("strict", str))
1110                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1111         else if (!strcmp("lax", str))
1112                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1113         else if (!strcmp("no", str))
1114                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1115
1116         return 1;
1117 }
1118
1119 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1120
1121 /* Check for resource conflicts between ACPI OperationRegions and native
1122  * drivers */
1123 int acpi_check_resource_conflict(struct resource *res)
1124 {
1125         struct acpi_res_list *res_list_elem;
1126         int ioport;
1127         int clash = 0;
1128
1129         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1130                 return 0;
1131         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1132                 return 0;
1133
1134         ioport = res->flags & IORESOURCE_IO;
1135
1136         spin_lock(&acpi_res_lock);
1137         list_for_each_entry(res_list_elem, &resource_list_head,
1138                             resource_list) {
1139                 if (ioport && (res_list_elem->resource_type
1140                                != ACPI_ADR_SPACE_SYSTEM_IO))
1141                         continue;
1142                 if (!ioport && (res_list_elem->resource_type
1143                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1144                         continue;
1145
1146                 if (res->end < res_list_elem->start
1147                     || res_list_elem->end < res->start)
1148                         continue;
1149                 clash = 1;
1150                 break;
1151         }
1152         spin_unlock(&acpi_res_lock);
1153
1154         if (clash) {
1155                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1156                         printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1157                                " conflicts with ACPI region %s"
1158                                " [0x%llx-0x%llx]\n",
1159                                acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1160                                ? KERN_WARNING : KERN_ERR,
1161                                ioport ? "I/O" : "Memory", res->name,
1162                                (long long) res->start, (long long) res->end,
1163                                res_list_elem->name,
1164                                (long long) res_list_elem->start,
1165                                (long long) res_list_elem->end);
1166                         printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1167                 }
1168                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1169                         return -EBUSY;
1170         }
1171         return 0;
1172 }
1173 EXPORT_SYMBOL(acpi_check_resource_conflict);
1174
1175 int acpi_check_region(resource_size_t start, resource_size_t n,
1176                       const char *name)
1177 {
1178         struct resource res = {
1179                 .start = start,
1180                 .end   = start + n - 1,
1181                 .name  = name,
1182                 .flags = IORESOURCE_IO,
1183         };
1184
1185         return acpi_check_resource_conflict(&res);
1186 }
1187 EXPORT_SYMBOL(acpi_check_region);
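/*
 * Usage sketch (driver name and I/O range are invented for illustration):
 * a native driver checks for an AML-owned region before claiming it:
 *
 *	if (acpi_check_region(0x0295, 8, "example-hwmon"))
 *		return -EBUSY;
 *	if (!request_region(0x0295, 8, "example-hwmon"))
 *		return -EBUSY;
 *
 * With acpi_enforce_resources=lax (the default) the check only logs a
 * warning and returns 0, so the driver still loads.
 */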
1188
1189 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1190                       const char *name)
1191 {
1192         struct resource res = {
1193                 .start = start,
1194                 .end   = start + n - 1,
1195                 .name  = name,
1196                 .flags = IORESOURCE_MEM,
1197         };
1198
1199         return acpi_check_resource_conflict(&res);
1200
1201 }
1202 EXPORT_SYMBOL(acpi_check_mem_region);
1203
1204 /*
1205  * Acquire a spinlock.
1206  *
1207  * handle is a pointer to the spinlock_t.
1208  */
1209
1210 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1211 {
1212         acpi_cpu_flags flags;
1213         spin_lock_irqsave(lockp, flags);
1214         return flags;
1215 }
1216
1217 /*
1218  * Release a spinlock. See above.
1219  */
1220
1221 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1222 {
1223         spin_unlock_irqrestore(lockp, flags);
1224 }
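/*
 * Typical calling pattern for the pair above (sketch; "lock" stands for
 * any acpi_spinlock the caller owns):
 *
 *	acpi_cpu_flags flags;
 *
 *	flags = acpi_os_acquire_lock(lock);
 *	... critical section, local interrupts disabled ...
 *	acpi_os_release_lock(lock, flags);
 */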
1225
1226 #ifndef ACPI_USE_LOCAL_CACHE
1227
1228 /*******************************************************************************
1229  *
1230  * FUNCTION:    acpi_os_create_cache
1231  *
1232  * PARAMETERS:  name      - Ascii name for the cache
1233  *              size      - Size of each cached object
1234  *              depth     - Maximum depth of the cache (in objects) <ignored>
1235  *              cache     - Where the new cache object is returned
1236  *
1237  * RETURN:      status
1238  *
1239  * DESCRIPTION: Create a cache object
1240  *
1241  ******************************************************************************/
1242
1243 acpi_status
1244 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1245 {
1246         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1247         if (*cache == NULL)
1248                 return AE_ERROR;
1249         else
1250                 return AE_OK;
1251 }
1252
1253 /*******************************************************************************
1254  *
1255  * FUNCTION:    acpi_os_purge_cache
1256  *
1257  * PARAMETERS:  Cache           - Handle to cache object
1258  *
1259  * RETURN:      Status
1260  *
1261  * DESCRIPTION: Free all objects within the requested cache.
1262  *
1263  ******************************************************************************/
1264
1265 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1266 {
1267         kmem_cache_shrink(cache);
1268         return (AE_OK);
1269 }
1270
1271 /*******************************************************************************
1272  *
1273  * FUNCTION:    acpi_os_delete_cache
1274  *
1275  * PARAMETERS:  Cache           - Handle to cache object
1276  *
1277  * RETURN:      Status
1278  *
1279  * DESCRIPTION: Free all objects within the requested cache and delete the
1280  *              cache object.
1281  *
1282  ******************************************************************************/
1283
1284 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1285 {
1286         kmem_cache_destroy(cache);
1287         return (AE_OK);
1288 }
1289
1290 /*******************************************************************************
1291  *
1292  * FUNCTION:    acpi_os_release_object
1293  *
1294  * PARAMETERS:  Cache       - Handle to cache object
1295  *              Object      - The object to be released
1296  *
1297  * RETURN:      None
1298  *
1299  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1300  *              the object is deleted.
1301  *
1302  ******************************************************************************/
1303
1304 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1305 {
1306         kmem_cache_free(cache, object);
1307         return (AE_OK);
1308 }
1309
1310 /**
1311  *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
1312  *
1313  *      Returns 0 on success
1314  */
1315 static int acpi_dmi_dump(void)
1316 {
1317
1318         if (!dmi_available)
1319                 return -1;
1320
1321         printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1322                 dmi_get_system_info(DMI_SYS_VENDOR));
1323         printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1324                 dmi_get_system_info(DMI_PRODUCT_NAME));
1325         printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1326                 dmi_get_system_info(DMI_PRODUCT_VERSION));
1327         printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1328                 dmi_get_system_info(DMI_BOARD_NAME));
1329         printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1330                 dmi_get_system_info(DMI_BIOS_VENDOR));
1331         printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1332                 dmi_get_system_info(DMI_BIOS_DATE));
1333
1334         return 0;
1335 }
1336
1337
1338 /******************************************************************************
1339  *
1340  * FUNCTION:    acpi_os_validate_interface
1341  *
1342  * PARAMETERS:  interface           - Requested interface to be validated
1343  *
1344  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1345  *
1346  * DESCRIPTION: Match an interface string to the interfaces supported by the
1347  *              host. Strings originate from an AML call to the _OSI method.
1348  *
1349  *****************************************************************************/
1350
1351 acpi_status
1352 acpi_os_validate_interface (char *interface)
1353 {
1354         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1355                 return AE_OK;
1356         if (!strcmp("Linux", interface)) {
1357
1358                 printk(KERN_NOTICE PREFIX
1359                         "BIOS _OSI(Linux) query %s%s\n",
1360                         osi_linux.enable ? "honored" : "ignored",
1361                         osi_linux.cmdline ? " via cmdline" :
1362                         osi_linux.dmi ? " via DMI" : "");
1363
1364                 if (!osi_linux.dmi) {
1365                         if (acpi_dmi_dump())
1366                                 printk(KERN_NOTICE PREFIX
1367                                         "[please extract dmidecode output]\n");
1368                         printk(KERN_NOTICE PREFIX
1369                                 "Please send DMI info above to "
1370                                 "linux-acpi@vger.kernel.org\n");
1371                 }
1372                 if (!osi_linux.known && !osi_linux.cmdline) {
1373                         printk(KERN_NOTICE PREFIX
1374                                 "If \"acpi_osi=%sLinux\" works better, "
1375                                 "please notify linux-acpi@vger.kernel.org\n",
1376                                 osi_linux.enable ? "!" : "");
1377                 }
1378
1379                 if (osi_linux.enable)
1380                         return AE_OK;
1381         }
1382         return AE_SUPPORT;
1383 }
1384
1385 /******************************************************************************
1386  *
1387  * FUNCTION:    acpi_os_validate_address
1388  *
1389  * PARAMETERS:  space_id             - ACPI space ID
1390  *              address             - Physical address
1391  *              length              - Address length
1392  *
1393  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1394  *              should return AE_AML_ILLEGAL_ADDRESS.
1395  *
1396  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1397  *              the addresses accessed by AML operation regions.
1398  *
1399  *****************************************************************************/
1400
1401 acpi_status
1402 acpi_os_validate_address (
1403     u8                   space_id,
1404     acpi_physical_address   address,
1405     acpi_size               length,
1406     char *name)
1407 {
1408         struct acpi_res_list *res;
1409         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1410                 return AE_OK;
1411
1412         switch (space_id) {
1413         case ACPI_ADR_SPACE_SYSTEM_IO:
1414         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 1415                 /* Only interference checks against SystemIO and SystemMemory
 1416                    are needed */
1417                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1418                 if (!res)
1419                         return AE_OK;
 1420                 /* ACPI names are fixed at 4 bytes; use strlcpy to be safe */
1421                 strlcpy(res->name, name, 5);
1422                 res->start = address;
1423                 res->end = address + length - 1;
1424                 res->resource_type = space_id;
1425                 spin_lock(&acpi_res_lock);
1426                 list_add(&res->resource_list, &resource_list_head);
1427                 spin_unlock(&acpi_res_lock);
1428                 pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1429                          "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1430                          ? "SystemIO" : "System Memory",
1431                          (unsigned long long)res->start,
1432                          (unsigned long long)res->end,
1433                          res->name);
1434                 break;
1435         case ACPI_ADR_SPACE_PCI_CONFIG:
1436         case ACPI_ADR_SPACE_EC:
1437         case ACPI_ADR_SPACE_SMBUS:
1438         case ACPI_ADR_SPACE_CMOS:
1439         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1440         case ACPI_ADR_SPACE_DATA_TABLE:
1441         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1442                 break;
1443         }
1444         return AE_OK;
1445 }
1446
1447 #endif