drivers/acpi/osl.c
1 /*
2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3 *
4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 *
28 */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX "ACPI: "
57 struct acpi_os_dpc {
58 acpi_osd_exec_callback function;
59 void *context;
60 struct work_struct work;
61 };
62
63 #ifdef CONFIG_ACPI_CUSTOM_DSDT
64 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
65 #endif
66
67 #ifdef ENABLE_DEBUGGER
68 #include <linux/kdb.h>
69
70 /* stuff for debugger support */
71 int acpi_in_debugger;
72 EXPORT_SYMBOL(acpi_in_debugger);
73
74 extern char line_buf[80];
75 #endif /*ENABLE_DEBUGGER */
76
77 static unsigned int acpi_irq_irq;
78 static acpi_osd_handler acpi_irq_handler;
79 static void *acpi_irq_context;
80 static struct workqueue_struct *kacpid_wq;
81 static struct workqueue_struct *kacpi_notify_wq;
82
83 struct acpi_res_list {
84 resource_size_t start;
85 resource_size_t end;
86 acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
87 char name[5]; /* ACPI names are at most 4 chars; keep a copy here
88 instead of res->name so no kmalloc is needed */
89 struct list_head resource_list;
90 };
91
92 static LIST_HEAD(resource_list_head);
93 static DEFINE_SPINLOCK(acpi_res_lock);
94
95 #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
96 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
97
98 /*
99 * The story of _OSI(Linux)
100 *
101 * From pre-history through Linux-2.6.22,
102 * Linux responded TRUE upon a BIOS OSI(Linux) query.
103 *
104 * Unfortunately, reference BIOS writers got wind of this
105 * and put OSI(Linux) in their example code, quickly exposing
106 * this string as ill-conceived and opening the door to
107 * an un-bounded number of BIOS incompatibilities.
108 *
109 * For example, OSI(Linux) was used on resume to re-POST a
110 * video card on one system, because Linux at that time
111 * could not do a speedy restore in its native driver.
112 * But then upon gaining quick native restore capability,
113 * Linux has no way to tell the BIOS to skip the time-consuming
114 * POST -- putting Linux at a permanent performance disadvantage.
115 * On another system, the BIOS writer used OSI(Linux)
116 * to infer native OS support for IPMI! On other systems,
117 * OSI(Linux) simply got in the way of Linux claiming to
118 * be compatible with other operating systems, exposing
119 * BIOS issues such as skipped device initialization.
120 *
121 * So "Linux" turned out to be a really poor chose of
122 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
123 *
124 * BIOS writers should NOT query _OSI(Linux) on future systems.
125 * Linux will complain on the console when it sees it, and return FALSE.
126 * To get Linux to return TRUE for your system will require
127 * a kernel source update to add a DMI entry,
128 * or boot with "acpi_osi=Linux"
129 */
130
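/*
 * Sketch (names are illustrative) of how a DMI quirk opts a known machine
 * back in: a dmi_system_id callback, living in the DMI blacklist table
 * rather than in this file, calls acpi_dmi_osi_linux() defined below.
 */
#if 0
static int __init example_enable_osi_linux(const struct dmi_system_id *d)
{
	acpi_dmi_osi_linux(1, d);	/* answer TRUE to _OSI(Linux) */
	return 0;
}
#endif
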
131 static struct osi_linux {
132 unsigned int enable:1;
133 unsigned int dmi:1;
134 unsigned int cmdline:1;
135 unsigned int known:1;
136 } osi_linux = { 0, 0, 0, 0};
137
138 static void __init acpi_request_region (struct acpi_generic_address *addr,
139 unsigned int length, char *desc)
140 {
141 struct resource *res;
142
143 if (!addr->address || !length)
144 return;
145
146 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
147 res = request_region(addr->address, length, desc);
148 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
149 res = request_mem_region(addr->address, length, desc);
150 }
151
152 static int __init acpi_reserve_resources(void)
153 {
154 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
155 "ACPI PM1a_EVT_BLK");
156
157 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
158 "ACPI PM1b_EVT_BLK");
159
160 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
161 "ACPI PM1a_CNT_BLK");
162
163 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
164 "ACPI PM1b_CNT_BLK");
165
166 if (acpi_gbl_FADT.pm_timer_length == 4)
167 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
168
169 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
170 "ACPI PM2_CNT_BLK");
171
172 /* Length of GPE blocks must be a non-negative multiple of 2 */
173
174 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
175 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
176 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
177
178 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
179 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
180 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
181
182 return 0;
183 }
184 device_initcall(acpi_reserve_resources);
185
186 acpi_status __init acpi_os_initialize(void)
187 {
188 return AE_OK;
189 }
190
191 acpi_status acpi_os_initialize1(void)
192 {
193 kacpid_wq = create_singlethread_workqueue("kacpid");
194 kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
195 BUG_ON(!kacpid_wq);
196 BUG_ON(!kacpi_notify_wq);
197 return AE_OK;
198 }
199
200 acpi_status acpi_os_terminate(void)
201 {
202 if (acpi_irq_handler) {
203 acpi_os_remove_interrupt_handler(acpi_irq_irq,
204 acpi_irq_handler);
205 }
206
207 destroy_workqueue(kacpid_wq);
208 destroy_workqueue(kacpi_notify_wq);
209
210 return AE_OK;
211 }
212
213 void acpi_os_printf(const char *fmt, ...)
214 {
215 va_list args;
216 va_start(args, fmt);
217 acpi_os_vprintf(fmt, args);
218 va_end(args);
219 }
220
221 void acpi_os_vprintf(const char *fmt, va_list args)
222 {
223 static char buffer[512];
224
225 vsprintf(buffer, fmt, args);
226
227 #ifdef ENABLE_DEBUGGER
228 if (acpi_in_debugger) {
229 kdb_printf("%s", buffer);
230 } else {
231 printk(KERN_CONT "%s", buffer);
232 }
233 #else
234 printk(KERN_CONT "%s", buffer);
235 #endif
236 }
237
238 acpi_physical_address __init acpi_os_get_root_pointer(void)
239 {
240 if (efi_enabled) {
241 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
242 return efi.acpi20;
243 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
244 return efi.acpi;
245 else {
246 printk(KERN_ERR PREFIX
247 "System description tables not found\n");
248 return 0;
249 }
250 } else {
251 acpi_physical_address pa = 0;
252
253 acpi_find_root_pointer(&pa);
254 return pa;
255 }
256 }
257
258 void __iomem *__init_refok
259 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
260 {
261 if (phys > ULONG_MAX) {
262 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
263 return NULL;
264 }
265 if (acpi_gbl_permanent_mmap)
266 /*
267 * ioremap checks to ensure this is in reserved space
268 */
269 return ioremap((unsigned long)phys, size);
270 else
271 return __acpi_map_table((unsigned long)phys, size);
272 }
273 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
274
275 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
276 {
277 if (acpi_gbl_permanent_mmap)
278 iounmap(virt);
279 else
280 __acpi_unmap_table(virt, size);
281 }
282 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
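
/*
 * Usage sketch for the map/unmap pair above: peek at the 'length' field of a
 * table header at a given physical address.  The helper name and caller are
 * hypothetical; struct acpi_table_header comes from the ACPICA headers.
 */
#if 0
static u32 example_read_table_length(acpi_physical_address table_phys)
{
	void __iomem *virt;
	u32 length;

	/* Map only the fixed-size table header. */
	virt = acpi_os_map_memory(table_phys, sizeof(struct acpi_table_header));
	if (!virt)
		return 0;

	length = readl(virt + offsetof(struct acpi_table_header, length));

	/* Unmap with the same size that was mapped. */
	acpi_os_unmap_memory(virt, sizeof(struct acpi_table_header));
	return length;
}
#endif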
283
284 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
285 {
286 if (!acpi_gbl_permanent_mmap)
287 __acpi_unmap_table(virt, size);
288 }
289
290 #ifdef ACPI_FUTURE_USAGE
291 acpi_status
292 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
293 {
294 if (!phys || !virt)
295 return AE_BAD_PARAMETER;
296
297 *phys = virt_to_phys(virt);
298
299 return AE_OK;
300 }
301 #endif
302
303 #define ACPI_MAX_OVERRIDE_LEN 100
304
305 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
306
307 acpi_status
308 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
309 acpi_string * new_val)
310 {
311 if (!init_val || !new_val)
312 return AE_BAD_PARAMETER;
313
314 *new_val = NULL;
315 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
316 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
317 acpi_os_name);
318 *new_val = acpi_os_name;
319 }
320
321 return AE_OK;
322 }
323
324 acpi_status
325 acpi_os_table_override(struct acpi_table_header * existing_table,
326 struct acpi_table_header ** new_table)
327 {
328 if (!existing_table || !new_table)
329 return AE_BAD_PARAMETER;
330
331 *new_table = NULL;
332
333 #ifdef CONFIG_ACPI_CUSTOM_DSDT
334 if (strncmp(existing_table->signature, "DSDT", 4) == 0)
335 *new_table = (struct acpi_table_header *)AmlCode;
336 #endif
337 if (*new_table != NULL) {
338 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
339 "this is unsafe: tainting kernel\n",
340 existing_table->signature,
341 existing_table->oem_table_id);
342 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
343 }
344 return AE_OK;
345 }
346
347 static irqreturn_t acpi_irq(int irq, void *dev_id)
348 {
349 u32 handled;
350
351 handled = (*acpi_irq_handler) (acpi_irq_context);
352
353 if (handled) {
354 acpi_irq_handled++;
355 return IRQ_HANDLED;
356 } else {
357 acpi_irq_not_handled++;
358 return IRQ_NONE;
359 }
360 }
361
362 acpi_status
363 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
364 void *context)
365 {
366 unsigned int irq;
367
368 acpi_irq_stats_init();
369
370 /*
371 * Ignore the GSI from the core, and use the value in our copy of the
372 * FADT. It may not be the same if an interrupt source override exists
373 * for the SCI.
374 */
375 gsi = acpi_gbl_FADT.sci_interrupt;
376 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
377 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
378 gsi);
379 return AE_OK;
380 }
381
382 acpi_irq_handler = handler;
383 acpi_irq_context = context;
384 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
385 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
386 return AE_NOT_ACQUIRED;
387 }
388 acpi_irq_irq = irq;
389
390 return AE_OK;
391 }
392
393 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
394 {
395 if (irq) {
396 free_irq(irq, acpi_irq);
397 acpi_irq_handler = NULL;
398 acpi_irq_irq = 0;
399 }
400
401 return AE_OK;
402 }
403
404 /*
405 * Running in interpreter thread context, safe to sleep
406 */
407
408 void acpi_os_sleep(acpi_integer ms)
409 {
410 schedule_timeout_interruptible(msecs_to_jiffies(ms));
411 }
412
413 void acpi_os_stall(u32 us)
414 {
415 while (us) {
416 u32 delay = 1000;
417
418 if (delay > us)
419 delay = us;
420 udelay(delay);
421 touch_nmi_watchdog();
422 us -= delay;
423 }
424 }
425
426 /*
427 * Support ACPI 3.0 AML Timer operand
428 * Returns 64-bit free-running, monotonically increasing timer
429 * with 100ns granularity
430 */
431 u64 acpi_os_get_timer(void)
432 {
433 static u64 t;
434
435 #ifdef CONFIG_HPET
436 /* TBD: use HPET if available */
437 #endif
438
439 #ifdef CONFIG_X86_PM_TIMER
440 /* TBD: default to PM timer if HPET was not available */
441 #endif
442 if (!t)
443 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
444
445 return ++t;
446 }
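
/*
 * For reference, a minimal non-stub implementation could derive the 100ns
 * units from ktime_get(), assuming <linux/ktime.h> and <linux/math64.h> are
 * pulled in; kept out of the build, the stub above is what is compiled.
 */
#if 0
static u64 example_timer_100ns(void)
{
	/* ktime_get() is monotonic and reports nanoseconds. */
	return div_u64(ktime_to_ns(ktime_get()), 100);
}
#endif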
447
448 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
449 {
450 u32 dummy;
451
452 if (!value)
453 value = &dummy;
454
455 *value = 0;
456 if (width <= 8) {
457 *(u8 *) value = inb(port);
458 } else if (width <= 16) {
459 *(u16 *) value = inw(port);
460 } else if (width <= 32) {
461 *(u32 *) value = inl(port);
462 } else {
463 BUG();
464 }
465
466 return AE_OK;
467 }
468
469 EXPORT_SYMBOL(acpi_os_read_port);
470
471 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
472 {
473 if (width <= 8) {
474 outb(value, port);
475 } else if (width <= 16) {
476 outw(value, port);
477 } else if (width <= 32) {
478 outl(value, port);
479 } else {
480 BUG();
481 }
482
483 return AE_OK;
484 }
485
486 EXPORT_SYMBOL(acpi_os_write_port);
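
/*
 * Usage sketch: the port helpers take the access width in bits, so reading
 * one byte from a (hypothetical) port 0x80 looks like this.
 */
#if 0
static u8 example_read_status_port(void)
{
	u32 val = 0;

	acpi_os_read_port(0x80, &val, 8);
	return (u8)val;
}
#endif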
487
488 acpi_status
489 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
490 {
491 u32 dummy;
492 void __iomem *virt_addr;
493
494 virt_addr = ioremap(phys_addr, width);
495 if (!value)
496 value = &dummy;
497
498 switch (width) {
499 case 8:
500 *(u8 *) value = readb(virt_addr);
501 break;
502 case 16:
503 *(u16 *) value = readw(virt_addr);
504 break;
505 case 32:
506 *(u32 *) value = readl(virt_addr);
507 break;
508 default:
509 BUG();
510 }
511
512 iounmap(virt_addr);
513
514 return AE_OK;
515 }
516
517 acpi_status
518 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
519 {
520 void __iomem *virt_addr;
521
522 virt_addr = ioremap(phys_addr, width);
523
524 switch (width) {
525 case 8:
526 writeb(value, virt_addr);
527 break;
528 case 16:
529 writew(value, virt_addr);
530 break;
531 case 32:
532 writel(value, virt_addr);
533 break;
534 default:
535 BUG();
536 }
537
538 iounmap(virt_addr);
539
540 return AE_OK;
541 }
542
543 acpi_status
544 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
545 u32 *value, u32 width)
546 {
547 int result, size;
548
549 if (!value)
550 return AE_BAD_PARAMETER;
551
552 switch (width) {
553 case 8:
554 size = 1;
555 break;
556 case 16:
557 size = 2;
558 break;
559 case 32:
560 size = 4;
561 break;
562 default:
563 return AE_ERROR;
564 }
565
566 result = raw_pci_read(pci_id->segment, pci_id->bus,
567 PCI_DEVFN(pci_id->device, pci_id->function),
568 reg, size, value);
569
570 return (result ? AE_ERROR : AE_OK);
571 }
572
573 acpi_status
574 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
575 acpi_integer value, u32 width)
576 {
577 int result, size;
578
579 switch (width) {
580 case 8:
581 size = 1;
582 break;
583 case 16:
584 size = 2;
585 break;
586 case 32:
587 size = 4;
588 break;
589 default:
590 return AE_ERROR;
591 }
592
593 result = raw_pci_write(pci_id->segment, pci_id->bus,
594 PCI_DEVFN(pci_id->device, pci_id->function),
595 reg, size, value);
596
597 return (result ? AE_ERROR : AE_OK);
598 }
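
/*
 * Usage sketch: reading the vendor/device ID dword of a device through the
 * wrapper above.  The segment/bus/device/function values are examples only.
 */
#if 0
static u32 example_read_vendor_id(void)
{
	struct acpi_pci_id id = {
		.segment = 0, .bus = 0, .device = 0x1f, .function = 0,
	};
	u32 val = 0;

	acpi_os_read_pci_configuration(&id, PCI_VENDOR_ID, &val, 32);
	return val;
}
#endif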
599
600 /* TODO: Change code to take advantage of driver model more */
601 static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
602 acpi_handle chandle, /* current node */
603 struct acpi_pci_id **id,
604 int *is_bridge, u8 * bus_number)
605 {
606 acpi_handle handle;
607 struct acpi_pci_id *pci_id = *id;
608 acpi_status status;
609 unsigned long long temp;
610 acpi_object_type type;
611
612 acpi_get_parent(chandle, &handle);
613 if (handle != rhandle) {
614 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
615 bus_number);
616
617 status = acpi_get_type(handle, &type);
618 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
619 return;
620
621 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
622 &temp);
623 if (ACPI_SUCCESS(status)) {
624 u32 val;
625 pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
626 pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
627
628 if (*is_bridge)
629 pci_id->bus = *bus_number;
630
631 /* any nicer way to get bus number of bridge ? */
632 status =
633 acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
634 8);
635 if (ACPI_SUCCESS(status)
636 && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
637 status =
638 acpi_os_read_pci_configuration(pci_id, 0x18,
639 &val, 8);
640 if (!ACPI_SUCCESS(status)) {
641 /* Certainly broken... FIX ME */
642 return;
643 }
644 *is_bridge = 1;
645 pci_id->bus = val;
646 status =
647 acpi_os_read_pci_configuration(pci_id, 0x19,
648 &val, 8);
649 if (ACPI_SUCCESS(status)) {
650 *bus_number = val;
651 }
652 } else
653 *is_bridge = 0;
654 }
655 }
656 }
657
658 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
659 acpi_handle chandle, /* current node */
660 struct acpi_pci_id **id)
661 {
662 int is_bridge = 1;
663 u8 bus_number = (*id)->bus;
664
665 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
666 }
667
668 static void acpi_os_execute_deferred(struct work_struct *work)
669 {
670 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
671 if (!dpc) {
672 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
673 return;
674 }
675
676 dpc->function(dpc->context);
677 kfree(dpc);
678
679 return;
680 }
681
682 static void acpi_os_execute_hp_deferred(struct work_struct *work)
683 {
684 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
685 if (!dpc) {
686 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
687 return;
688 }
689
690 acpi_os_wait_events_complete(NULL);
691
692 dpc->function(dpc->context);
693 kfree(dpc);
694
695 return;
696 }
697
698 /*******************************************************************************
699 *
700 * FUNCTION: acpi_os_execute
701 *
702 * PARAMETERS: Type - Type of the callback
703 * Function - Function to be executed
704 * Context - Function parameters
705 *
706 * RETURN: Status
707 *
708 * DESCRIPTION: Depending on type, either queues function for deferred execution or
709 * immediately executes function on a separate thread.
710 *
711 ******************************************************************************/
712
713 static acpi_status __acpi_os_execute(acpi_execute_type type,
714 acpi_osd_exec_callback function, void *context, int hp)
715 {
716 acpi_status status = AE_OK;
717 struct acpi_os_dpc *dpc;
718 struct workqueue_struct *queue;
719 int ret;
720 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
721 "Scheduling function [%p(%p)] for deferred execution.\n",
722 function, context));
723
724 if (!function)
725 return AE_BAD_PARAMETER;
726
727 /*
728 * Allocate/initialize DPC structure. Note that this memory will be
729 * freed by the callee. The kernel handles the work_struct list in a
730 * way that allows us to also free its memory inside the callee.
731 * Because we may want to schedule several tasks with different
732 * parameters we can't use the approach some kernel code uses of
733 * having a static work_struct.
734 */
735
736 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
737 if (!dpc)
738 return AE_NO_MEMORY;
739
740 dpc->function = function;
741 dpc->context = context;
742
743 if (!hp) {
744 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
745 queue = (type == OSL_NOTIFY_HANDLER) ?
746 kacpi_notify_wq : kacpid_wq;
747 ret = queue_work(queue, &dpc->work);
748 } else {
749 INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
750 ret = schedule_work(&dpc->work);
751 }
752
753 if (!ret) {
754 printk(KERN_ERR PREFIX
755 "Call to queue_work() failed.\n");
756 status = AE_ERROR;
757 kfree(dpc);
758 }
759 return status;
760 }
761
762 acpi_status acpi_os_execute(acpi_execute_type type,
763 acpi_osd_exec_callback function, void *context)
764 {
765 return __acpi_os_execute(type, function, context, 0);
766 }
767 EXPORT_SYMBOL(acpi_os_execute);
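
/*
 * Usage sketch: a caller that needs AML evaluated outside its current
 * context hands a callback to acpi_os_execute(); with OSL_NOTIFY_HANDLER it
 * runs on kacpi_notify_wq.  The helper names are illustrative.
 */
#if 0
static void example_notify_work(void *context)
{
	acpi_handle handle = context;
	unsigned long long sta;

	/* Runs later in process context; safe to sleep and evaluate AML. */
	acpi_evaluate_integer(handle, "_STA", NULL, &sta);
}

static acpi_status example_queue_notify(acpi_handle handle)
{
	return acpi_os_execute(OSL_NOTIFY_HANDLER, example_notify_work, handle);
}
#endif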
768
769 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
770 void *context)
771 {
772 return __acpi_os_execute(0, function, context, 1);
773 }
774
775 void acpi_os_wait_events_complete(void *context)
776 {
777 flush_workqueue(kacpid_wq);
778 flush_workqueue(kacpi_notify_wq);
779 }
780
781 EXPORT_SYMBOL(acpi_os_wait_events_complete);
782
783 /*
784 * Allocate the memory for a spinlock and initialize it.
785 */
786 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
787 {
788 spin_lock_init(*handle);
789
790 return AE_OK;
791 }
792
793 /*
794 * Deallocate the memory for a spinlock.
795 */
796 void acpi_os_delete_lock(acpi_spinlock handle)
797 {
798 return;
799 }
800
801 acpi_status
802 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
803 {
804 struct semaphore *sem = NULL;
805
806 sem = acpi_os_allocate(sizeof(struct semaphore));
807 if (!sem)
808 return AE_NO_MEMORY;
809 memset(sem, 0, sizeof(struct semaphore));
810
811 sema_init(sem, initial_units);
812
813 *handle = (acpi_handle *) sem;
814
815 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
816 *handle, initial_units));
817
818 return AE_OK;
819 }
820
821 /*
822 * TODO: A better way to delete semaphores? Linux doesn't have a
823 * 'delete_semaphore()' function -- may result in an invalid
824 * pointer dereference for non-synchronized consumers. Should
825 * we at least check for blocked threads and signal/cancel them?
826 */
827
828 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
829 {
830 struct semaphore *sem = (struct semaphore *)handle;
831
832 if (!sem)
833 return AE_BAD_PARAMETER;
834
835 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
836
837 BUG_ON(!list_empty(&sem->wait_list));
838 kfree(sem);
839 sem = NULL;
840
841 return AE_OK;
842 }
843
844 /*
845 * TODO: Support for units > 1?
846 */
847 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
848 {
849 acpi_status status = AE_OK;
850 struct semaphore *sem = (struct semaphore *)handle;
851 long jiffies;
852 int ret = 0;
853
854 if (!sem || (units < 1))
855 return AE_BAD_PARAMETER;
856
857 if (units > 1)
858 return AE_SUPPORT;
859
860 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
861 handle, units, timeout));
862
863 if (timeout == ACPI_WAIT_FOREVER)
864 jiffies = MAX_SCHEDULE_TIMEOUT;
865 else
866 jiffies = msecs_to_jiffies(timeout);
867
868 ret = down_timeout(sem, jiffies);
869 if (ret)
870 status = AE_TIME;
871
872 if (ACPI_FAILURE(status)) {
873 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
874 "Failed to acquire semaphore[%p|%d|%d], %s",
875 handle, units, timeout,
876 acpi_format_exception(status)));
877 } else {
878 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
879 "Acquired semaphore[%p|%d|%d]", handle,
880 units, timeout));
881 }
882
883 return status;
884 }
885
886 /*
887 * TODO: Support for units > 1?
888 */
889 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
890 {
891 struct semaphore *sem = (struct semaphore *)handle;
892
893 if (!sem || (units < 1))
894 return AE_BAD_PARAMETER;
895
896 if (units > 1)
897 return AE_SUPPORT;
898
899 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
900 units));
901
902 up(sem);
903
904 return AE_OK;
905 }
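
/*
 * Lifecycle sketch for the semaphore wrappers above: create one unit, take
 * it with a 100 ms timeout, give it back and delete it.  Illustrative only.
 */
#if 0
static void example_semaphore_roundtrip(void)
{
	acpi_handle sem;

	if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
		return;

	if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))
		acpi_os_signal_semaphore(sem, 1);

	acpi_os_delete_semaphore(sem);
}
#endif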
906
907 #ifdef ACPI_FUTURE_USAGE
908 u32 acpi_os_get_line(char *buffer)
909 {
910
911 #ifdef ENABLE_DEBUGGER
912 if (acpi_in_debugger) {
913 u32 chars;
914
915 kdb_read(buffer, sizeof(line_buf));
916
917 /* remove the CR kdb includes */
918 chars = strlen(buffer) - 1;
919 buffer[chars] = '\0';
920 }
921 #endif
922
923 return 0;
924 }
925 #endif /* ACPI_FUTURE_USAGE */
926
927 acpi_status acpi_os_signal(u32 function, void *info)
928 {
929 switch (function) {
930 case ACPI_SIGNAL_FATAL:
931 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
932 break;
933 case ACPI_SIGNAL_BREAKPOINT:
934 /*
935 * AML Breakpoint
936 * ACPI spec. says to treat it as a NOP unless
937 * you are debugging. So if/when we integrate
938 * AML debugger into the kernel debugger its
939 * hook will go here. But until then it is
940 * not useful to print anything on breakpoints.
941 */
942 break;
943 default:
944 break;
945 }
946
947 return AE_OK;
948 }
949
950 static int __init acpi_os_name_setup(char *str)
951 {
952 char *p = acpi_os_name;
953 int count = ACPI_MAX_OVERRIDE_LEN - 1;
954
955 if (!str || !*str)
956 return 0;
957
958 for (; count-- && str && *str; str++) {
959 if (isalnum(*str) || *str == ' ' || *str == ':')
960 *p++ = *str;
961 else if (*str == '\'' || *str == '"')
962 continue;
963 else
964 break;
965 }
966 *p = 0;
967
968 return 1;
969
970 }
971
972 __setup("acpi_os_name=", acpi_os_name_setup);
973
974 static void __init set_osi_linux(unsigned int enable)
975 {
976 if (osi_linux.enable != enable) {
977 osi_linux.enable = enable;
978 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
979 enable ? "Add": "Delet");
980 }
981 return;
982 }
983
984 static void __init acpi_cmdline_osi_linux(unsigned int enable)
985 {
986 osi_linux.cmdline = 1; /* cmdline sets the default */
987 set_osi_linux(enable);
988
989 return;
990 }
991
992 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
993 {
994 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
995
996 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
997
998 if (enable == -1)
999 return;
1000
1001 osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */
1002
1003 set_osi_linux(enable);
1004
1005 return;
1006 }
1007
1008 /*
1009 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1010 *
1011 * empty string disables _OSI
1012 * string starting with '!' disables that string
1013 * otherwise string is added to list, augmenting built-in strings
1014 */
1015 int __init acpi_osi_setup(char *str)
1016 {
1017 if (str == NULL || *str == '\0') {
1018 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1019 acpi_gbl_create_osi_method = FALSE;
1020 } else if (!strcmp("!Linux", str)) {
1021 acpi_cmdline_osi_linux(0); /* !enable */
1022 } else if (*str == '!') {
1023 if (acpi_osi_invalidate(++str) == AE_OK)
1024 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1025 } else if (!strcmp("Linux", str)) {
1026 acpi_cmdline_osi_linux(1); /* enable */
1027 } else if (*osi_additional_string == '\0') {
1028 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1029 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1030 }
1031
1032 return 1;
1033 }
1034
1035 __setup("acpi_osi=", acpi_osi_setup);
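
/*
 * Example boot-parameter forms accepted by the handler above (the quoted
 * interface strings are samples only):
 *
 *	acpi_osi=			disable the _OSI method entirely
 *	acpi_osi=!Linux			force FALSE for _OSI(Linux)
 *	acpi_osi=Linux			answer TRUE to _OSI(Linux)
 *	acpi_osi="!Windows 2006"	delete a built-in interface string
 *	acpi_osi=FreeBSD		add one extra interface string
 */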
1036
1037 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1038 static int __init acpi_serialize_setup(char *str)
1039 {
1040 printk(KERN_INFO PREFIX "serialize enabled\n");
1041
1042 acpi_gbl_all_methods_serialized = TRUE;
1043
1044 return 1;
1045 }
1046
1047 __setup("acpi_serialize", acpi_serialize_setup);
1048
1049 /*
1050 * Wake and Run-Time GPEs are expected to be separate.
1051 * We disable wake-GPEs at run-time to prevent spurious
1052 * interrupts.
1053 *
1054 * However, if a system exists that shares Wake and
1055 * Run-time events on the same GPE this flag is available
1056 * to tell Linux to keep the wake-time GPEs enabled at run-time.
1057 */
1058 static int __init acpi_wake_gpes_always_on_setup(char *str)
1059 {
1060 printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1061
1062 acpi_gbl_leave_wake_gpes_disabled = FALSE;
1063
1064 return 1;
1065 }
1066
1067 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1068
1069 /* Check for resource interference between native drivers and ACPI
1070 * OperationRegions (SystemIO and System Memory only).
1071 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1072 * in arbitrary AML code and can interfere with legacy drivers.
1073 * acpi_enforce_resources= can be set to:
1074 *
1075 * - strict (default) (2)
1076 * -> a driver that later tries to access the resources will not load
1077 * - lax (1)
1078 * -> a driver that later tries to access the resources will load, but you
1079 * get a warning message that something might go wrong...
1080 *
1081 * - no (0)
1082 * -> ACPI Operation Region resources will not be registered
1083 *
1084 */
1085 #define ENFORCE_RESOURCES_STRICT 2
1086 #define ENFORCE_RESOURCES_LAX 1
1087 #define ENFORCE_RESOURCES_NO 0
1088
1089 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1090
1091 static int __init acpi_enforce_resources_setup(char *str)
1092 {
1093 if (str == NULL || *str == '\0')
1094 return 0;
1095
1096 if (!strcmp("strict", str))
1097 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1098 else if (!strcmp("lax", str))
1099 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1100 else if (!strcmp("no", str))
1101 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1102
1103 return 1;
1104 }
1105
1106 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1107
1108 /* Check for resource conflicts between ACPI OperationRegions and native
1109 * drivers */
1110 int acpi_check_resource_conflict(struct resource *res)
1111 {
1112 struct acpi_res_list *res_list_elem;
1113 int ioport;
1114 int clash = 0;
1115
1116 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1117 return 0;
1118 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1119 return 0;
1120
1121 ioport = res->flags & IORESOURCE_IO;
1122
1123 spin_lock(&acpi_res_lock);
1124 list_for_each_entry(res_list_elem, &resource_list_head,
1125 resource_list) {
1126 if (ioport && (res_list_elem->resource_type
1127 != ACPI_ADR_SPACE_SYSTEM_IO))
1128 continue;
1129 if (!ioport && (res_list_elem->resource_type
1130 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1131 continue;
1132
1133 if (res->end < res_list_elem->start
1134 || res_list_elem->end < res->start)
1135 continue;
1136 clash = 1;
1137 break;
1138 }
1139 spin_unlock(&acpi_res_lock);
1140
1141 if (clash) {
1142 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1143 printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1144 " conflicts with ACPI region %s"
1145 " [0x%llx-0x%llx]\n",
1146 acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1147 ? KERN_WARNING : KERN_ERR,
1148 ioport ? "I/O" : "Memory", res->name,
1149 (long long) res->start, (long long) res->end,
1150 res_list_elem->name,
1151 (long long) res_list_elem->start,
1152 (long long) res_list_elem->end);
1153 printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1154 }
1155 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1156 return -EBUSY;
1157 }
1158 return 0;
1159 }
1160 EXPORT_SYMBOL(acpi_check_resource_conflict);
1161
1162 int acpi_check_region(resource_size_t start, resource_size_t n,
1163 const char *name)
1164 {
1165 struct resource res = {
1166 .start = start,
1167 .end = start + n - 1,
1168 .name = name,
1169 .flags = IORESOURCE_IO,
1170 };
1171
1172 return acpi_check_resource_conflict(&res);
1173 }
1174 EXPORT_SYMBOL(acpi_check_region);
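
/*
 * Usage sketch for a native driver: check for an ACPI OperationRegion
 * conflict before claiming an I/O range.  With acpi_enforce_resources=strict
 * a conflict yields -EBUSY and the driver backs off; the 0x295 range below
 * is only an example.
 */
#if 0
static int example_claim_ports(void)
{
	int err;

	err = acpi_check_region(0x0295, 0x02, "example-hwmon");
	if (err)
		return err;	/* conflicts with an ACPI region */

	if (!request_region(0x0295, 0x02, "example-hwmon"))
		return -EBUSY;

	return 0;
}
#endif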
1175
1176 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1177 const char *name)
1178 {
1179 struct resource res = {
1180 .start = start,
1181 .end = start + n - 1,
1182 .name = name,
1183 .flags = IORESOURCE_MEM,
1184 };
1185
1186 return acpi_check_resource_conflict(&res);
1187
1188 }
1189 EXPORT_SYMBOL(acpi_check_mem_region);
1190
1191 /*
1192 * Acquire a spinlock.
1193 *
1194 * handle is a pointer to the spinlock_t.
1195 */
1196
1197 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1198 {
1199 acpi_cpu_flags flags;
1200 spin_lock_irqsave(lockp, flags);
1201 return flags;
1202 }
1203
1204 /*
1205 * Release a spinlock. See above.
1206 */
1207
1208 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1209 {
1210 spin_unlock_irqrestore(lockp, flags);
1211 }
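
/*
 * The two helpers above pair like spin_lock_irqsave()/spin_unlock_irqrestore();
 * a caller holding an acpi_spinlock does, schematically:
 */
#if 0
static void example_locked_section(acpi_spinlock lock)
{
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(lock);
	/* ... critical section, local interrupts disabled ... */
	acpi_os_release_lock(lock, flags);
}
#endif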
1212
1213 #ifndef ACPI_USE_LOCAL_CACHE
1214
1215 /*******************************************************************************
1216 *
1217 * FUNCTION: acpi_os_create_cache
1218 *
1219 * PARAMETERS: name - Ascii name for the cache
1220 * size - Size of each cached object
1221 * depth - Maximum depth of the cache (in objects) <ignored>
1222 * cache - Where the new cache object is returned
1223 *
1224 * RETURN: status
1225 *
1226 * DESCRIPTION: Create a cache object
1227 *
1228 ******************************************************************************/
1229
1230 acpi_status
1231 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1232 {
1233 *cache = kmem_cache_create(name, size, 0, 0, NULL);
1234 if (*cache == NULL)
1235 return AE_ERROR;
1236 else
1237 return AE_OK;
1238 }
1239
1240 /*******************************************************************************
1241 *
1242 * FUNCTION: acpi_os_purge_cache
1243 *
1244 * PARAMETERS: Cache - Handle to cache object
1245 *
1246 * RETURN: Status
1247 *
1248 * DESCRIPTION: Free all objects within the requested cache.
1249 *
1250 ******************************************************************************/
1251
1252 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1253 {
1254 kmem_cache_shrink(cache);
1255 return (AE_OK);
1256 }
1257
1258 /*******************************************************************************
1259 *
1260 * FUNCTION: acpi_os_delete_cache
1261 *
1262 * PARAMETERS: Cache - Handle to cache object
1263 *
1264 * RETURN: Status
1265 *
1266 * DESCRIPTION: Free all objects within the requested cache and delete the
1267 * cache object.
1268 *
1269 ******************************************************************************/
1270
1271 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1272 {
1273 kmem_cache_destroy(cache);
1274 return (AE_OK);
1275 }
1276
1277 /*******************************************************************************
1278 *
1279 * FUNCTION: acpi_os_release_object
1280 *
1281 * PARAMETERS: Cache - Handle to cache object
1282 * Object - The object to be released
1283 *
1284 * RETURN: None
1285 *
1286 * DESCRIPTION: Release an object to the specified cache. If cache is full,
1287 * the object is deleted.
1288 *
1289 ******************************************************************************/
1290
1291 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1292 {
1293 kmem_cache_free(cache, object);
1294 return (AE_OK);
1295 }
1296
1297 /******************************************************************************
1298 *
1299 * FUNCTION: acpi_os_validate_interface
1300 *
1301 * PARAMETERS: interface - Requested interface to be validated
1302 *
1303 * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
1304 *
1305 * DESCRIPTION: Match an interface string to the interfaces supported by the
1306 * host. Strings originate from an AML call to the _OSI method.
1307 *
1308 *****************************************************************************/
1309
1310 acpi_status
1311 acpi_os_validate_interface (char *interface)
1312 {
1313 if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1314 return AE_OK;
1315 if (!strcmp("Linux", interface)) {
1316
1317 printk(KERN_NOTICE PREFIX
1318 "BIOS _OSI(Linux) query %s%s\n",
1319 osi_linux.enable ? "honored" : "ignored",
1320 osi_linux.cmdline ? " via cmdline" :
1321 osi_linux.dmi ? " via DMI" : "");
1322
1323 if (osi_linux.enable)
1324 return AE_OK;
1325 }
1326 return AE_SUPPORT;
1327 }
1328
1329 /******************************************************************************
1330 *
1331 * FUNCTION: acpi_os_validate_address
1332 *
1333 * PARAMETERS: space_id - ACPI space ID
1334 * address - Physical address
1335 * length - Address length
1336 *
1337 * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
1338 * should return AE_AML_ILLEGAL_ADDRESS.
1339 *
1340 * DESCRIPTION: Validate a system address via the host OS. Used to validate
1341 * the addresses accessed by AML operation regions.
1342 *
1343 *****************************************************************************/
1344
1345 acpi_status
1346 acpi_os_validate_address (
1347 u8 space_id,
1348 acpi_physical_address address,
1349 acpi_size length,
1350 char *name)
1351 {
1352 struct acpi_res_list *res;
1353 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1354 return AE_OK;
1355
1356 switch (space_id) {
1357 case ACPI_ADR_SPACE_SYSTEM_IO:
1358 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1359 /* Only interference checks against SystemIO and SystemMemory
1360 are needed */
1361 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1362 if (!res)
1363 return AE_OK;
1364 /* ACPI names are fixed at 4 bytes, but use strlcpy to be safe */
1365 strlcpy(res->name, name, 5);
1366 res->start = address;
1367 res->end = address + length - 1;
1368 res->resource_type = space_id;
1369 spin_lock(&acpi_res_lock);
1370 list_add(&res->resource_list, &resource_list_head);
1371 spin_unlock(&acpi_res_lock);
1372 pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1373 "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1374 ? "SystemIO" : "System Memory",
1375 (unsigned long long)res->start,
1376 (unsigned long long)res->end,
1377 res->name);
1378 break;
1379 case ACPI_ADR_SPACE_PCI_CONFIG:
1380 case ACPI_ADR_SPACE_EC:
1381 case ACPI_ADR_SPACE_SMBUS:
1382 case ACPI_ADR_SPACE_CMOS:
1383 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1384 case ACPI_ADR_SPACE_DATA_TABLE:
1385 case ACPI_ADR_SPACE_FIXED_HARDWARE:
1386 break;
1387 }
1388 return AE_OK;
1389 }
1390
1391 #endif