/*
 * drivers/acpi/resource.c - ACPI device resources interpretation.
 *
 * Copyright (C) 2012, Intel Corp.
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
25 #include <linux/acpi.h>
26 #include <linux/device.h>
27 #include <linux/export.h>
28 #include <linux/ioport.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
/*
 * On x86, IRQ 0 (timer) and IRQ 2 (cascade) are never valid device IRQs,
 * so filter them out; other architectures accept any GSI.
 * NOTE(review): the two conflicting definitions in the corrupted text must
 * have been alternatives of a preprocessor conditional — restored here.
 */
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
#else
#define valid_IRQ(i) (true)
#endif
38 static unsigned long acpi_dev_memresource_flags(u64 len
, u8 write_protect
,
41 unsigned long flags
= IORESOURCE_MEM
;
44 flags
|= IORESOURCE_DISABLED
;
46 if (write_protect
== ACPI_READ_WRITE_MEMORY
)
47 flags
|= IORESOURCE_MEM_WRITEABLE
;
50 flags
|= IORESOURCE_WINDOW
;
55 static void acpi_dev_get_memresource(struct resource
*res
, u64 start
, u64 len
,
59 res
->end
= start
+ len
- 1;
60 res
->flags
= acpi_dev_memresource_flags(len
, write_protect
, false);
64 * acpi_dev_resource_memory - Extract ACPI memory resource information.
65 * @ares: Input ACPI resource object.
66 * @res: Output generic resource object.
68 * Check if the given ACPI resource object represents a memory resource and
69 * if that's the case, use the information in it to populate the generic
70 * resource object pointed to by @res.
72 bool acpi_dev_resource_memory(struct acpi_resource
*ares
, struct resource
*res
)
74 struct acpi_resource_memory24
*memory24
;
75 struct acpi_resource_memory32
*memory32
;
76 struct acpi_resource_fixed_memory32
*fixed_memory32
;
79 case ACPI_RESOURCE_TYPE_MEMORY24
:
80 memory24
= &ares
->data
.memory24
;
81 if (!memory24
->minimum
&& !memory24
->address_length
)
83 acpi_dev_get_memresource(res
, memory24
->minimum
,
84 memory24
->address_length
,
85 memory24
->write_protect
);
87 case ACPI_RESOURCE_TYPE_MEMORY32
:
88 memory32
= &ares
->data
.memory32
;
89 if (!memory32
->minimum
&& !memory32
->address_length
)
91 acpi_dev_get_memresource(res
, memory32
->minimum
,
92 memory32
->address_length
,
93 memory32
->write_protect
);
95 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32
:
96 fixed_memory32
= &ares
->data
.fixed_memory32
;
97 if (!fixed_memory32
->address
&& !fixed_memory32
->address_length
)
99 acpi_dev_get_memresource(res
, fixed_memory32
->address
,
100 fixed_memory32
->address_length
,
101 fixed_memory32
->write_protect
);
108 EXPORT_SYMBOL_GPL(acpi_dev_resource_memory
);
110 static unsigned int acpi_dev_ioresource_flags(u64 start
, u64 end
, u8 io_decode
,
113 int flags
= IORESOURCE_IO
;
115 if (io_decode
== ACPI_DECODE_16
)
116 flags
|= IORESOURCE_IO_16BIT_ADDR
;
118 if (start
> end
|| end
>= 0x10003)
119 flags
|= IORESOURCE_DISABLED
;
122 flags
|= IORESOURCE_WINDOW
;
127 static void acpi_dev_get_ioresource(struct resource
*res
, u64 start
, u64 len
,
130 u64 end
= start
+ len
- 1;
134 res
->flags
= acpi_dev_ioresource_flags(start
, end
, io_decode
, false);
138 * acpi_dev_resource_io - Extract ACPI I/O resource information.
139 * @ares: Input ACPI resource object.
140 * @res: Output generic resource object.
142 * Check if the given ACPI resource object represents an I/O resource and
143 * if that's the case, use the information in it to populate the generic
144 * resource object pointed to by @res.
146 bool acpi_dev_resource_io(struct acpi_resource
*ares
, struct resource
*res
)
148 struct acpi_resource_io
*io
;
149 struct acpi_resource_fixed_io
*fixed_io
;
151 switch (ares
->type
) {
152 case ACPI_RESOURCE_TYPE_IO
:
154 if (!io
->minimum
&& !io
->address_length
)
156 acpi_dev_get_ioresource(res
, io
->minimum
,
160 case ACPI_RESOURCE_TYPE_FIXED_IO
:
161 fixed_io
= &ares
->data
.fixed_io
;
162 if (!fixed_io
->address
&& !fixed_io
->address_length
)
164 acpi_dev_get_ioresource(res
, fixed_io
->address
,
165 fixed_io
->address_length
,
173 EXPORT_SYMBOL_GPL(acpi_dev_resource_io
);
176 * acpi_dev_resource_address_space - Extract ACPI address space information.
177 * @ares: Input ACPI resource object.
178 * @res: Output generic resource object.
180 * Check if the given ACPI resource object represents an address space resource
181 * and if that's the case, use the information in it to populate the generic
182 * resource object pointed to by @res.
184 bool acpi_dev_resource_address_space(struct acpi_resource
*ares
,
185 struct resource
*res
)
188 struct acpi_resource_address64 addr
;
193 switch (ares
->type
) {
194 case ACPI_RESOURCE_TYPE_ADDRESS16
:
195 case ACPI_RESOURCE_TYPE_ADDRESS32
:
196 case ACPI_RESOURCE_TYPE_ADDRESS64
:
202 status
= acpi_resource_to_address64(ares
, &addr
);
203 if (ACPI_FAILURE(status
))
206 res
->start
= addr
.minimum
;
207 res
->end
= addr
.maximum
;
208 window
= addr
.producer_consumer
== ACPI_PRODUCER
;
210 switch(addr
.resource_type
) {
211 case ACPI_MEMORY_RANGE
:
212 len
= addr
.maximum
- addr
.minimum
+ 1;
213 res
->flags
= acpi_dev_memresource_flags(len
,
214 addr
.info
.mem
.write_protect
,
218 io_decode
= addr
.granularity
== 0xfff ?
219 ACPI_DECODE_10
: ACPI_DECODE_16
;
220 res
->flags
= acpi_dev_ioresource_flags(addr
.minimum
,
224 case ACPI_BUS_NUMBER_RANGE
:
225 res
->flags
= IORESOURCE_BUS
;
233 EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space
);
236 * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
237 * @ares: Input ACPI resource object.
238 * @res: Output generic resource object.
240 * Check if the given ACPI resource object represents an extended address space
241 * resource and if that's the case, use the information in it to populate the
242 * generic resource object pointed to by @res.
244 bool acpi_dev_resource_ext_address_space(struct acpi_resource
*ares
,
245 struct resource
*res
)
247 struct acpi_resource_extended_address64
*ext_addr
;
252 if (ares
->type
!= ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64
)
255 ext_addr
= &ares
->data
.ext_address64
;
257 res
->start
= ext_addr
->minimum
;
258 res
->end
= ext_addr
->maximum
;
259 window
= ext_addr
->producer_consumer
== ACPI_PRODUCER
;
261 switch(ext_addr
->resource_type
) {
262 case ACPI_MEMORY_RANGE
:
263 len
= ext_addr
->maximum
- ext_addr
->minimum
+ 1;
264 res
->flags
= acpi_dev_memresource_flags(len
,
265 ext_addr
->info
.mem
.write_protect
,
269 io_decode
= ext_addr
->granularity
== 0xfff ?
270 ACPI_DECODE_10
: ACPI_DECODE_16
;
271 res
->flags
= acpi_dev_ioresource_flags(ext_addr
->minimum
,
275 case ACPI_BUS_NUMBER_RANGE
:
276 res
->flags
= IORESOURCE_BUS
;
284 EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space
);
287 * acpi_dev_irq_flags - Determine IRQ resource flags.
288 * @triggering: Triggering type as provided by ACPI.
289 * @polarity: Interrupt polarity as provided by ACPI.
290 * @shareable: Whether or not the interrupt is shareable.
292 unsigned long acpi_dev_irq_flags(u8 triggering
, u8 polarity
, u8 shareable
)
296 if (triggering
== ACPI_LEVEL_SENSITIVE
)
297 flags
= polarity
== ACPI_ACTIVE_LOW
?
298 IORESOURCE_IRQ_LOWLEVEL
: IORESOURCE_IRQ_HIGHLEVEL
;
300 flags
= polarity
== ACPI_ACTIVE_LOW
?
301 IORESOURCE_IRQ_LOWEDGE
: IORESOURCE_IRQ_HIGHEDGE
;
303 if (shareable
== ACPI_SHARED
)
304 flags
|= IORESOURCE_IRQ_SHAREABLE
;
306 return flags
| IORESOURCE_IRQ
;
308 EXPORT_SYMBOL_GPL(acpi_dev_irq_flags
);
310 static void acpi_dev_irqresource_disabled(struct resource
*res
, u32 gsi
)
314 res
->flags
= IORESOURCE_IRQ
| IORESOURCE_DISABLED
;
317 static void acpi_dev_get_irqresource(struct resource
*res
, u32 gsi
,
318 u8 triggering
, u8 polarity
, u8 shareable
,
323 if (!valid_IRQ(gsi
)) {
324 acpi_dev_irqresource_disabled(res
, gsi
);
329 * In IO-APIC mode, use overrided attribute. Two reasons:
330 * 1. BIOS bug in DSDT
331 * 2. BIOS uses IO-APIC mode Interrupt Source Override
333 * We do this only if we are dealing with IRQ() or IRQNoFlags()
334 * resource (the legacy ISA resources). With modern ACPI 5 devices
335 * using extended IRQ descriptors we take the IRQ configuration
336 * from _CRS directly.
338 if (legacy
&& !acpi_get_override_irq(gsi
, &t
, &p
)) {
339 u8 trig
= t
? ACPI_LEVEL_SENSITIVE
: ACPI_EDGE_SENSITIVE
;
340 u8 pol
= p
? ACPI_ACTIVE_LOW
: ACPI_ACTIVE_HIGH
;
342 if (triggering
!= trig
|| polarity
!= pol
) {
343 pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi
,
344 t
? "level" : "edge", p
? "low" : "high");
350 res
->flags
= acpi_dev_irq_flags(triggering
, polarity
, shareable
);
351 irq
= acpi_register_gsi(NULL
, gsi
, triggering
, polarity
);
356 acpi_dev_irqresource_disabled(res
, gsi
);
361 * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
362 * @ares: Input ACPI resource object.
363 * @index: Index into the array of GSIs represented by the resource.
364 * @res: Output generic resource object.
366 * Check if the given ACPI resource object represents an interrupt resource
367 * and @index does not exceed the resource's interrupt count (true is returned
368 * in that case regardless of the results of the other checks)). If that's the
369 * case, register the GSI corresponding to @index from the array of interrupts
370 * represented by the resource and populate the generic resource object pointed
371 * to by @res accordingly. If the registration of the GSI is not successful,
372 * IORESOURCE_DISABLED will be set it that object's flags.
374 bool acpi_dev_resource_interrupt(struct acpi_resource
*ares
, int index
,
375 struct resource
*res
)
377 struct acpi_resource_irq
*irq
;
378 struct acpi_resource_extended_irq
*ext_irq
;
380 switch (ares
->type
) {
381 case ACPI_RESOURCE_TYPE_IRQ
:
383 * Per spec, only one interrupt per descriptor is allowed in
384 * _CRS, but some firmware violates this, so parse them all.
386 irq
= &ares
->data
.irq
;
387 if (index
>= irq
->interrupt_count
) {
388 acpi_dev_irqresource_disabled(res
, 0);
391 acpi_dev_get_irqresource(res
, irq
->interrupts
[index
],
392 irq
->triggering
, irq
->polarity
,
393 irq
->sharable
, true);
395 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ
:
396 ext_irq
= &ares
->data
.extended_irq
;
397 if (index
>= ext_irq
->interrupt_count
) {
398 acpi_dev_irqresource_disabled(res
, 0);
401 acpi_dev_get_irqresource(res
, ext_irq
->interrupts
[index
],
402 ext_irq
->triggering
, ext_irq
->polarity
,
403 ext_irq
->sharable
, false);
411 EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt
);
414 * acpi_dev_free_resource_list - Free resource from %acpi_dev_get_resources().
415 * @list: The head of the resource list to free.
417 void acpi_dev_free_resource_list(struct list_head
*list
)
419 struct resource_list_entry
*rentry
, *re
;
421 list_for_each_entry_safe(rentry
, re
, list
, node
) {
422 list_del(&rentry
->node
);
426 EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list
);
/*
 * Bookkeeping passed through acpi_walk_resources() while building the
 * resource list in acpi_dev_get_resources().
 */
struct res_proc_context {
	struct list_head *list;		/* Output list of resource entries. */
	int (*preproc)(struct acpi_resource *, void *); /* Caller's hook. */
	void *preproc_data;		/* Opaque data for @preproc. */
	int count;			/* Number of entries added so far. */
	int error;			/* First error encountered, if any. */
};
436 static acpi_status
acpi_dev_new_resource_entry(struct resource
*r
,
437 struct res_proc_context
*c
)
439 struct resource_list_entry
*rentry
;
441 rentry
= kmalloc(sizeof(*rentry
), GFP_KERNEL
);
447 list_add_tail(&rentry
->node
, c
->list
);
452 static acpi_status
acpi_dev_process_resource(struct acpi_resource
*ares
,
455 struct res_proc_context
*c
= context
;
462 ret
= c
->preproc(ares
, c
->preproc_data
);
465 return AE_CTRL_TERMINATE
;
466 } else if (ret
> 0) {
471 memset(&r
, 0, sizeof(r
));
473 if (acpi_dev_resource_memory(ares
, &r
)
474 || acpi_dev_resource_io(ares
, &r
)
475 || acpi_dev_resource_address_space(ares
, &r
)
476 || acpi_dev_resource_ext_address_space(ares
, &r
))
477 return acpi_dev_new_resource_entry(&r
, c
);
479 for (i
= 0; acpi_dev_resource_interrupt(ares
, i
, &r
); i
++) {
482 status
= acpi_dev_new_resource_entry(&r
, c
);
483 if (ACPI_FAILURE(status
))
491 * acpi_dev_get_resources - Get current resources of a device.
492 * @adev: ACPI device node to get the resources for.
493 * @list: Head of the resultant list of resources (must be empty).
494 * @preproc: The caller's preprocessing routine.
495 * @preproc_data: Pointer passed to the caller's preprocessing routine.
497 * Evaluate the _CRS method for the given device node and process its output by
498 * (1) executing the @preproc() rountine provided by the caller, passing the
499 * resource pointer and @preproc_data to it as arguments, for each ACPI resource
500 * returned and (2) converting all of the returned ACPI resources into struct
501 * resource objects if possible. If the return value of @preproc() in step (1)
502 * is different from 0, step (2) is not applied to the given ACPI resource and
503 * if that value is negative, the whole processing is aborted and that value is
504 * returned as the final error code.
506 * The resultant struct resource objects are put on the list pointed to by
507 * @list, that must be empty initially, as members of struct resource_list_entry
508 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
511 * The number of resources in the output list is returned on success, an error
512 * code reflecting the error condition is returned otherwise.
514 int acpi_dev_get_resources(struct acpi_device
*adev
, struct list_head
*list
,
515 int (*preproc
)(struct acpi_resource
*, void *),
518 struct res_proc_context c
;
519 acpi_handle not_used
;
522 if (!adev
|| !adev
->handle
|| !list_empty(list
))
525 status
= acpi_get_handle(adev
->handle
, METHOD_NAME__CRS
, ¬_used
);
526 if (ACPI_FAILURE(status
))
531 c
.preproc_data
= preproc_data
;
534 status
= acpi_walk_resources(adev
->handle
, METHOD_NAME__CRS
,
535 acpi_dev_process_resource
, &c
);
536 if (ACPI_FAILURE(status
)) {
537 acpi_dev_free_resource_list(list
);
538 return c
.error
? c
.error
: -EIO
;
543 EXPORT_SYMBOL_GPL(acpi_dev_get_resources
);
545 struct reserved_region
{
546 struct list_head node
;
551 static LIST_HEAD(reserved_io_regions
);
552 static LIST_HEAD(reserved_mem_regions
);
554 static int request_range(u64 start
, u64 end
, u8 space_id
, unsigned long flags
,
557 unsigned int length
= end
- start
+ 1;
558 struct resource
*res
;
560 res
= space_id
== ACPI_ADR_SPACE_SYSTEM_IO
?
561 request_region(start
, length
, desc
) :
562 request_mem_region(start
, length
, desc
);
566 res
->flags
&= ~flags
;
570 static int add_region_before(u64 start
, u64 end
, u8 space_id
,
571 unsigned long flags
, char *desc
,
572 struct list_head
*head
)
574 struct reserved_region
*reg
;
577 reg
= kmalloc(sizeof(*reg
), GFP_KERNEL
);
581 error
= request_range(start
, end
, space_id
, flags
, desc
);
589 list_add_tail(®
->node
, head
);
594 * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
595 * @start: Starting address of the region.
596 * @length: Length of the region.
597 * @space_id: Identifier of address space to reserve the region from.
598 * @flags: Resource flags to clear for the region after requesting it.
599 * @desc: Region description (for messages).
601 * Reserve an I/O or memory region as a system resource to prevent others from
602 * using it. If the new region overlaps with one of the regions (in the given
603 * address space) already reserved by this routine, only the non-overlapping
604 * parts of it will be reserved.
606 * Returned is either 0 (success) or a negative error code indicating a resource
607 * reservation problem. It is the code of the first encountered error, but the
608 * routine doesn't abort until it has attempted to request all of the parts of
609 * the new region that don't overlap with other regions reserved previously.
611 * The resources requested by this routine are never released.
613 int acpi_reserve_region(u64 start
, unsigned int length
, u8 space_id
,
614 unsigned long flags
, char *desc
)
616 struct list_head
*regions
;
617 struct reserved_region
*reg
;
618 u64 end
= start
+ length
- 1;
619 int ret
= 0, error
= 0;
621 if (space_id
== ACPI_ADR_SPACE_SYSTEM_IO
)
622 regions
= &reserved_io_regions
;
623 else if (space_id
== ACPI_ADR_SPACE_SYSTEM_MEMORY
)
624 regions
= &reserved_mem_regions
;
628 if (list_empty(regions
))
629 return add_region_before(start
, end
, space_id
, flags
, desc
, regions
);
631 list_for_each_entry(reg
, regions
, node
)
632 if (reg
->start
== end
+ 1) {
633 /* The new region can be prepended to this one. */
634 ret
= request_range(start
, end
, space_id
, flags
, desc
);
639 } else if (reg
->start
> end
) {
640 /* No overlap. Add the new region here and get out. */
641 return add_region_before(start
, end
, space_id
, flags
,
643 } else if (reg
->end
== start
- 1) {
645 } else if (reg
->end
>= start
) {
649 /* The new region goes after the last existing one. */
650 return add_region_before(start
, end
, space_id
, flags
, desc
, regions
);
654 * The new region overlaps an existing one.
656 * The head part of the new region immediately preceding the existing
657 * overlapping one can be combined with it right away.
659 if (reg
->start
> start
) {
660 error
= request_range(start
, reg
->start
- 1, space_id
, flags
, desc
);
669 * The new region is adjacent to an existing one. If it extends beyond
670 * that region all the way to the next one, it is possible to combine
673 while (reg
->end
< end
) {
674 struct reserved_region
*next
= NULL
;
675 u64 a
= reg
->end
+ 1, b
= end
;
677 if (!list_is_last(®
->node
, regions
)) {
678 next
= list_next_entry(reg
, node
);
679 if (next
->start
<= end
)
682 error
= request_range(a
, b
, space_id
, flags
, desc
);
684 if (next
&& next
->start
== b
+ 1) {
685 reg
->end
= next
->end
;
686 list_del(&next
->node
);
702 return ret
? ret
: error
;
704 EXPORT_SYMBOL_GPL(acpi_reserve_region
);