/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, or the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
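
/*
 * Illustrative sketch of the caller side: a driver reserving a fixed MMIO
 * window with request_resource() and dropping it again on teardown.  The
 * address range and the name "example-dev" are made-up example values,
 * not something defined in this file.
 */
#if 0	/* example only, not built */
static struct resource example_mmio = {
	.name	= "example-dev",
	.start	= 0xfebf0000,
	.end	= 0xfebf0fff,
	.flags	= IORESOURCE_MEM,
};

static int example_claim_mmio(void)
{
	/* Fails with -EBUSY if the range overlaps an already-claimed resource. */
	int ret = request_resource(&iomem_resource, &example_mmio);

	if (ret)
		return ret;
	/* ... use the region, then release_resource(&example_mmio) later ... */
	return 0;
}
#endif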

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the callback against all memory ranges of "System RAM"
 * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * Currently, this function is only used for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if specified address is
 * registered as "System RAM" in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
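
/*
 * Illustrative sketch of a walk_system_ram_range() callback, counting the
 * "System RAM" pages inside a pfn window (compare __is_ram above).  The
 * callback signature matches the func parameter; the function names here
 * are made-up example values.
 */
#if 0	/* example only, not built */
static int example_count_ram(unsigned long pfn, unsigned long nr_pages,
			     void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;	/* returning non-zero would stop the walk */
}

static unsigned long example_ram_pages(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long count = 0;

	walk_system_ram_range(start_pfn, nr_pages, &count, example_count_ram);
	return count;
}
#endif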

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 resource_size_t (*alignf)(void *,
						   const struct resource *,
						   resource_size_t,
						   resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;
	struct resource tmp = *new;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = this->start - 1;
		else
			tmp.end = root->end;
		if (tmp.start < min)
			tmp.start = min;
		if (tmp.end > max)
			tmp.end = max;
		tmp.start = ALIGN(tmp.start, align);
		if (alignf)
			tmp.start = alignf(alignf_data, &tmp, size, align);
		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
			new->start = tmp.start;
			new->end = tmp.start + size - 1;
			return 0;
		}
		if (!this)
			break;
		tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum acceptable start address
 * @max: maximum acceptable end address
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
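
/*
 * Illustrative sketch: asking allocate_resource() for any free 256-byte
 * I/O port window below 0x10000, aligned to 256 bytes.  On success the
 * chosen window is written back into example_ports.start/.end.  All
 * names and bounds below are made-up example values.
 */
#if 0	/* example only, not built */
static struct resource example_ports = {
	.name	= "example-ports",
	.flags	= IORESOURCE_IO,
};

static int example_alloc_ports(void)
{
	return allocate_resource(&ioport_resource, &example_ports,
				 0x100,		/* size */
				 0x0,		/* min start */
				 0xffff,	/* max end */
				 0x100,		/* align */
				 NULL, NULL);	/* no custom alignf */
}
#endif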

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
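
/*
 * Illustrative sketch: inserting a firmware-described window with
 * insert_resource(), so that any resources already registered inside its
 * range become children of the new resource rather than conflicts.  The
 * name and address range are made-up example values.
 */
#if 0	/* example only, not built */
static struct resource example_fw_window = {
	.name	= "example-fw-window",
	.start	= 0xd0000000,
	.end	= 0xdfffffff,
	.flags	= IORESOURCE_MEM,
};

static int example_insert(void)
{
	/* -EBUSY means a conflict that does not nest inside the new range. */
	return insert_resource(&iomem_resource, &example_fw_window);
}
#endif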

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}
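
/*
 * Illustrative sketch: growing a resource in place with adjust_resource().
 * res is assumed to be a resource already in the tree; doubling the size
 * is a made-up example.  The call fails with -EBUSY if the new range
 * would leave the parent, collide with a sibling, or expose a child.
 */
#if 0	/* example only, not built */
static int example_grow(struct resource *res)
{
	return adjust_resource(res, res->start, 2 * resource_size(res));
}
#endif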

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

EXPORT_SYMBOL(adjust_resource);

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}

		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
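
/*
 * Illustrative sketch: drivers normally reach __request_region() through
 * the request_region()/request_mem_region() wrappers in <linux/ioport.h>
 * rather than calling it directly.  The address range and name are
 * made-up example values.
 */
#if 0	/* example only, not built */
static int example_probe(void)
{
	if (!request_mem_region(0xfebf0000, 0x1000, "example-dev"))
		return -EBUSY;

	/* ... ioremap and use the region ... */

	release_mem_region(0xfebf0000, 0x1000);
	return 0;
}
#endif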

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it. It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
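
/*
 * Illustrative sketch: the device-managed wrapper
 * devm_request_mem_region() (from <linux/ioport.h>) routes through
 * __devm_request_region() above, so the region is released automatically
 * via devres when the device is unbound; no explicit release is needed
 * in the error or remove paths.  The range and name are made-up values.
 */
#if 0	/* example only, not built */
static int example_devm_probe(struct device *dev)
{
	if (!devm_request_mem_region(dev, 0xfebf0000, 0x1000, "example-dev"))
		return -EBUSY;

	/* region is released through devres when the device goes away */
	return 0;
}
#endif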

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
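
/*
 * Illustrative usage (the numbers are made-up example values): booting with
 *
 *	reserve=0x300,32
 *
 * marks I/O ports 0x300-0x31f busy.  Per the code above, a start address
 * of 0x10000 or higher reserves from iomem_resource instead of
 * ioport_resource.
 */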

/*
 * Check whether the requested addr and size span more than any single
 * slot in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree;
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);