include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
arch/x86/pci/mmconfig-shared.c
/*
 * mmconfig-shared.c - Low-level direct PCI config space access via
 *                     MMCONFIG - common code between i386 and x86-64.
 *
 * This code does:
 * - known chipset handling
 * - ACPI decoding and validation
 *
 * Per-architecture code takes care of the mappings and accesses
 * themselves.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* Indicate if the mmcfg resources have been placed into the resource table. */
static int __initdata pci_mmcfg_resources_inserted;

LIST_HEAD(pci_mmcfg_list);

static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
	if (cfg->res.parent)
		release_resource(&cfg->res);
	list_del(&cfg->list);
	kfree(cfg);
}

static __init void free_all_mmcfg(void)
{
	struct pci_mmcfg_region *cfg, *tmp;

	pci_mmcfg_arch_free();
	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
		pci_mmconfig_remove(cfg);
}

static __init void list_add_sorted(struct pci_mmcfg_region *new)
{
	struct pci_mmcfg_region *cfg;

	/* keep list sorted by segment and starting bus number */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->segment > new->segment ||
		    (cfg->segment == new->segment &&
		     cfg->start_bus >= new->start_bus)) {
			list_add_tail(&new->list, &cfg->list);
			return;
		}
	}
	list_add_tail(&new->list, &pci_mmcfg_list);
}

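/*
 * Note: PCI_MMCFG_BUS_OFFSET() below reflects the usual ECAM layout of
 * 4 KiB of config space per function, i.e. 1 MiB of MMCONFIG space per
 * bus (32 devices x 8 functions), which is how the resource window is
 * sized from the bus range.
 */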
static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
							int end, u64 addr)
{
	struct pci_mmcfg_region *new;
	int num_buses;
	struct resource *res;

	if (addr == 0)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->address = addr;
	new->segment = segment;
	new->start_bus = start;
	new->end_bus = end;

	list_add_sorted(new);

	num_buses = end - start + 1;
	res = &new->res;
	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
	res->name = new->name;

	printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
	       "%pR (base %#lx)\n", segment, start, end, &new->res,
	       (unsigned long) addr);

	return new;
}

struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}

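/*
 * Known-chipset probes: when no trustworthy MCFG table is available, the
 * MMCONFIG base is read straight out of the host bridge.  For the E7520
 * the window register is read at config offset 0xce; as the masking and
 * the "win << 16" below suggest, only bits 15:12 appear to be used,
 * giving the window base in 256 MB granularity, with 0x0000 and 0xf000
 * treated as "not configured".
 */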
static const char __init *pci_mmcfg_e7520(void)
{
	u32 win;
	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);

	win = win & 0xf000;
	if (win == 0x0000 || win == 0xf000)
		return NULL;

	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
		return NULL;

	return "Intel Corporation E7520 Memory Controller Hub";
}

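/*
 * 945-class bridges expose the MMCONFIG window through PCIEXBAR at
 * config offset 0x48: bit 0 enables the window and bits 2:1 select its
 * size (256 MB, 128 MB or 64 MB), which is what the switch below decodes.
 */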
static const char __init *pci_mmcfg_intel_945(void)
{
	u32 pciexbar, mask = 0, len = 0;

	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);

	/* Enable bit */
	if (!(pciexbar & 1))
		return NULL;

	/* Size bits */
	switch ((pciexbar >> 1) & 3) {
	case 0:
		mask = 0xf0000000U;
		len = 0x10000000U;
		break;
	case 1:
		mask = 0xf8000000U;
		len = 0x08000000U;
		break;
	case 2:
		mask = 0xfc000000U;
		len = 0x04000000U;
		break;
	default:
		return NULL;
	}

	/*
	 * Errata #2: things break when the window is not aligned on a
	 * 256 MB boundary.  This can only happen in 64 MB/128 MB mode.
	 */
	if ((pciexbar & mask) & 0x0fffffffU)
		return NULL;

	/* Don't hit the APIC registers and their friends */
	if ((pciexbar & mask) >= 0xf0000000U)
		return NULL;

	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
		return NULL;

	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

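/*
 * On AMD Family 10h the MMCONFIG window is described by the
 * MMIO_CONF_BASE MSR: an enable bit, a base address and a bus-range
 * field giving log2 of the number of decoded buses.  Ranges wider than
 * 256 buses are split into multiple segments of 256 buses (1 << 28
 * bytes of ECAM space) each.
 */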
static const char __init *pci_mmcfg_amd_fam10h(void)
{
	u32 low, high, address;
	u64 base, msr;
	int i;
	unsigned segnbits = 0, busnbits, end_bus;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	if (rdmsr_safe(address, &low, &high))
		return NULL;

	msr = high;
	msr <<= 32;
	msr |= low;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			FAM10H_MMIO_CONF_BUSRANGE_MASK;

	/* A bus range of 0 would only cover bus 0, so skip it. */
	if (!busnbits)
		return NULL;

	if (busnbits > 8) {
		segnbits = busnbits - 8;
		busnbits = 8;
	}

	end_bus = (1 << busnbits) - 1;
	for (i = 0; i < (1 << segnbits); i++)
		if (pci_mmconfig_add(i, 0, end_bus,
				     base + (1<<28) * i) == NULL) {
			free_all_mmcfg();
			return NULL;
		}

	return "AMD Family 10h NB";
}

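/*
 * nVidia MCP55: each host bridge carries an extended-config register at
 * offset 0x90 with an enable bit, a starting bus number, a size code and
 * a base address; the constants below decode it.  Every enabled bridge
 * found on segment 0 contributes its own MMCONFIG region.
 */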
static bool __initdata mcp55_checked;
static const char __init *pci_mmcfg_nvidia_mcp55(void)
{
	int bus;
	int mcp55_mmconf_found = 0;

	static const u32 extcfg_regnum = 0x90;
	static const u32 extcfg_regsize = 4;
	static const u32 extcfg_enable_mask = 1<<31;
	static const u32 extcfg_start_mask = 0xff<<16;
	static const int extcfg_start_shift = 16;
	static const u32 extcfg_size_mask = 0x3<<28;
	static const int extcfg_size_shift = 28;
	static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20};
	static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff};
	static const int extcfg_base_lshift = 25;

	/*
	 * Bail out if ACPI is in use, if another probe (e.g. AMD Fam10h)
	 * has already registered MMCONFIG regions, or if we checked before.
	 */
	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
		return NULL;

	mcp55_checked = true;
	for (bus = 0; bus < 256; bus++) {
		u64 base;
		u32 l, extcfg;
		u16 vendor, device;
		int start, size_index, end;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
			continue;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
				  extcfg_regsize, &extcfg);

		if (!(extcfg & extcfg_enable_mask))
			continue;

		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
		base = extcfg & extcfg_base_mask[size_index];
		/* base may lie above 4G */
		base <<= extcfg_base_lshift;
		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
		end = start + extcfg_sizebus[size_index] - 1;
		if (pci_mmconfig_add(0, start, end, base) == NULL)
			continue;
		mcp55_mmconf_found++;
	}

	if (!mcp55_mmconf_found)
		return NULL;

	return "nVidia MCP55";
}

struct pci_mmcfg_hostbridge_probe {
	u32 bus;
	u32 devfn;
	u32 vendor;
	u32 device;
	const char *(*probe)(void);
};

static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
	{ 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
	  0x0369, pci_mmcfg_nvidia_mcp55 },
};

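/*
 * The fixups below rely on pci_mmcfg_list being kept sorted by segment
 * and starting bus (see list_add_sorted()): a region's end_bus is
 * clamped so it never reaches into the following region.
 */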
static void __init pci_mmcfg_check_end_bus_number(void)
{
	struct pci_mmcfg_region *cfg, *cfgx;

	/* Fixup overlaps */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->end_bus < cfg->start_bus)
			cfg->end_bus = 255;

		/* Don't access the list head! */
		if (cfg->list.next == &pci_mmcfg_list)
			break;

		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
		if (cfg->end_bus >= cfgx->start_bus)
			cfg->end_bus = cfgx->start_bus - 1;
	}
}

static int __init pci_mmcfg_check_hostbridge(void)
{
	u32 l;
	u32 bus, devfn;
	u16 vendor, device;
	int i;
	const char *name;

	if (!raw_pci_ops)
		return 0;

	free_all_mmcfg();

	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
		bus = pci_mmcfg_probes[i].bus;
		devfn = pci_mmcfg_probes[i].devfn;
		raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		name = NULL;
		if (pci_mmcfg_probes[i].vendor == vendor &&
		    pci_mmcfg_probes[i].device == device)
			name = pci_mmcfg_probes[i].probe();

		if (name)
			printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
			       name);
	}

	/* Some firmware reports a bogus end_bus_number; fix it up. */
	pci_mmcfg_check_end_bus_number();

	return !list_empty(&pci_mmcfg_list);
}

static void __init pci_mmcfg_insert_resources(void)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		insert_resource(&iomem_resource, &cfg->res);

	/* Mark that the resources have been inserted. */
	pci_mmcfg_resources_inserted = 1;
}

static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
					      void *data)
{
	struct resource *mcfg_res = data;
	struct acpi_resource_address64 address;
	acpi_status status;

	if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32 =
			&res->data.fixed_memory32;
		if (!fixmem32)
			return AE_OK;
		if ((mcfg_res->start >= fixmem32->address) &&
		    (mcfg_res->end < (fixmem32->address +
				      fixmem32->address_length))) {
			mcfg_res->flags = 1;
			return AE_CTRL_TERMINATE;
		}
	}
	if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
	    (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
		return AE_OK;

	status = acpi_resource_to_address64(res, &address);
	if (ACPI_FAILURE(status) ||
	    (address.address_length <= 0) ||
	    (address.resource_type != ACPI_MEMORY_RANGE))
		return AE_OK;

	if ((mcfg_res->start >= address.minimum) &&
	    (mcfg_res->end < (address.minimum + address.address_length))) {
		mcfg_res->flags = 1;
		return AE_CTRL_TERMINATE;
	}
	return AE_OK;
}

static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl,
					       void *context, void **rv)
{
	struct resource *mcfg_res = context;

	acpi_walk_resources(handle, METHOD_NAME__CRS,
			    check_mcfg_resource, context);

	if (mcfg_res->flags)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

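/*
 * ACPI-based validation: walk the _CRS resources of the motherboard
 * devices (PNP0C01, then PNP0C02 as a fallback) and report whether the
 * [start, end) range is fully contained in one of them.
 */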
static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
{
	struct resource mcfg_res;

	mcfg_res.start = start;
	mcfg_res.end = end - 1;
	mcfg_res.flags = 0;

	acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);

	if (!mcfg_res.flags)
		acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
				 NULL);

	return mcfg_res.flags;
}

typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);

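/*
 * If the full MMCONFIG range is not reserved, keep halving the size
 * (down to a 16 MB minimum) until a reserved prefix is found; when that
 * happens the region is shrunk accordingly and end_bus is recalculated.
 */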
static int __init is_mmconf_reserved(check_reserved_t is_reserved,
				     struct pci_mmcfg_region *cfg, int with_e820)
{
	u64 addr = cfg->res.start;
	u64 size = resource_size(&cfg->res);
	u64 old_size = size;
	int valid = 0, num_buses;

	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
		size >>= 1;
		if (size < (16UL<<20))
			break;
	}

	if (size >= (16UL<<20) || size == old_size) {
		printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
		       &cfg->res,
		       with_e820 ? "E820" : "ACPI motherboard resources");
		valid = 1;

		if (old_size != size) {
			/* update end_bus */
			cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
			num_buses = cfg->end_bus - cfg->start_bus + 1;
			cfg->res.end = cfg->res.start +
				       PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
			snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
				 "PCI MMCONFIG %04x [bus %02x-%02x]",
				 cfg->segment, cfg->start_bus, cfg->end_bus);
			printk(KERN_INFO PREFIX
			       "MMCONFIG for %04x [bus %02x-%02x] "
			       "at %pR (base %#lx) (size reduced!)\n",
			       cfg->segment, cfg->start_bus, cfg->end_bus,
			       &cfg->res, (unsigned long) cfg->address);
		}
	}

	return valid;
}

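/*
 * Validation policy: in the late pass, prefer the ACPI motherboard
 * resources; if that fails (or in the early pass), fall back to checking
 * that the region is marked reserved in the E820 map.  A region that
 * passes neither test causes MMCONFIG to be disabled entirely.
 */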
static void __init pci_mmcfg_reject_broken(int early)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		int valid = 0;

		if (!early && !acpi_disabled)
			valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);

		if (valid)
			continue;

		if (!early)
			printk(KERN_ERR FW_BUG PREFIX
			       "MMCONFIG at %pR not reserved in "
			       "ACPI motherboard resources\n", &cfg->res);

		/*
		 * Don't try the E820 check unless type 1 config accesses
		 * are available (what about type 2?).
		 */
		if (raw_pci_ops)
			valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);

		if (!valid)
			goto reject;
	}

	return;

reject:
	printk(KERN_INFO PREFIX "not using MMCONFIG\n");
	free_all_mmcfg();
}

static int __initdata known_bridge;

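/*
 * MCFG entries above 4 GB are only trusted on SGI platforms or when the
 * table revision is at least 1 and the BIOS is dated 2010 or later;
 * otherwise they are assumed to be firmware bugs and dropped.
 */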
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
					struct acpi_mcfg_allocation *cfg)
{
	int year;

	if (cfg->address < 0xFFFFFFFF)
		return 0;

	if (!strcmp(mcfg->header.oem_id, "SGI"))
		return 0;

	if (mcfg->header.revision >= 1) {
		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
		    year >= 2010)
			return 0;
	}

	printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
	       "is above 4GB, ignored\n", cfg->pci_segment,
	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
	return -EINVAL;
}

static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
	struct acpi_table_mcfg *mcfg;
	struct acpi_mcfg_allocation *cfg_table, *cfg;
	unsigned long i;
	int entries;

	if (!header)
		return -EINVAL;

	mcfg = (struct acpi_table_mcfg *)header;

	/* How many config structures do we have? */
	free_all_mmcfg();
	entries = 0;
	i = header->length - sizeof(struct acpi_table_mcfg);
	while (i >= sizeof(struct acpi_mcfg_allocation)) {
		entries++;
		i -= sizeof(struct acpi_mcfg_allocation);
	}
	if (entries == 0) {
		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
		return -ENODEV;
	}

	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
	for (i = 0; i < entries; i++) {
		cfg = &cfg_table[i];
		if (acpi_mcfg_check_entry(mcfg, cfg)) {
			free_all_mmcfg();
			return -ENODEV;
		}

		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
				     cfg->end_bus_number, cfg->address) == NULL) {
			printk(KERN_WARNING PREFIX
			       "no memory for MCFG entries\n");
			free_all_mmcfg();
			return -ENOMEM;
		}
	}

	return 0;
}

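/*
 * __pci_mmcfg_init() runs twice (early and late).  Known host bridges
 * are only probed in the early pass; if none is found, the ACPI/SFI
 * MCFG table is parsed instead.  Either way, regions that are not
 * properly reserved are rejected before the arch code maps what is left.
 */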
static void __init __pci_mmcfg_init(int early)
{
	/* MMCONFIG disabled */
	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return;

	/* MMCONFIG already enabled */
	if (!early && !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF))
		return;

	/* A known bridge was handled in the early pass; nothing left to do. */
	if (known_bridge)
		return;

	if (early) {
		if (pci_mmcfg_check_hostbridge())
			known_bridge = 1;
	}

	if (!known_bridge)
		acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);

	pci_mmcfg_reject_broken(early);

	if (list_empty(&pci_mmcfg_list))
		return;

	if (pci_mmcfg_arch_init())
		pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
	else {
		/*
		 * Signal not to attempt to insert mmcfg resources because
		 * the architecture mmcfg setup could not initialize.
		 */
		pci_mmcfg_resources_inserted = 1;
	}
}

void __init pci_mmcfg_early_init(void)
{
	__pci_mmcfg_init(1);
}

void __init pci_mmcfg_late_init(void)
{
	__pci_mmcfg_init(0);
}

static int __init pci_mmcfg_late_insert_resources(void)
{
	/*
	 * If resources are already inserted or we are not using MMCONFIG,
	 * don't insert the resources.
	 */
	if ((pci_mmcfg_resources_inserted == 1) ||
	    (pci_probe & PCI_PROBE_MMCONF) == 0 ||
	    list_empty(&pci_mmcfg_list))
		return 1;

	/*
	 * Attempt to insert the mmcfg resources but not with the busy flag
	 * marked so it won't cause request errors when __request_region is
	 * called.
	 */
	pci_mmcfg_insert_resources();

	return 0;
}

/*
 * Perform MMCONFIG resource insertion after PCI initialization to allow for
 * misprogrammed MCFG tables that state larger sizes but actually conflict
 * with other system resources.
 */
late_initcall(pci_mmcfg_late_insert_resources);