arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

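/*
 * PCI device IDs of the northbridge "misc" control devices on K8 and the
 * derived families; the enumeration code below scans for these.
 */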
struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

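/*
 * Scan forward from @dev for the next PCI device that matches one of the
 * entries in @ids; returns NULL when no further match exists.
 */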
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

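/*
 * Count the northbridge misc devices, allocate one struct amd_northbridge
 * per node and cache the misc PCI handles, then record which optional
 * features (GART, L3 cache index disable) this CPU family supports.
 */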
int amd_cache_northbridges(void)
{
	int i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

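/*
 * Illustrative sketch (not taken from this file): a typical caller first
 * makes sure the cache is populated and a feature is present, then talks
 * to a node's misc device directly, e.g.:
 *
 *	if (amd_cache_northbridges() < 0 ||
 *	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 *		return;
 *	pci_read_config_dword(node_to_amd_nb(node)->misc, reg, &val);
 */
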
/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
	struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return 1;
	return 0;
}

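/*
 * Cache the per-node GART flush words (northbridge register 0x9c) so that
 * amd_flush_garts() does not have to re-read them on every flush. On
 * allocation failure the GART feature flag is cleared again.
 */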
int amd_cache_gart(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

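/*
 * Flush the GART TLB on every northbridge by writing back each node's
 * cached flush word with the flush bit (bit 0) set, then spin until the
 * hardware clears that bit again.
 */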
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

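/*
 * Run once at boot, after the PCI subsystem is up: populate the
 * northbridge cache and, if GART is supported, the flush words.
 */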
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);