/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

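/*
 * Note (editor's illustration, not in the original source): a worked
 * example of how this table is consumed. cpuid(2) packs descriptor
 * bytes into EAX..EDX; each byte is looked up here, so e.g. descriptor
 * 0x2c adds 32 (KB) to the L1 data total and 0x23 adds MB(1) == 1024 KB
 * to the L3 total. Descriptors not listed (TLBs etc.) are simply
 * skipped by the lookup loop in init_intel_cacheinfo() below.
 */
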
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

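/*
 * Note (editor's assumption): these unions mirror the CPUID leaf 4
 * register layout. The size-related fields are stored biased by -1,
 * so the total cache size is recovered later as
 *	(sets + 1) * (line_size + 1) * (partitions + 1) * (ways + 1)
 * exactly as cpuid4_cache_lookup_regs() computes below.
 */
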
struct amd_l3_cache {
	struct	 pci_dev *dev;
	bool	 can_disable;
	unsigned indices;
	u8	 subcaches[4];
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

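/*
 * Note (editor's assumption): assocs[] translates AMD's 4-bit
 * associativity encoding (as reported by CPUID 0x80000006 for L2/L3)
 * into an actual way count, e.g. encoding 0x6 means 8 ways and 0xf
 * means fully associative. levels[]/types[] map the emulated leaf
 * index (0 = L1D, 1 = L1I, 2 = L2, 3 = L3) to the CPUID4-style cache
 * level and type values used below.
 */
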
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

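/*
 * Note (editor's illustration, not in the original source): a worked
 * example of the emulation above. For a 512 KB, 16-way L2 with 64-byte
 * lines, number_of_sets is 512*1024 / 64 / 16 - 1 = 511, i.e. 512 sets
 * stored in the usual CPUID4 "value minus one" encoding.
 */
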
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD

/*
 * L3 cache descriptors
 */
static struct amd_l3_cache **__cpuinitdata l3_caches;

static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(l3->dev, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}

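/*
 * Note (editor's assumption): northbridge register 0x1C4 carries
 * per-subcache disable bits; a cleared bit means that subcache half is
 * usable, so each sc<n> above counts usable halves. The index space is
 * then sized from the largest subcache (<< 10, i.e. 1024 indices per
 * unit), minus one to get the maximum valid index.
 */
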
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
{
	struct amd_l3_cache *l3;
	struct pci_dev *dev = node_to_k8_nb_misc(node);

	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
	if (!l3) {
		printk(KERN_WARNING "Error allocating L3 struct\n");
		return NULL;
	}

	l3->dev = dev;

	amd_calc_l3_indices(l3);

	return l3;
}

static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	int node;

	if (boot_cpu_data.x86 != 0x10)
		return;

	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9) &&
	     boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	/*
	 * Strictly speaking, the amount in @size below is leaked since it is
	 * never freed but this is done only on shutdown so it doesn't matter.
	 */
	if (!l3_caches) {
		int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);

		l3_caches = kzalloc(size, GFP_ATOMIC);
		if (!l3_caches)
			return;
	}

	node = amd_get_nb_id(smp_processor_id());

	if (!l3_caches[node]) {
		l3_caches[node] = amd_init_l3_cache(node);
		l3_caches[node]->can_disable = true;
	}

	WARN_ON(!l3_caches[node]);

	this_leaf->l3 = l3_caches[node];
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	struct pci_dev *dev = this_leaf->l3->dev;
	unsigned int reg = 0;

	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!l3->subcaches[i])
			continue;

		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we are disabling, therefore a simple
		 * wbinvd() is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
	}
}

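/*
 * Note (editor's assumption): the write sequence above first stages
 * the index (bit 30 marks the entry to disable, bits 20-21 select the
 * subcache), flushes the node's caches via wbinvd_on_cpu(), and only
 * then commits the disable by setting bit 31 in the same 0x1BC/0x1C0
 * slot register.
 */
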
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	struct pci_dev *dev = this_leaf->l3->dev;
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
		return -EINVAL;

	amd_l3_disable_index(this_leaf->l3, cpu, slot, val);

	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */

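/*
 * Note (editor's illustration, not in the original source): from
 * userspace the interface above looks like, e.g.:
 *	echo 2048 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 * (decimal, since the store uses strict_strtoul() with base 10), which
 * disables L3 index 2048 via slot 0, subject to the
 * SUBCACHE_MASK/SUBCACHE_INDEX bounds check.
 */
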
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

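/*
 * Note (editor's assumption): l2_id/l3_id above identify the group of
 * threads sharing a cache level. num_threads_sharing is rounded up to
 * a power of two via get_count_order(), and shifting the APIC ID right
 * by that order yields a shared-cache ID, which is what cpu_llc_id is
 * set to for topology purposes.
 */
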
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

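/*
 * Note (editor's assumption): the cast above is safe only because
 * struct _cpuid4_info_regs is declared as an exact prefix of struct
 * _cpuid4_info (same members in the same order, minus shared_cpu_map),
 * so a _cpuid4_info pointer can be reinterpreted as the regs-only view.
 */
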
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

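/*
 * Note (editor's assumption): cpuid must execute on the CPU being
 * probed, so detect_cache_attributes() pushes get_cpu_leaves() to that
 * CPU with smp_call_function_single() and waits for completion (the
 * final 'true' argument), collecting the result through *_retval.
 */
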
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->l3 && this_leaf->l3->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

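/*
 * Note (editor's assumption): cache_sysfs_init() below seeds the sysfs
 * entries for all CPUs already online at boot; the notifier above then
 * keeps them in sync as CPUs are hotplugged in (CPU_ONLINE) and out
 * (CPU_DEAD), including the _FROZEN variants used during suspend.
 */
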
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif