/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
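
/*
 * Example (illustrative): a CPUID leaf 2 descriptor byte of 0x2c is looked
 * up in cache_table[] above and counted as a 32 KB L1 data cache
 * (LVL_1_DATA); the { 0x00, 0, 0 } entry terminates the scan. See the
 * descriptor loop in init_intel_cacheinfo() below.
 */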


enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake type (they are in "dummy").
   Maybe later. */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
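
/*
 * Example (illustrative): an encoded associativity field of 0xa read from
 * CPUID 0x80000006 decodes via assocs[] above to 32 ways; 0xf means fully
 * associative and maps to the 0xffff sentinel checked in amd_cpuid4().
 */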

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	     union _cpuid4_leaf_ebx *ebx,
	     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
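
/*
 * Worked example (illustrative): a 512 KB, 16-way L2 with 64-byte lines
 * yields ways_of_associativity = 15 and number_of_sets =
 * (512 * 1024) / 64 / 16 - 1 = 511, matching the CPUID(4) "minus one"
 * encoding that cpuid4_cache_lookup_regs() undoes when computing the size.
 */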

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
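
/*
 * Example (illustrative): with no subcaches disabled (val == 0) on a
 * pre-0x15 family, sc0 = sc1 = 1 and sc2 = sc3 = 2, so the maximum is 2
 * and l3->indices = (2 << 10) - 1 = 2047.
 */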

static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: AMD northbridge containing the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}
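
/*
 * Register layout sketch (as read back above): bits 31:30 of the disable
 * register flag an occupied slot, and the low 12 bits hold the disabled
 * cache index.
 */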

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb:    AMD northbridge containing the L3 cache
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warning("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
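
/*
 * Usage sketch (illustrative; the path assumes the sysfs layout built by
 * cache_add_dev() below, with the L3 typically at index3):
 *
 *   # cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   FREE
 *   # echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * writes index 42 into disable slot 0 of cpu0's L3 leaf.
 */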

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (cpu_has_topoext)
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
	return 0;
}
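
/*
 * Worked example (illustrative): number_of_sets = 511,
 * coherency_line_size = 63, physical_line_partition = 0 and
 * ways_of_associativity = 15 give size = 512 * 64 * 1 * 16 = 512 KB.
 */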

static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
{

	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid & ~((1 << index_msb) - 1);
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid & ~((1 << index_msb) - 1);
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
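
/*
 * Worked example (illustrative): num_threads_sharing = 2 gives
 * index_msb = get_count_order(2) = 1, so l3_id = apicid & ~1 and the two
 * hyperthreads sharing the cache record the same cpu_llc_id.
 */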

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int i, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))
			return 0;

		this_leaf = CPUID4_INFO_IDX(cpu, index);
		nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
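
/*
 * Example (illustrative): CPUs whose apicid agrees above bit index_msb
 * share the leaf; with num_threads_sharing = 4, index_msb = 2 and apicids
 * 4..7 all land in the same shared_cpu_map.
 */
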
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
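
/*
 * Usage sketch (illustrative; path assumed from the sysfs layout built in
 * cache_add_dev() below): the read-only attributes above surface as, e.g.,
 * /sys/devices/system/cpu/cpu0/cache/index0/size and .../level. Reading
 * them returns the decoded CPUID(4) values, with the "+1" in
 * show_one_plus() undoing the hardware's minus-one encoding.
 */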

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

1aa1a9f9 1230static int __cpuinit cache_sysfs_init(void)
1da177e4 1231{
1aa1a9f9
AR
1232 int i;
1233
1da177e4
LT
1234 if (num_cache_leaves == 0)
1235 return 0;
1236
1aa1a9f9 1237 for_each_online_cpu(i) {
ef1d7151 1238 int err;
8a25a2fd 1239 struct device *dev = get_cpu_device(i);
c789c037 1240
8a25a2fd 1241 err = cache_add_dev(dev);
ef1d7151
AM
1242 if (err)
1243 return err;
1aa1a9f9 1244 }
ef1d7151 1245 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1aa1a9f9 1246 return 0;
1da177e4
LT
1247}
1248
1aa1a9f9 1249device_initcall(cache_sysfs_init);
1da177e4
LT
1250
1251#endif