/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support : jacob.shin@amd.com
 *
 * April 2006
 * - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between the cores of a multi-core processor
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#include <asm/idle.h>

#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

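/*
 * One MCi_MISCj error-thresholding register: which bank and block it
 * belongs to, which CPU owns it, the MSR address to program, the current
 * interrupt-enable and threshold settings, and the sysfs kobject plus the
 * list linking it to the other blocks of the same bank.
 */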
struct threshold_block {
	unsigned int block;
	unsigned int bank;
	unsigned int cpu;
	u32 address;
	u16 interrupt_enable;
	u16 threshold_limit;
	struct kobject kobj;
	struct list_head miscj;
};

/* defaults used early in boot */
static struct threshold_block threshold_defaults = {
	.interrupt_enable = 0,
	.threshold_limit = THRESHOLD_MAX,
};

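/*
 * Per-bank sysfs container: the bank directory's kobject, the head of the
 * block list, and the mask of CPUs that share this bank.
 */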
struct threshold_bank {
	struct kobject *kobj;
	struct threshold_block *blocks;
	cpumask_var_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

#ifdef CONFIG_SMP
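/* Only bank 4 (MC4, the northbridge bank) is shared between cores */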
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

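/*
 * Arguments for threshold_restart_bank(): the block to reprogram, whether
 * to reset the error count and overflow bit, and the previous threshold
 * limit when only the limit is being changed.
 */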
struct thresh_restart {
	struct threshold_block *b;
	int reset;
	u16 old_limit;
};

/*
 * Must run on the CPU that owns the bank; called via
 * smp_call_function_single().
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 mci_misc_hi, mci_misc_lo;

	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);

	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		mci_misc_hi =
		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);
		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	if (tr->b->interrupt_enable)
		mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC;
	else
		mci_misc_hi &= ~MASK_INT_TYPE_HI;

	mci_misc_hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int bank, block;
	unsigned int cpu = smp_processor_id();
	u8 lvt_off;
	u32 low = 0, high = 0, address = 0;
	struct thresh_restart tr;

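	/*
	 * Walk every bank and its chain of thresholding blocks: block 0 is
	 * MCi_MISC itself, block 1 is found through the BLKPTR field relative
	 * to MCG_XBLK_ADDR, and any further blocks follow contiguously.
	 */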
	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
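			/* Route the threshold interrupt through an extended APIC LVT entry */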
			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
						       APIC_EILVT_MSG_FIX, 0);

			high &= ~MASK_LVTOFF_HI;
			high |= lvt_off << 20;
			wrmsr(address, low, high);

			threshold_defaults.address = address;
			tr.b = &threshold_defaults;
			tr.reset = 0;
			tr.old_limit = 0;
			threshold_restart_bank(&tr);

			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR. The
 * interrupt fires when error_count reaches threshold_limit, and the handler
 * simply logs the event to mcelog with a software-defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	unsigned int bank, block;
	struct mce m;
	u32 low = 0, high = 0, address = 0;

	mce_setup(&m);

	/* Scan the banks and log the first block found with its overflow bit set */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					   &__get_cpu_var(mce_poll_banks));

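			/* This block's counter overflowed, so it raised the interrupt: log it */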
			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

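/* sysfs store: switch the APIC threshold interrupt for this block on or off */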
static ssize_t store_interrupt_enable(struct threshold_block *b,
				      const char *buf, size_t count)
{
	char *end;
	struct thresh_restart tr;
	unsigned long new = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;
	b->interrupt_enable = !!new;

	tr.b = b;
	tr.reset = 0;
	tr.old_limit = 0;
	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	/* consume the whole write so a trailing newline is not resubmitted */
	return count;
}

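/* sysfs store: clamp the new limit to [1, THRESHOLD_MAX] and reprogram the block */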
static ssize_t store_threshold_limit(struct threshold_block *b,
				     const char *buf, size_t count)
{
	char *end;
	struct thresh_restart tr;
	unsigned long new = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;
	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;
	tr.reset = 0;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return count;
}

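/* Pairs a threshold_block with a result slot for smp_call_function_single() */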
struct threshold_block_cross_cpu {
	struct threshold_block *tb;
	long retval;
};

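/*
 * Runs on the CPU that owns the block: read MCi_MISCj and convert the
 * hardware counter (which starts at THRESHOLD_MAX - threshold_limit) back
 * into the number of errors seen so far.
 */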
static void local_error_count_handler(void *_tbcc)
{
	struct threshold_block_cross_cpu *tbcc = _tbcc;
	struct threshold_block *b = tbcc->tb;
	u32 low, high;

	rdmsr(b->address, low, high);
	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	struct threshold_block_cross_cpu tbcc = { .tb = b, };

	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
	return sprintf(buf, "%lx\n", tbcc.retval);
}

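/* Writing anything to error_count resets the counter and clears the overflow bit */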
static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
	return count;
}

#define THRESHOLD_ATTR(_name, _mode, _show, _store) {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show = _show,						\
	.store = _store,					\
};

#define RW_ATTR(name)						\
static struct threshold_attr name =				\
	THRESHOLD_ATTR(name, 0644, show_## name, store_## name)

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;
	return ret;
}

static struct sysfs_ops threshold_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops = &threshold_ops,
	.default_attrs = default_attrs,
};

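/*
 * Allocate a threshold_block and a sysfs "misc%i" kobject for every valid
 * block of a bank, recursing from block 0 (MCi_MISC) along the BLKPTR chain.
 */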
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	int err;
	u32 low, high;
	struct threshold_block *b = NULL;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	    (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block = block;
	b->bank = bank;
	b->cpu = cpu;
	b->address = address;
	b->interrupt_enable = 0;
	b->threshold_limit = THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	else
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   "misc%i", block);
	if (err)
		goto out_free;
recurse:
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else
		++address;

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		kfree(b);
	}
	return err;
}

static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
	return allocate_threshold_blocks(cpu, bank, 0,
					 MSR_IA32_MC0_MISC + bank * 4);
}

/*
 * Symlink shared banks from sibling cores to the first core; the first core
 * owns the sysfs dir and files.
 */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	char name[32];

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(cpu_core_mask(cpu));

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];

		if (!b)
			goto out;

		err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
					b->kobj, name);
		if (err)
			goto out;

		cpumask_copy(b->cpus, cpu_core_mask(cpu));
		per_cpu(threshold_banks, cpu)[bank] = b;
		goto out;
	}
#endif

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}
	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
		kfree(b);
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
	if (!b->kobj)
		goto out_free;

#ifndef CONFIG_SMP
	cpumask_setall(b->cpus);
#else
	cpumask_copy(b->cpus, cpu_core_mask(cpu));
#endif

	per_cpu(threshold_banks, cpu)[bank] = b;

	err = local_allocate_threshold_blocks(cpu, bank);
	if (err)
		goto out_free;

	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
					b->kobj, name);
		if (err)
			goto out;

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	free_cpumask_var(b->cpus);
	kfree(b);
out:
	return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto out;
	}
out:
	return err;
}

/*
 * CPU hotplug support: on multi-core processors the first core always takes
 * ownership of the shared sysfs dir and files, and the rest of the cores are
 * symlinked to it.
 */

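/* Free all blocks chained off a bank's head block, then the head block itself */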
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

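/*
 * Tear down one bank: remove the sibling symlinks, free all of its blocks,
 * and drop the bank directory's kobject.
 */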
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	int i = 0;
	struct threshold_bank *b;
	char name[32];

	b = per_cpu(threshold_banks, cpu)[bank];

	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;
		return;
	}
#endif

	/* remove all sibling symlinks before unregistering */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	free_cpumask_var(b->cpus);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
}

/* get notified when a cpu comes on/off */
static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
						    unsigned int cpu)
{
	if (cpu >= NR_CPUS)
		return;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* create devices for CPUs that are online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);
		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;
	return 0;
}

device_initcall(threshold_init_device);