@@ -197,10 +197,10 @@
  * A list of the banks enabled on each logical CPU. Controls which respective
  * descriptors to initialize later in mce_threshold_create_device().
  */
-static DEFINE_PER_CPU(unsigned int, bank_map);
+static DEFINE_PER_CPU(u64, bank_map);
 
 /* Map of banks that have more than MCA_MISC0 available. */
-static DEFINE_PER_CPU(u32, smca_misc_banks_map);
+static DEFINE_PER_CPU(u64, smca_misc_banks_map);
 
 static void amd_threshold_interrupt(void);
 static void amd_deferred_error_interrupt(void);
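Both per-CPU maps are widened to u64 so that banks past index 31 can be tracked, and the BIT() uses below become BIT_ULL() to match. A minimal userspace sketch (not kernel code; BIT()/BIT_ULL() are redefined here only so the example compiles standalone) of why the 64-bit macro is needed once a bank index can reach 32:

#include <stdio.h>

/* Userspace stand-ins for the kernel macros: on a 32-bit kernel build,
 * BIT(n) is 1UL << n, which is undefined for n >= 32; BIT_ULL(n) is a
 * 64-bit shift and stays valid up to bit 63. */
#define BIT(n)		(1UL << (n))
#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	unsigned int bank = 40;		/* hypothetical bank index past bit 31 */
	unsigned long long bank_map = 0;

	bank_map |= BIT_ULL(bank);	/* BIT(bank) would not fit a 32-bit map */
	printf("bank_map = 0x%llx\n", bank_map);
	return 0;
}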
@@ -229,7 +229,7 @@
 		return;
 
 	if (low & MASK_BLKPTR_LO)
-		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
+		per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank);
 
 }
 
@@ -492,7 +492,7 @@
 	if (!block)
 		return MSR_AMD64_SMCA_MCx_MISC(bank);
 
-	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
+	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank)))
 		return 0;
 
 	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
@@ -513,7 +513,7 @@
 	/* Fall back to method we used for older processors: */
 	switch (block) {
 	case 0:
-		addr = msr_ops.misc(bank);
+		addr = mca_msr_reg(bank, MCA_MISC);
 		break;
 	case 1:
 		offset = ((low & MASK_BLKPTR_LO) >> 21);
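These hunks also begin replacing the msr_ops function-pointer table with mca_msr_reg(), which computes a bank register's MSR address from the bank number and a register enum. As a rough illustration of that kind of mapping (the constants follow the documented legacy and SMCA register layouts, but this helper is a sketch, not the kernel's mca_msr_reg()):

/* Illustrative sketch of a bank/register -> MSR-address helper in the
 * spirit of mca_msr_reg(); not the kernel's implementation. */
enum mca_reg { MCA_CTL, MCA_STATUS, MCA_ADDR, MCA_MISC };

static unsigned int mca_msr_reg_sketch(unsigned int bank, enum mca_reg reg,
				       int smca)
{
	if (smca)	/* SMCA: 0x10 MSR slots per bank from 0xc0002000 */
		return 0xc0002000 + bank * 0x10 + reg;

	/* Legacy: four consecutive MSRs per bank starting at MC0_CTL */
	return 0x400 + bank * 4 + reg;
}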
@@ -536,7 +536,7 @@
 	int new;
 
 	if (!block)
-		per_cpu(bank_map, cpu) |= (1 << bank);
+		per_cpu(bank_map, cpu) |= BIT_ULL(bank);
 
 	memset(&b, 0, sizeof(b));
 	b.cpu = cpu;
@@ -952,6 +952,24 @@
 	return status & MCI_STATUS_DEFERRED;
 }
 
+static bool _log_error_deferred(unsigned int bank, u32 misc)
+{
+	if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
+			     mca_msr_reg(bank, MCA_ADDR), misc))
+		return false;
+
+	/*
+	 * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers.
+	 * Return true here to avoid accessing these registers.
+	 */
+	if (!mce_flags.smca)
+		return true;
+
+	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
+	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
+	return true;
+}
+
 /*
  * We have three scenarios for checking for Deferred errors:
  *
@@ -963,19 +981,8 @@
  */
 static void log_error_deferred(unsigned int bank)
 {
-	bool defrd;
-
-	defrd = _log_error_bank(bank, msr_ops.status(bank),
-				msr_ops.addr(bank), 0);
-
-	if (!mce_flags.smca)
+	if (_log_error_deferred(bank, 0))
 		return;
-
-	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
-	if (defrd) {
-		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
-		return;
-	}
 
 	/*
 	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
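Taken together, the two hunks above factor the common sequence, log from MCA_STATUS/MCA_ADDR and then clear MCA_DESTAT on SMCA parts, into _log_error_deferred(), so log_error_deferred() only falls through to the MCA_DE{STAT,ADDR} pair when nothing was logged (and, in the next hunk, log_error_thresholding() reuses the same helper). A hedged userspace sketch of the resulting control flow, with stubs standing in for the real MSR accesses:

#include <stdbool.h>
#include <stdio.h>

static bool smca = true;	/* stands in for mce_flags.smca */

/* Stub for _log_error_bank(): pretend whether MCA_STATUS held an error. */
static bool log_from_status(unsigned int bank)
{
	printf("bank %u: read MCA_STATUS/MCA_ADDR\n", bank);
	return bank % 2 == 0;	/* arbitrary result for the demo */
}

/* Mirrors the shape of the new _log_error_deferred() helper. */
static bool log_deferred(unsigned int bank)
{
	if (!log_from_status(bank))
		return false;
	if (smca)
		printf("bank %u: clear MCA_DESTAT\n", bank);
	return true;
}

int main(void)
{
	for (unsigned int bank = 0; bank < 2; bank++) {
		if (log_deferred(bank))
			continue;	/* logged from MCA_STATUS; done */
		/* Fall back to the SMCA-only MCA_DE{STAT,ADDR} pair. */
		printf("bank %u: read MCA_DESTAT/MCA_DEADDR\n", bank);
	}
	return 0;
}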
@@ -996,7 +1003,7 @@
 
 static void log_error_thresholding(unsigned int bank, u64 misc)
 {
-	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
+	_log_error_deferred(bank, misc);
 }
 
 static void log_and_reset_block(struct threshold_block *block)
@@ -1041,7 +1048,7 @@
 		return;
 
 	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
 			continue;
 
 		first_block = bp[bank]->blocks;
@@ -1384,7 +1391,7 @@
 		}
 	}
 
-	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+	err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
 	if (err)
 		goto out_kobj;
 
@@ -1518,7 +1525,7 @@
 		return -ENOMEM;
 
 	for (bank = 0; bank < numbanks; ++bank) {
-		if (!(this_cpu_read(bank_map) & (1 << bank)))
+		if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
 			continue;
 		err = threshold_create_bank(bp, cpu, bank);
 		if (err) {