@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -48,6 +48,7 @@
 #include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/numa.h>
 
 #include "hfi.h"
 #include "affinity.h"
@@ -63,6 +64,7 @@
 static const char * const irq_type_names[] = {
 	"SDMA",
 	"RCVCTXT",
+	"NETDEVCTXT",
 	"GENERAL",
 	"OTHER",
 };
@@ -216,6 +218,8 @@
 	pr_err("HFI: System BIOS may need to be upgraded\n");
 	for (node = 0; node < node_affinity.num_possible_nodes; node++)
 		hfi1_per_node_cntr[node] = 1;
+
+	pci_dev_put(dev);
 
 	return 0;
 }
@@ -630,21 +634,10 @@
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-	int node = pcibus_to_node(dd->pcidev->bus);
 	struct hfi1_affinity_node *entry;
 	const struct cpumask *local_mask;
 	int curr_cpu, possible, i, ret;
 	bool new_entry = false;
-
-	/*
-	 * If the BIOS does not have the NUMA node information set, select
-	 * NUMA 0 so we get consistent performance.
-	 */
-	if (node < 0) {
-		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-		node = 0;
-	}
-	dd->node = node;
 
 	local_mask = cpumask_of_node(dd->node);
 	if (cpumask_first(local_mask) >= nr_cpu_ids)
@@ -658,7 +651,7 @@
 	 * create an entry in the global affinity structure and initialize it.
 	 */
 	if (!entry) {
-		entry = node_affinity_allocate(node);
+		entry = node_affinity_allocate(dd->node);
 		if (!entry) {
 			dd_dev_err(dd,
 				   "Unable to allocate global affinity node\n");
@@ -749,6 +742,7 @@
 	if (new_entry)
 		node_affinity_add_tail(entry);
 
+	dd->affinity_entry = entry;
 	mutex_unlock(&node_affinity.lock);
 
 	return 0;
@@ -764,10 +758,9 @@
 {
 	struct hfi1_affinity_node *entry;
 
-	if (dd->node < 0)
-		return;
-
 	mutex_lock(&node_affinity.lock);
+	if (!dd->affinity_entry)
+		goto unlock;
 	entry = node_affinity_lookup(dd->node);
 	if (!entry)
 		goto unlock;
@@ -778,8 +771,8 @@
 	 */
 	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+	dd->affinity_entry = NULL;
 	mutex_unlock(&node_affinity.lock);
-	dd->node = -1;
 }
 
 /*
@@ -819,10 +812,10 @@
 	set = &entry->def_intr;
 	cpumask_set_cpu(cpu, &set->mask);
 	cpumask_set_cpu(cpu, &set->used);
-	for (i = 0; i < dd->num_msix_entries; i++) {
+	for (i = 0; i < dd->msix_info.max_requested; i++) {
 		struct hfi1_msix_entry *other_msix;
 
-		other_msix = &dd->msix_entries[i];
+		other_msix = &dd->msix_info.msix_entries[i];
 		if (other_msix->type != IRQ_SDMA || other_msix == msix)
 			continue;
 
@@ -914,6 +907,11 @@
 			set = &entry->rcv_intr;
 		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
 		break;
+	case IRQ_NETDEVCTXT:
+		rcd = (struct hfi1_ctxtdata *)msix->arg;
+		set = &entry->def_intr;
+		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+		break;
 	default:
 		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
 		return -EINVAL;
@@ -986,6 +984,10 @@
 		if (rcd->ctxt != HFI1_CTRL_CTXT)
 			set = &entry->rcv_intr;
 		break;
+	case IRQ_NETDEVCTXT:
+		rcd = (struct hfi1_ctxtdata *)msix->arg;
+		set = &entry->def_intr;
+		break;
 	default:
 		mutex_unlock(&node_affinity.lock);
 		return;
@@ -1039,7 +1041,7 @@
 	struct hfi1_affinity_node *entry;
 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 	const struct cpumask *node_mask,
-		*proc_mask = &current->cpus_allowed;
+		*proc_mask = current->cpus_ptr;
 	struct hfi1_affinity_node_list *affinity = &node_affinity;
 	struct cpu_mask_set *set = &affinity->proc;
 
@@ -1047,7 +1049,7 @@
 	 * check whether process/context affinity has already
 	 * been set
 	 */
-	if (cpumask_weight(proc_mask) == 1) {
+	if (current->nr_cpus_allowed == 1) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
@@ -1058,7 +1060,7 @@
 		cpu = cpumask_first(proc_mask);
 		cpumask_set_cpu(cpu, &set->used);
 		goto done;
-	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
|---|