/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask.  Do this in two steps
	 * below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
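
	/*
	 * Worked example (hypothetical topology, for illustration only):
	 * with 8 online logical CPUs whose HT siblings are enumerated as
	 * (0,4), (1,5), (2,6), (3,7), possible = 8 and ht = 2.  Step 1
	 * below walks past CPUs 0-3 and keeps them as the "real" cores;
	 * Step 2 clears their siblings, CPUs 4-7, from the mask.
	 */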
	/*
	 * Step 1.  Skip over the first N HT siblings and use them as the
	 * "real" cores.  Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

void node_affinity_init(void)
{
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early.  It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();
}

void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		list_del(pos);
		kfree(entry);
	}
	spin_unlock(&node_affinity.lock);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * Append an entry to the list.
 * Must be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* Must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

/*
 * Interrupt affinity.
 *
 * Non-receive interrupts (SDMA and general) get a default mask that
 * starts as the node's non-HT CPUs with the CPUs claimed by receive
 * contexts removed.
 *
 * Receive-context interrupts are handed out starting at the node's
 * second CPU (the first is retained for the control context), wrapping
 * back to that CPU as necessary.
 */
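/*
 * Example (hypothetical numbers, for illustration only): on a node whose
 * non-HT CPUs are 0-13, with dd->n_krcv_queues == 8, CPU 0 stays in
 * def_intr for the control context, CPUs 1-7 move to rcv_intr for the
 * seven remaining kernel receive contexts, and CPUs 8-13 remain in
 * def_intr for SDMA and general interrupts.
 */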
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize
	 * it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask,
			    &node_affinity.real_cpu_mask, local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
		} else {
			/*
			 * Retain the first CPU in the default list for the
			 * control context.
			 */
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive
			 * list.
			 */
			for (i = 0; i < dd->n_krcv_queues - 1; i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}
		}

		spin_lock(&node_affinity.lock);
		node_affinity_add_tail(entry);
		spin_unlock(&node_affinity.lock);
	}

	return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		/* fall through */
	case IRQ_GENERAL:
		set = &entry->def_intr;
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT) {
			set = &entry->def_intr;
			cpu = cpumask_first(&set->mask);
		} else {
			set = &entry->rcv_intr;
		}
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		free_cpumask_var(diff);
		return -EINVAL;
	}

	/*
	 * The control receive context is placed on a particular CPU, which
	 * is set above.  Skip accounting for it.  Everything else finds its
	 * CPU here.
	 */
	if (cpu == -1 && set) {
		spin_lock(&node_affinity.lock);
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map.
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
		spin_unlock(&node_affinity.lock);
	}
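
	/*
	 * Illustration of the generation counter (hypothetical numbers):
	 * with mask = {0,1,2} and three vectors already placed, 'used'
	 * equals 'mask', so the fourth vector bumps gen, clears 'used',
	 * and lands on CPU 0 again.  gen therefore counts how many times
	 * the set has been fully consumed; hfi1_put_irq_affinity() unwinds
	 * it as vectors are released.
	 */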

	switch (msix->type) {
	case IRQ_SDMA:
		sde->cpu = cpu;
		break;
	case IRQ_GENERAL:
	case IRQ_RCVCTXT:
	case IRQ_OTHER:
		break;
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
		    msix->msix.vector, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);

	free_cpumask_var(diff);
	return 0;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
	case IRQ_GENERAL:
		set = &entry->def_intr;
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* only do accounting for non control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		return;
	}

	if (set) {
		spin_lock(&node_affinity.lock);
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
		spin_unlock(&node_affinity.lock);
	}

	irq_set_affinity_hint(msix->msix.vector, NULL);
	cpumask_clear(&msix->mask);
}

int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
{
	int cpu = -1, ret;
	cpumask_var_t diff, mask, intrs;
	struct hfi1_affinity_node *entry;
	const struct cpumask *node_mask,
		*proc_mask = tsk_cpus_allowed(current);
	struct cpu_mask_set *set = &node_affinity.proc;

	/*
	 * Check whether process/context affinity has already
	 * been set.
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used.  This is atomic so we don't
		 * need the lock.
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity, so find one to
	 * recommend.  We prefer CPUs on the same NUMA node as the device.
	 */
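	/*
	 * Informal summary of the selection order implemented below:
	 *   1. an unused CPU on the device's NUMA node that is not also
	 *      used by an interrupt handler,
	 *   2. any unused CPU on the device's NUMA node,
	 *   3. any unused CPU off the device's NUMA node.
	 */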

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
	if (!ret)
		goto free_mask;

	spin_lock(&node_affinity.lock);
	/*
	 * If we've used all available CPUs, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	entry = node_affinity_lookup(dd->node);
	/* CPUs used by interrupt handlers */
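	/*
	 * Note: if a set's generation is non-zero it has wrapped at least
	 * once, so every CPU in its mask is busy with interrupts; otherwise
	 * only the CPUs already in 'used' are.
	 */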
	cpumask_copy(intrs, (entry->def_intr.gen ?
			     &entry->def_intr.mask :
			     &entry->def_intr.used));
	cpumask_or(intrs, intrs, (entry->rcv_intr.gen ?
				  &entry->rcv_intr.mask :
				  &entry->rcv_intr.used));
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs));

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * the device's NUMA node.
	 */
	if (node == -1)
		node = dd->node;
	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* diff will hold all unused cpus */
	cpumask_andnot(diff, &set->mask, &set->used);
	hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));

	/* get cpumask of available CPUs on preferred NUMA */
	cpumask_and(mask, diff, node_mask);
	hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers.
	 */
	cpumask_andnot(diff, mask, intrs);
	if (!cpumask_empty(diff))
		cpumask_copy(mask, diff);

	/*
	 * If we don't have a CPU on the preferred NUMA node, get
	 * the list of the remaining available CPUs.
	 */
	if (cpumask_empty(mask)) {
		cpumask_andnot(diff, &set->mask, &set->used);
		cpumask_andnot(mask, diff, node_mask);
	}
	hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
		  cpumask_pr_args(mask));

	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);
	spin_unlock(&node_affinity.lock);

	free_cpumask_var(intrs);
free_mask:
	free_cpumask_var(mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
{
	struct cpu_mask_set *set = &node_affinity.proc;

	if (cpu < 0)
		return;

	spin_lock(&node_affinity.lock);
	cpumask_clear_cpu(cpu, &set->used);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	spin_unlock(&node_affinity.lock);
}