 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 Intel Corporation.
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * Author: liang@whamcloud.com
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/cpu.h>
#include <linux/sched.h>
#include "../../../include/linux/libcfs/libcfs.h"
 * modparam for setting number of partitions
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
static int cpu_npartitions;
module_param(cpu_npartitions, int, 0444);
MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
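/*
 * For example (assuming the standard libcfs module name), adding
 * "options libcfs cpu_npartitions=4" to the modprobe configuration would
 * request four CPU partitions at module load time.
 */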
 * modparam for setting CPU partition patterns:
 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is the CPU
 *      partition ID, the numbers inside the brackets are processor IDs (cores or HTs)
 * e.g. "N 0[0,1] 1[2,3]": the leading 'N' means the numbers inside the brackets
 *      are NUMA node IDs, and the number before each bracket is the CPU partition ID
 * NB: if cpu_pattern is specified, cpu_npartitions is ignored
static char *cpu_pattern = "";
module_param(cpu_pattern, charp, 0444);
MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
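/*
 * For example, cpu_pattern="0[0,2,4,6] 1[1,3,5,7]" would place the
 * even-numbered processors in partition 0 and the odd-numbered ones in
 * partition 1, while cpu_pattern="N 0[0] 1[1]" would build one partition
 * per NUMA node on a two-node machine (hypothetical values, shown only to
 * illustrate the syntax described above).
 */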
	/* serialize hotplug etc */
	/* reserved for hotplug */
	unsigned long cpt_version;
	/* mutex to protect cpt_cpumask */
	struct mutex cpt_mutex;
	/* scratch buffer for set/unset_node */
	cpumask_t *cpt_cpumask;
static struct cfs_cpt_data cpt_data;
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
	if (cptab->ctb_cpu2cpt) {
		LIBCFS_FREE(cptab->ctb_cpu2cpt,
			    sizeof(cptab->ctb_cpu2cpt[0]));
	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		if (part->cpt_cpumask)
			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
	if (cptab->ctb_parts) {
		LIBCFS_FREE(cptab->ctb_parts,
			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (cptab->ctb_cpumask)
		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
	LIBCFS_FREE(cptab, sizeof(*cptab));
EXPORT_SYMBOL(cfs_cpt_table_free);
struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
	struct cfs_cpt_table *cptab;
	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	cptab->ctb_nparts = ncpt;
	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
	if (!cptab->ctb_cpu2cpt)
	memset(cptab->ctb_cpu2cpt, -1,
	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
	if (!cptab->ctb_parts)
	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (!part->cpt_cpumask || !part->cpt_nodemask)
	spin_lock(&cpt_data.cpt_lock);
	/* Reserved for hotplug */
	cptab->ctb_version = cpt_data.cpt_version;
	spin_unlock(&cpt_data.cpt_lock);
	cfs_cpt_table_free(cptab);
EXPORT_SYMBOL(cfs_cpt_table_alloc);
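/*
 * A minimal usage sketch of the allocation API above: build a two-partition
 * table by hand and release it again. Assumes at least four online CPUs
 * (0-3) and skips the error checking a real caller would do.
 *
 *	struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *	if (cptab) {
 *		cfs_cpt_set_cpu(cptab, 0, 0);
 *		cfs_cpt_set_cpu(cptab, 0, 1);
 *		cfs_cpt_set_cpu(cptab, 1, 2);
 *		cfs_cpt_set_cpu(cptab, 1, 3);
 *		cfs_cpt_table_free(cptab);
 *	}
 */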
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t: ", i);
		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, "%d ", j);
EXPORT_SYMBOL(cfs_cpt_table_print);
cfs_cpt_number(struct cfs_cpt_table *cptab)
	return cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_number);
cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	return cpt == CFS_CPT_ANY ?
	       cpumask_weight(cptab->ctb_cpumask) :
	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
EXPORT_SYMBOL(cfs_cpt_weight);
cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	return cpt == CFS_CPT_ANY ?
	       cpumask_any_and(cptab->ctb_cpumask,
			       cpu_online_mask) < nr_cpu_ids :
	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
			       cpu_online_mask) < nr_cpu_ids;
EXPORT_SYMBOL(cfs_cpt_online);
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
EXPORT_SYMBOL(cfs_cpt_cpumask);
cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
EXPORT_SYMBOL(cfs_cpt_nodemask);
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or offline\n", cpu);
	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);
	cptab->ctb_cpu2cpt[cpu] = cpt;
	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	node = cpu_to_node(cpu);
	/* first CPU of @node in this CPT table */
	if (!node_isset(node, *cptab->ctb_nodemask))
		node_set(node, *cptab->ctb_nodemask);
	/* first CPU of @node in this partition */
	if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
		node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);
EXPORT_SYMBOL(cfs_cpt_set_cpu);
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT-table */
			CDEBUG(D_INFO, "Trying to unset CPU %d which is not in CPT-table %p\n",
	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		       "CPU %d is not in cpu-partition %d\n", cpu, cpt);
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
	cptab->ctb_cpu2cpt[cpu] = -1;
	node = cpu_to_node(cpu);
	LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
	LASSERT(node_isset(node, *cptab->ctb_nodemask));
	for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
		/* does this CPT have another CPU belonging to this node? */
		if (cpu_to_node(i) == node)
		node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
	for_each_cpu(i, cptab->ctb_cpumask) {
		/* does this CPT-table have another CPU belonging to this node? */
		if (cpu_to_node(i) == node)
		node_clear(node, *cptab->ctb_nodemask);
EXPORT_SYMBOL(cfs_cpt_unset_cpu);
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
	if (cpumask_weight(mask) == 0 ||
	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
		CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
	for_each_cpu(i, mask) {
		if (!cfs_cpt_set_cpu(cptab, cpt, i))
EXPORT_SYMBOL(cfs_cpt_set_cpumask);
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
	for_each_cpu(i, mask)
		cfs_cpt_unset_cpu(cptab, cpt, i);
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
	if (node < 0 || node >= MAX_NUMNODES) {
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
	mutex_lock(&cpt_data.cpt_mutex);
	mask = cpt_data.cpt_cpumask;
	cpumask_copy(mask, cpumask_of_node(node));
	rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
	mutex_unlock(&cpt_data.cpt_mutex);
EXPORT_SYMBOL(cfs_cpt_set_node);
cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
	if (node < 0 || node >= MAX_NUMNODES) {
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
	mutex_lock(&cpt_data.cpt_mutex);
	mask = cpt_data.cpt_cpumask;
	cpumask_copy(mask, cpumask_of_node(node));
	cfs_cpt_unset_cpumask(cptab, cpt, mask);
	mutex_unlock(&cpt_data.cpt_mutex);
EXPORT_SYMBOL(cfs_cpt_unset_node);
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
	for_each_node_mask(i, *mask) {
		if (!cfs_cpt_set_node(cptab, cpt, i))
EXPORT_SYMBOL(cfs_cpt_set_nodemask);
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
	for_each_node_mask(i, *mask)
		cfs_cpt_unset_node(cptab, cpt, i);
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
	if (cpt == CFS_CPT_ANY) {
		last = cptab->ctb_nparts - 1;
	for (; cpt <= last; cpt++) {
		for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
			cfs_cpt_unset_cpu(cptab, cpt, i);
EXPORT_SYMBOL(cfs_cpt_clear);
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
	/* convert CPU partition ID to HW node id */
	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;
		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
	weight = nodes_weight(*mask);
	for_each_node_mask(node, *mask) {
EXPORT_SYMBOL(cfs_cpt_spread_node);
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
	int cpu = smp_processor_id();
	int cpt = cptab->ctb_cpu2cpt[cpu];
		/* don't return a negative value, for the safety of upper layers;
		 * instead shadow the unknown CPU onto a valid partition ID
		cpt = cpu % cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_current);
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
	return cptab->ctb_cpu2cpt[cpu];
EXPORT_SYMBOL(cfs_cpt_of_cpu);
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
	nodemask_t *nodemask;
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;
		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
		CERROR("No online CPU found in CPU partition %d; was CPU hotplug performed on this system? You may need to reload the Lustre modules to keep the system working properly.\n",
	for_each_online_cpu(i) {
		if (cpumask_test_cpu(i, cpumask))
		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);
			schedule(); /* switch to allowed CPU */
	/* don't need to set affinity because all online CPUs are covered */
EXPORT_SYMBOL(cfs_cpt_bind);
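/*
 * A minimal usage sketch for cfs_cpt_bind() and cfs_cpt_current() above,
 * assuming the global cfs_cpt_table has already been created during module
 * setup: bind the current thread to partition 0, then ask which partition
 * it is now running on.
 *
 *	if (cfs_cpt_bind(cfs_cpt_table, 0) == 0) {
 *		int cpt = cfs_cpt_current(cfs_cpt_table, 1);
 *
 *		... do CPT-local work using cpt ...
 *	}
 */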
 * Choose up to \a number CPUs from \a node and set them in \a cpt.
 * We always prefer CPUs in the same core/socket.
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
		     cpumask_t *node, int number)
	cpumask_t *socket = NULL;
	cpumask_t *core = NULL;
	if (number >= cpumask_weight(node)) {
		while (!cpumask_empty(node)) {
			cpu = cpumask_first(node);
			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
			cpumask_clear_cpu(cpu, node);
	/* allocate scratch buffer */
	LIBCFS_ALLOC(socket, cpumask_size());
	LIBCFS_ALLOC(core, cpumask_size());
	if (!socket || !core) {
	while (!cpumask_empty(node)) {
		cpu = cpumask_first(node);
		/* get cpumask for cores in the same socket */
		cpumask_copy(socket, topology_core_cpumask(cpu));
		cpumask_and(socket, socket, node);
		LASSERT(!cpumask_empty(socket));
		while (!cpumask_empty(socket)) {
			/* get cpumask for HTs in the same core */
			cpumask_copy(core, topology_sibling_cpumask(cpu));
			cpumask_and(core, core, node);
			LASSERT(!cpumask_empty(core));
			for_each_cpu(i, core) {
				cpumask_clear_cpu(i, socket);
				cpumask_clear_cpu(i, node);
				rc = cfs_cpt_set_cpu(cptab, cpt, i);
			cpu = cpumask_first(socket);
		LIBCFS_FREE(socket, cpumask_size());
		LIBCFS_FREE(core, cpumask_size());
#define CPT_WEIGHT_MIN  4u
cfs_cpt_num_estimate(void)
	unsigned nnode = num_online_nodes();
	unsigned ncpu = num_online_cpus();
	if (ncpu <= CPT_WEIGHT_MIN) {
	/* generate a reasonable number of CPU partitions based on the total
	 * number of CPUs; the preferred N should be a power of 2 and match
	 * this condition: 2 * (N - 1)^2 < NCPUS <= 2 * N^2
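	 *
	 * e.g. the loop below yields ncpt = 2 for up to 8 online CPUs,
	 * ncpt = 4 for 9-32 CPUs and ncpt = 8 for 33-128 CPUs, before the
	 * NUMA and divisibility adjustments that follow.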
	for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
	if (ncpt <= nnode) { /* fat NUMA system */
	} else { /* ncpt > nnode */
		while ((nnode << 1) <= ncpt)
#if (BITS_PER_LONG == 32)
	/* configuring many CPU partitions on a 32-bit system could consume
	ncpt = min(2U, ncpt);
	while (ncpu % ncpt != 0)
		ncpt--; /* worst case is 1 */
static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
	struct cfs_cpt_table *cptab = NULL;
	cpumask_t *mask = NULL;
	rc = cfs_cpt_num_estimate();
	if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
		CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issues or run out of memory while under pressure\n",
	if (num_online_cpus() % ncpt != 0) {
		CERROR("CPU number %d is not a multiple of cpu_npartitions %d, please try a different cpu_npartitions value or set a pattern string with cpu_pattern=STRING\n",
		       (int)num_online_cpus(), ncpt);
	cptab = cfs_cpt_table_alloc(ncpt);
		CERROR("Failed to allocate CPU map(%d)\n", ncpt);
	num = num_online_cpus() / ncpt;
		CERROR("CPU changed while setting CPU partition\n");
	LIBCFS_ALLOC(mask, cpumask_size());
		CERROR("Failed to allocate scratch cpumask\n");
	for_each_online_node(i) {
		cpumask_copy(mask, cpumask_of_node(i));
		while (!cpumask_empty(mask)) {
			struct cfs_cpu_partition *part;
			part = &cptab->ctb_parts[cpt];
			n = num - cpumask_weight(part->cpt_cpumask);
			rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
			LASSERT(num >= cpumask_weight(part->cpt_cpumask));
			if (num == cpumask_weight(part->cpt_cpumask))
	    num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
		CERROR("Expected %d(%d) CPU partitions but got %d(%d); CPU hotplug/unplug while setting?\n",
		       cptab->ctb_nparts, num, cpt,
		       cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
	LIBCFS_FREE(mask, cpumask_size());
	CERROR("Failed to set up CPU partition table with %d CPU partitions, online HW nodes: %d, HW CPUs: %d\n",
	       ncpt, num_online_nodes(), num_online_cpus());
		LIBCFS_FREE(mask, cpumask_size());
		cfs_cpt_table_free(cptab);
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
	struct cfs_cpt_table *cptab;
	for (ncpt = 0;; ncpt++) { /* quick scan for brackets */
		str = strchr(str, '[');
	str = cfs_trimwhite(pattern);
	if (*str == 'n' || *str == 'N') {
	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern %s, or too many partitions %d\n",
	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
	cptab = cfs_cpt_table_alloc(ncpt);
		CERROR("Failed to allocate CPU partition table\n");
	for (str = cfs_trimwhite(pattern), c = 0;; c++) {
		struct cfs_range_expr *range;
		struct cfs_expr_list *el;
		char *bracket = strchr(str, '[');
				CERROR("Invalid pattern %s\n", str);
				CERROR("Expected %d partitions but found %d\n",
		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid CPU pattern %s\n", str);
		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",
		if (cfs_cpt_weight(cptab, cpt) != 0) {
			CERROR("Partition %d has already been set.\n", cpt);
		str = cfs_trimwhite(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern %s\n", str);
		bracket = strchr(str, ']');
			CERROR("Missing right bracket for cpt %d, %s\n",
		if (cfs_expr_list_parse(str, (bracket - str) + 1,
					0, high, &el) != 0) {
			CERROR("Can't parse number range: %s\n", str);
		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride != 0)
				rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
					    cfs_cpt_set_cpu(cptab, cpt, i);
					cfs_expr_list_free(el);
		cfs_expr_list_free(el);
		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU is found on partition %d\n", cpt);
		str = cfs_trimwhite(bracket + 1);
	cfs_cpt_table_free(cptab);
#ifdef CONFIG_HOTPLUG_CPU
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	case CPU_DEAD_FROZEN:
	case CPU_ONLINE_FROZEN:
		spin_lock(&cpt_data.cpt_lock);
		cpt_data.cpt_version++;
		spin_unlock(&cpt_data.cpt_lock);
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
		mutex_lock(&cpt_data.cpt_mutex);
		/* if all HTs in a core are offline, it may break affinity */
		cpumask_copy(cpt_data.cpt_cpumask,
			     topology_sibling_cpumask(cpu));
		warn = cpumask_any_and(cpt_data.cpt_cpumask,
				       cpu_online_mask) >= nr_cpu_ids;
		mutex_unlock(&cpt_data.cpt_mutex);
		CDEBUG(warn ? D_WARNING : D_INFO,
		       "Lustre: CPU plug-out is not well supported yet; performance and stability could be impacted [CPU %u action: %lx]\n",
static struct notifier_block cfs_cpu_notifier = {
	.notifier_call = cfs_cpu_notify,
		cfs_cpt_table_free(cfs_cpt_table);
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
	if (cpt_data.cpt_cpumask)
		LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
	LASSERT(!cfs_cpt_table);
	memset(&cpt_data, 0, sizeof(cpt_data));
	LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
	if (!cpt_data.cpt_cpumask) {
		CERROR("Failed to allocate scratch buffer\n");
	spin_lock_init(&cpt_data.cpt_lock);
	mutex_init(&cpt_data.cpt_mutex);
#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&cfs_cpu_notifier);
	if (*cpu_pattern != 0) {
		cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
		if (!cfs_cpt_table) {
			CERROR("Failed to create cptab from pattern %s\n",
		cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
		if (!cfs_cpt_table) {
			CERROR("Failed to create cptab with npartitions %d\n",
	spin_lock(&cpt_data.cpt_lock);
	if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
		spin_unlock(&cpt_data.cpt_lock);
		CERROR("CPU hotplug/unplug happened during setup\n");
	spin_unlock(&cpt_data.cpt_lock);
	LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
		 num_online_cpus(), cfs_cpt_number(cfs_cpt_table));